import spacy
from spacy import displacy
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import datetime
import pandas as pd
import numpy as np
import pymongo
from pymongo import MongoClient
from unidecode import unidecode
from collections import Counter
import ela_spacy_common as lsc
import prj_common as mycommon

CONNECTION_STRING = "mongodb://localhost/cherifdb"

## Global variables
stemmer = SnowballStemmer(language='french')
nlp = spacy.load("fr_core_news_sm")
token_fr_pontuation = []

# assign the default stopwords list to a variable
STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
stopWords = set(stopwords.words('french'))
sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"
lsc.update_stopWords(stopWords)

# print(type(stopWords))

lsc.update_token_fr_pontuation(token_fr_pontuation)

# print("token_fr_pontuation")
# print(token_fr_pontuation)


'''
Initialisation
'''
def init_ch():
    # Re-bind the module-level objects (declared global so calling init_ch actually resets them)
    global stemmer, nlp, token_fr_pontuation, STOP_WORDS, stopWords, sentence

    stemmer = SnowballStemmer(language='french')
    nlp = spacy.load("fr_core_news_sm")
    token_fr_pontuation = []

    # assign the default stopwords list to a variable
    STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
    stopWords = set(stopwords.words('french'))
    sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"
    lsc.update_stopWords(stopWords)

    # print(type(stopWords))

    lsc.update_token_fr_pontuation(token_fr_pontuation)


'''
1. Tokenisation
Tokenisation turns a text into a sequence of individual tokens.
It also strips the accents from words (à, é, etc.).

:return: a list of tokens
'''


def Ela_Normalize(sentence):
    # Replace basic punctuation with spaces before tokenising
    sentence = str(sentence).replace(",", " ")
    sentence = str(sentence).replace(";", " ")
    sentence = str(sentence).replace(".", " ")
    sentence = str(sentence).replace("'", " ")

    return sentence


'''
Punctuation removal
'''
def Ela_remove_ponct(tokens):
    # Remove every punctuation token listed in token_fr_pontuation
    for tmp in token_fr_pontuation:
        while tmp in tokens:
            tokens.remove(tmp)

    return tokens

'''
This function removes noise items from a token list,
such as " ", "]", etc.
'''
def Ela_Remove_Noise_from_list(tokens):
    # Strip leftover whitespace, brackets, escaped newlines and quotes produced by tokenisation
    for noise in (' ', '[', ']', '\\n', '"'):
        while noise in tokens:
            tokens.remove(noise)

    return tokens


def Ela_Tokenize(sentence):
    # Tokenise the sentence
    sentence = Ela_Normalize(sentence)

    doc = nlp(str(sentence).lower())
    # Return the (accent-stripped) text of each token
    return [unidecode(X.text) for X in doc]
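

# Illustrative usage sketch (added for clarity, not part of the original pipeline):
# what Ela_Tokenize is expected to return on a short French sentence, assuming the
# fr_core_news_sm model is installed. The exact split depends on the spaCy version.
def example_tokenize():
    tokens = Ela_Tokenize("Réseau coupé à Marseille")
    # unidecode strips the accents, e.g. "réseau" -> "reseau", "à" -> "a"
    print(tokens)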


'''
2. Removing the most frequent words
Some words occur very frequently in French.
In English they are called "stop words".
Most of the time these words add no information for the downstream tasks.
Example:
{'ai', 'aie', 'aient', 'aies', 'ait', 'as', etc}

:input: list of tokens
:return: a list of tokens
'''
def Ela_remove_stop_words(tab_tokens):
    clean_words = []

    # Keep only the tokens that are not in the French stop-word list
    for token in tab_tokens:
        if token not in stopWords:
            clean_words.append(token)

    return clean_words
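

# Illustrative sketch (added, not in the original file): chaining Ela_Tokenize and
# Ela_remove_stop_words on the module-level sample sentence defined above.
def example_remove_stop_words():
    tokens = Ela_remove_stop_words(Ela_Tokenize(sentence))
    # Common French function words such as "une" or "de" should no longer appear
    print(tokens)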


'''
3. Removal of: ADP (adposition), DET (determiner), CCONJ (coordinating conjunction)
The full tag list is:

ADJ: adjective, ADP: adposition, ADV: adverb, AUX: auxiliary verb, CONJ: coordinating conjunction, DET: determiner,
INTJ: interjection, NOUN: noun, NUM: numeral, PART: particle, PRON: pronoun, PROPN: proper noun, PUNCT: punctuation,
SCONJ: subordinating conjunction, SYM: symbol, VERB: verb, X: other
'''

def Ela_remove_pronoun(tab_tokens):

    mywords = []
    for token in tab_tokens:
        # Re-run each token through the pipeline to get its part-of-speech tag
        mytok = nlp(str(token).lower())
        for token2 in mytok:
            if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP':
                mywords.append(token2)
    return mywords
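

# Illustrative sketch (added, not in the original file): the POS-based filter applied to
# a few isolated tokens. POS tags assigned to out-of-context single words depend on the
# spaCy model, so the exact result may vary.
def example_remove_pronoun():
    kept = Ela_remove_pronoun(["le", "reseau", "et", "marseille"])
    # "le" (DET) and "et" (CCONJ) are expected to be filtered out
    print([t.text for t in kept])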

'''
# 4. Stemming
Stemming reduces a word to its "root" form. The goal is to group the many variants
of a word under one and the same word. For example, once stemming is applied to
"Chiens" or "Chien", the resulting word is the same.
'''
def Ela_stemmize(tab_tokens):
    # Apply the French Snowball stemmer to the text of each token
    return [stemmer.stem(token.text) for token in tab_tokens]
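

# Illustrative sketch (added, not in the original file): the behaviour described in the
# docstring above, calling the French Snowball stemmer directly on plain strings.
def example_stemming():
    # Both variants are expected to collapse to the same stem (e.g. "chien")
    print(stemmer.stem("chiens"), stemmer.stem("chien"))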


'''
ELA NTLK:
this function takes a text and returns an array of the words
after applying:
- Ela_remove_stop_words
- Ela_remove_pronoun and
- Ela_stemmize
'''

def ela_read_file():
    with open('ela_test_file_v2.txt', mode="r", encoding="utf-8") as f:
        lines = f.readlines()

    tab_tokens = Ela_Tokenize(lines)
    tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize(tab_tokens3)

    tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
    tab_tokens4 = Ela_remove_ponct(tab_tokens4)

    # Save to the MongoDB database
    Ela_list_to_mongo(tab_tokens4, "Tid_33345")

    size_tab = len(tab_tokens4)
    print("size_tab = " + str(size_tab))
    occurrences = Counter(tab_tokens4)
    most_common = occurrences.most_common()
    print(most_common)

    print(tab_tokens4)
    my_file = open("ela_output_test_file.txt", "w")
    my_file.write(str(tab_tokens4))
    my_file.write("\n---------- MOST COMMON -------\n")
    my_file.write(str(most_common))
    my_file.close()


'''
Indexing and storing a token.
IMPORTANT: to share the same process between indexing all the fields and indexing only
certain fields (e.g. indexing the 'title' field or the 'objectif' field), the notion of
'source_field' is introduced:
 - for the title indexing, 'source_field' = 'title'
 - for the objectif indexing, 'source_field' = 'objectif'
'''

def ela_index_record_field(lines, class_id, source_field=""):

    try:
        '''
        Delete every existing index entry for this training course
        '''
        client = MongoClient(CONNECTION_STRING)
        dbname = client['cherifdb']
        coll_name = dbname['elaindex']

        # check default value of parameter : source_field
        if len(str(source_field)) == 0:
            source_field = 'default'

        myquery = {"id_formation": class_id, "source_field": source_field}
        delete_row = coll_name.delete_many(myquery)
        mycommon.myprint(" elaindex - " + str(delete_row.deleted_count) + " documents deleted. Training ==> " + str(class_id) + " ")

        '''
        Add the new index entries
        '''
        tab_tokens = Ela_Tokenize(lines)
        tab_tokens2 = Ela_remove_stop_words(tab_tokens)
        tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
        tab_tokens4 = Ela_stemmize(tab_tokens3)

        tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
        tab_tokens4 = Ela_remove_ponct(tab_tokens4)

        # Save to the MongoDB database
        Ela_list_to_mongo(tab_tokens4, class_id, source_field)

        size_tab = len(tab_tokens4)
        print("size_tab = " + str(size_tab))
        occurrences = Counter(tab_tokens4)
        most_common = occurrences.most_common()
        print(most_common)

        return True
    except Exception as e:
        mycommon.myprint(e)
        return False
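

# Illustrative usage sketch (added, not in the original file): indexing the 'title' field
# of a training course, following the 'source_field' convention described above. The text
# and the id value here are made up for the example.
def example_index_title():
    ok = ela_index_record_field("Formation Python pour debutants", "Tid_0000", source_field="title")
    print("indexed:", ok)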


def Ela_ntlk(mysentence, traning_id):
    tab_tokens = Ela_Tokenize(mysentence)
    tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize(tab_tokens3)

    print("Ela_ntlk = " + str(tab_tokens4))

    ## Use pandas for handling averages, sums, etc.
    # list.sort() sorts in place and returns None, so return a sorted copy instead
    return sorted(tab_tokens4)


def Ela_list_to_mongo(tab_tokens, traning_id, source_field="default"):
    try:
        # clear / truncate elaindex
        # db.elaindex.remove({})

        # Count the occurrences of each value
        my_file = open("ela_output_test_file_pandas_2.txt", "w")
        size_tab = len(tab_tokens)

        occurrences = Counter(tab_tokens)

        # insert the list into a set (deduplication)
        list_set = set(tab_tokens)
        # convert the set back to a list
        unique_list = list(list_set)

        final_lists = []
        for tmp in unique_list:
            # relative frequency of the word in the document
            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
            final_lists.append([str(tmp), str(occurrences[str(tmp)]), str(moyenne), str(traning_id), str(source_field)])

        print(final_lists)
        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_formation', 'source_field'))
        my_file.write(str(data))
        my_file.close()

        # Making a Connection with MongoClient
        client = MongoClient("mongodb://localhost:27017/")
        # database
        db = client["cherifdb"]
        # collection
        collection = db["elaindex"]

        data.reset_index(inplace=True)
        data_dict = data.to_dict("records")

        print(data_dict)
        # Insert the records into the collection
        collection.insert_many(data_dict)
        data.to_csv("data_indexees.csv")

        return True
    except Exception as e:
        mycommon.myprint(e)
        return False
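

# Illustrative sketch (added, not in the original file): reading back what Ela_list_to_mongo
# stores. Each document carries the columns built above ('mots', 'occurence', 'moyenne',
# 'id_formation', 'source_field'), plus the pandas index column added by reset_index().
def example_read_index(class_id):
    client = MongoClient(CONNECTION_STRING)
    coll = client['cherifdb']['elaindex']
    for doc in coll.find({"id_formation": class_id}):
        print(doc["mots"], doc["occurence"], doc["moyenne"])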


def test_ela_myntlk():

    tab = []
    tab_tokens = Ela_Tokenize(sentence)
    tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize(tab_tokens3)

    tab_tokens4.sort()
    Ela_list_to_mongo(tab_tokens4, 'Tid_3245')
    exit()

    '''
    my_file = open("ela_output_test_file_pandas.txt", "w")

    # Count the occurrences of each value
    size_tab = len(tab_tokens4)
    print("size_tab = " + str(size_tab))
    occurrences = Counter(tab_tokens4)

    # insert the list into a set
    list_set = set(tab_tokens4)
    # convert the set back to a list
    unique_list = (list(list_set))

    final_lists = []
    print(" MOT ==> Occurrence ==> Moyenne ")
    for tmp in unique_list:
        moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
        # print(str(tmp) + " ==> " + str(occurrences[str(tmp)]) + " ==> " + str(moyenne))
        list_tmp = [tmp, str(occurrences[str(tmp)]), str(moyenne)]
        # print(list_tmp)
        final_lists.append([str(tmp), str(occurrences[str(tmp)]), str(moyenne)])

    print(" Finals Liste ")
    my_file.write("\nWorld\n")
    print(final_lists)
    data = pd.DataFrame(final_lists, columns={'mots', "occurence", 'moyenne'})
    my_file.write(str("\n############ occurrences ############# \n"))
    my_file.write(str(data))

    most_common = occurrences.most_common()
    print(most_common)
    my_file.close()
    '''