26/04/22 - 15h30

master
ChérifBALDE 2022-04-26 14:36:08 +02:00 committed by cherif
parent 4480b8b45b
commit 40ea13cfb2
3 changed files with 253 additions and 90 deletions

View File

@@ -16,12 +16,15 @@ import inspect
import sys, os
from autocorrect import Speller
from datetime import datetime
import re
from spellchecker import SpellChecker
CONNECTION_STRING = "mongodb://localhost/cherifdb"
## Global Variables
stemmer = SnowballStemmer(language='french')
nlp = spacy.load("fr_core_news_sm")
spell = SpellChecker(language='fr')
token_fr_pontuation = []
@@ -114,17 +117,30 @@ def correct_fr_word(word):
Removal of punctuation
'''
def Ela_remove_ponct(list):
for tmp in token_fr_pontuation:
while tmp in list:
list.remove(tmp)
try:
for tmp in token_fr_pontuation:
while tmp in list:
list.remove(tmp)
return list
return True, list
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible Ela_remove_ponct"
'''
This function removes noise items from lists,
such as " ", "]", " ", etc.
'''
def Ela_Remove_Noise_from_list(list):
list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']',
'{', '}','-', '=', '°', '#', '-', '/', '~', '&', '\\','.', '^', '$', '*', '+',
'?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@']
for noise in list_noises:
while noise in list:
list.remove(noise)
while ' ' in list:
list.remove(' ')
@@ -152,29 +168,101 @@ def Ela_Remove_Noise_from_list(list):
return list
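As a rough illustration of the cleanup the two helpers above perform, here is a standalone sketch with made-up tokens (an assumption, not taken from the repository):

    # Hypothetical sketch: drop punctuation / noise tokens from a token list
    tokens = ['formation', ',', 'python', '(', 'avance', ')', ' ', 'paris']
    noises = [',', '(', ')', ' ']
    cleaned = [t for t in tokens if t not in noises]
    print(cleaned)  # ['formation', 'python', 'avance', 'paris']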
'''
This function removes irrelevant
patterns, for example:
- 21h10
- 1er or 14ieme
def Ela_Tokenize(sentence):
# Tokenize the sentence
'''
def Ela_Remove_Bad_Pattern(sentence):
try:
print(" Tokenaisee de du mot "+str(sentence))
sentence = Ela_Normalize(sentence)
text = sentence.lower() # lowercase the words
# Strip the special characters:
doc = nlp(str(sentence).lower())
patter2 = re.compile(r"^([0-9]+)[:]([a-zA-Z0-9èéêë])+$")
patter3 = re.compile(r"^([0-9]+)[hH]([0-9])+$")
patter4 = re.compile(r"^([0-9]+)[a-zA-Z0-9èéêë]+$")
patter5 = re.compile(r"^([0-9]+)+$")
print(" doc '" + str(doc)+"' ")
doc = nlp(str(text).lower())
final_text = ""
for val in doc :
print(" str(val) = "+str(val))
val_str = str(val)
val_str = re.sub(patter2, ' ', val_str)
val_str = re.sub(patter3, ' ', val_str)
val_str = re.sub(patter4, ' ', val_str)
val_str = re.sub(patter5, ' ', val_str)
final_text = str(final_text) + " "+str(val_str)
for val in doc:
print(" VAL = "+str(val.text)+" unidecode(X.text) = "+str(unidecode(val.text)) )
# Return each token's text
return [unidecode(X.text) for X in doc]
print("final_text = "+str(final_text))
return True, final_text
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible de recuperer la formation"
return False, " Impossible Ela_Remove_Bad_Pattern"
'''
This function replaces special characters and punctuation with spaces
'''
def Ela_Remove_Ponct_Special_Caractere(sentence):
try:
text = sentence.lower() # lowercase the words
# Strip the special characters:
text = re.sub(r"[,\!\?\%\(\)\/\"]", " ", text)
text = re.sub(r"\&\S*\s", " ", text)
text = re.sub(r"\-", " ", text)
list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', '\'', '"',
'{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+','\\n',
'?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@']
sentence = text
for noise in list_noises:
sentence = sentence.replace(str(noise), " ")
return True, sentence
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible Ela_Remove_Ponct_Special_Caractere"
def Ela_Tokenize(sentence):
try:
#print(" Tokenaisee de du mot "+str(sentence))
status, sentence = Ela_Remove_Bad_Pattern(sentence)
if( status is False ):
return False
#print(" AFTER Ela_Remove_Bad_Pattern " + str(sentence))
status, sentence = Ela_Remove_Ponct_Special_Caractere(sentence)
if (status is False):
return False
doc = nlp(str(sentence).lower())
#print(" Tokenize = '" + str(doc)+"' ")
retval = []
for X in doc:
if len(str(unidecode(X.text)).strip()) > 0 :
retval.append( str(X.text).strip())
# Return each token's text
return True, retval
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible Ela_Tokenize"
'''
2. Remove the most frequent words
@@ -188,13 +276,20 @@ example:
:input : Tab of tokens
'''
def Ela_remove_stop_words(tab_tokens):
clean_words = []
for token in tab_tokens:
if token not in stopWords:
clean_words.append(token)
try:
clean_words = []
return clean_words
for token in tab_tokens:
if token not in stopWords:
clean_words.append(token)
return True, clean_words
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible Ela_remove_stop_words"
'''
@@ -208,14 +303,18 @@ SCONJ: subordinating conjunction, SYM: symbol, VERB: verb, X: other
def Ela_remove_pronoun(tab_tokens):
mywords = []
for token in tab_tokens:
mytok = nlp(str(token).lower())
for token2 in mytok:
if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP':
mywords.append(str(mytok))
return mywords
try:
mywords = []
for token in tab_tokens:
mytok = nlp(str(token).lower())
for token2 in mytok:
if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP':
mywords.append(str(mytok))
return True, mywords
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible Ela_remove_pronoun"
'''
# 4. Stemming
@@ -230,7 +329,7 @@ def Ela_stemmize(tab_tokens):
try:
tab_ret_val = []
print(" VERIFICATION SI LE MOT EST FR : " + str(tab_tokens))
#print(" VERIFICATION SI LE MOT EST FR : " + str(tab_tokens))
for mot in tab_tokens:
if( mycommon.check_word_in_fr_dict(str(mot)) ):
@@ -241,58 +340,88 @@ def Ela_stemmize(tab_tokens):
else:
tab_ret_val.append(mot)
print(" STMISATION TAB = "+str(tab_ret_val))
#print(" STMISATION TAB = "+str(tab_ret_val))
return tab_ret_val
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible de recuperer la formation"
return False, " Impossible Ela_stemmize"
#return [stemmer.stem(token.text) for token in tab_tokens]
'''
When stemming a training course, the words of the trainer
or of the training documents must not be altered.
==> So no spelling correction is applied before stemming here.
We also deliberately choose not to stem certain words'''
def Ela_stemmize_Class(tab_tokens):
try:
tab_ret_val = []
#print(" VERIFICATION SI LE MOT DOIT ETRE STEMISE _ CLASS: " + str(tab_tokens))
for mot in tab_tokens:
if( mycommon.Word_Not_Stemmize(str(mot)) ):
tab_ret_val.append(unidecode(mot))
else:
print(" AVANT STEM MOT ="+str(mot))
tab_ret_val.append( unidecode( str(stemmer.stem(mot))))
print(" AVANT STEM MOT =" + unidecode( str(stemmer.stem(mot))))
#print(" STMISATION TAB = "+str(tab_ret_val))
return tab_ret_val
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible de Ela_stemmize_Class"
#return [stemmer.stem(token.text) for token in tab_tokens]
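For reference, a sketch of the French Snowball stemming that Ela_stemmize_Class applies to words not listed in word_not_stem; the outputs are indicative and may vary with the NLTK version:

    from nltk.stem.snowball import SnowballStemmer
    from unidecode import unidecode

    stemmer = SnowballStemmer(language='french')
    for mot in ["formation", "programmation", "responsive"]:
        print(mot, "->", unidecode(stemmer.stem(mot)))
    # roughly: formation -> format, programmation -> programm, responsive -> respons
    # words present in the word_not_stem collection bypass the stemmer entirely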
'''
In the case of a user search, there is a strong
chance of typos in the input,
==> so before stemming the words, a spelling
correction must be applied.
We also deliberately choose not to stem certain words'''
ELA NTLK :
this function takes a text and returns an array of words
after:
- Ela_remove_stop_words
- Ela_remove_pronoun and
- Ela_stemmize
def Ela_stemmize_search(tab_tokens):
try:
tab_ret_val = []
print(" VERIFICATION SI LE MOT DOIT ETRE STEMISE: " + str(tab_tokens))
for mot in tab_tokens:
if (mycommon.Word_Not_Stemmize(str(mot))):
tab_ret_val.append(unidecode(mot))
else:
'''
Apply the spelling correction first
'''
print("AVANT COORECTION ORH ="+str(mot)+" ==> APRES = "+unidecode(str(spell.correction(mot))))
corrected_str = str(spell.correction(mot))
tab_ret_val.append( unidecode (str(stemmer.stem(corrected_str))))
'''
print(" STMISATION TAB = "+str(tab_ret_val))
def ela_read_file():
with open('ela_test_file_v2.txt', mode="r", encoding="utf-8") as f:
lines = f.readlines()
return tab_ret_val
tab_tokens = Ela_Tokenize(lines)
tab_tokens2 = Ela_remove_stop_words(tab_tokens)
tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
tab_tokens4 = Ela_stemmize(tab_tokens3)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
return False, " Impossible de Ela_stemmize_search"
tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
tab_tokens4 = Ela_remove_ponct(tab_tokens4)
# Save to the mongodb database
Ela_list_to_mongo(tab_tokens4, "Tid_33345")
size_tab = len(tab_tokens4)
print("size_tab = " + str(size_tab))
occurrences = Counter(tab_tokens4)
most_common = occurrences.most_common()
print(most_common)
#return [stemmer.stem(token.text) for token in tab_tokens]
print(tab_tokens4)
my_file = open("ela_output_test_file.txt", "w")
my_file.write(str(tab_tokens4))
my_file.write(str("\\n---------- MOST COMMON -------\\n"))
my_file.write(str(most_common))
my_file.close()
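What distinguishes Ela_stemmize_search from Ela_stemmize_Class is the spelling correction applied before stemming; a standalone sketch (the corrected word shown is only the likely pyspellchecker output):

    from spellchecker import SpellChecker
    from nltk.stem.snowball import SnowballStemmer
    from unidecode import unidecode

    spell = SpellChecker(language='fr')
    stemmer = SnowballStemmer(language='french')

    mot = "formaton"                           # user typo for "formation"
    corrected = str(spell.correction(mot))     # likely "formation"
    print(unidecode(stemmer.stem(corrected)))  # then stemmed, e.g. "format"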
'''
Indexing and saving a token
@@ -324,13 +453,25 @@ def ela_index_record_field(lines, class_id, source_field = ""):
'''
Add the indexes
'''
tab_tokens = Ela_Tokenize(lines)
tab_tokens2 = Ela_remove_stop_words(tab_tokens)
tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
tab_tokens4 = Ela_stemmize(tab_tokens3)
status, tab_tokens = Ela_Tokenize(lines)
if( status is False):
return False
tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
tab_tokens4 = Ela_remove_ponct(tab_tokens4)
#print(" AFFICHAGE TAB TOKEN")
#print(tab_tokens)
#print(" FINNN TAB TOKEN")
status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
if (status is False):
return False
status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
if (status is False):
return False
tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
#tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
#tab_tokens4 = Ela_remove_ponct(tab_tokens4)
# Save to the mongodb database
Ela_list_to_mongo(tab_tokens4,class_id, source_field)
@@ -344,7 +485,8 @@ def ela_index_record_field(lines, class_id, source_field = ""):
return True
except Exception as e:
mycommon.myprint(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
return False
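A usage sketch for this indexing entry point; the module name ela_spacy and the field name title are assumptions, while Tid_33345 is the class id used in the test code above:

    import ela_spacy as ls  # hypothetical module name

    ok = ls.ela_index_record_field("Formation Python avance pour debutants", "Tid_33345", "title")
    if ok is False:
        print("indexing failed; details were logged via mycommon.myprint")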
@@ -353,7 +495,7 @@ def Ela_ntlk(mysentence, traning_id):
tab_tokens = Ela_Tokenize(mysentence)
tab_tokens2 = Ela_remove_stop_words(tab_tokens)
tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
tab_tokens4 = Ela_stemmize(tab_tokens3)
tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
print("Ela_ntlk = "+str(tab_tokens4))
@@ -416,7 +558,7 @@ def test_ela_myntlk():
tab_tokens = Ela_Tokenize(sentence)
tab_tokens2 = Ela_remove_stop_words(tab_tokens)
tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
tab_tokens4 = Ela_stemmize(tab_tokens3)
tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
tab_tokens4.sort()
Ela_list_to_mongo(tab_tokens4, 'Tid_3245')
@@ -482,10 +624,10 @@ def ela_index_article_avis_record_field(lines, article_avis_id, source_field = "
tab_tokens = Ela_Tokenize(lines)
tab_tokens2 = Ela_remove_stop_words(tab_tokens)
tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
tab_tokens4 = Ela_stemmize(tab_tokens3)
tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
tab_tokens4 = Ela_remove_ponct(tab_tokens4)
#tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
#tab_tokens4 = Ela_remove_ponct(tab_tokens4)
# Save to the mongodb database

View File

@@ -419,9 +419,18 @@ def ela_recherche_tokens(sentence):
tab_training_id = []
tab_tokens = ls.Ela_Tokenize(sentence)
tab_tokens2 = ls.Ela_remove_stop_words(tab_tokens)
tab_tokens3 = ls.Ela_remove_pronoun(tab_tokens2)
status, tab_tokens = ls.Ela_Tokenize(sentence)
if( status is False):
return False
status, tab_tokens2 = ls.Ela_remove_stop_words(tab_tokens)
if (status is False):
return False
status, tab_tokens3 = ls.Ela_remove_pronoun(tab_tokens2)
if (status is False):
return False
'''
note: 26/03: evolve this function here to
handle words that must not be stemmed, such as the word "Responsive"
@@ -430,17 +439,8 @@ def ela_recherche_tokens(sentence):
'''
print(" VERIF : "+str(tab_tokens3))
tab_corrected_word = []
for mot in tab_tokens3:
mycommon.recherche_check_word_in_fr_dict(str(mot))
val = ls.correct_fr_word(str(mot))
if( val ):
tab_corrected_word.append(str(val))
print("corrected word = "+str(tab_corrected_word))
tab_tokens4 = ls.Ela_stemmize(tab_corrected_word)
tab_tokens4 = ls.Ela_stemmize_search(tab_tokens3)
print(" VERIF APRES STEMISATION : " + str(tab_tokens4))

View File

@@ -329,6 +329,27 @@ def tryInt(val):
return 0
'''
Check that the word is not to be
stemmed, via the "word_not_stem" table
'''
def Word_Not_Stemmize(word = None):
try:
coll_not_stem = dbname["word_not_stem"]
val_tmp = coll_not_stem.count_documents({'mot': str(word)})
if (val_tmp > 0):
return True
else:
return False
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
return False, " Impossible de verifier Word_Not_Stemmize"
'''
This function checks whether a word is in the French dictionary - an internal table;
if not, the word is saved in a table for later processing.