# Elyos_FI_Back_Office/Ela_Spacy.py

import spacy
from spacy import displacy
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import pandas as pd
import numpy as np
import pymongo
from pymongo import MongoClient
from collections import Counter
import ela_spacy_common as lsc
import prj_common as mycommon
from unidecode import unidecode
import inspect
import sys, os
from autocorrect import Speller
from datetime import datetime
import re
from spellchecker import SpellChecker
CONNECTION_STRING = "mongodb://localhost/cherifdb"

## Global variables
stemmer = SnowballStemmer(language='french')
nlp = spacy.load("fr_core_news_sm")
spell = SpellChecker(language='fr')
token_fr_pontuation = []

# Assign the default stop-word list to a variable
STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
stopWords = set(stopwords.words('french'))

sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"

lsc.update_stopWords(stopWords)
lsc.update_token_fr_pontuation(token_fr_pontuation)

spell_fr = Speller(lang='fr')

client = MongoClient(CONNECTION_STRING)
dbname = client['cherifdb']

'''
Initialisation
'''
def init_ch():
    # These names are module-level; without 'global' the assignments below
    # would only create locals and the re-initialisation would have no effect.
    global stemmer, nlp, token_fr_pontuation, STOP_WORDS, stopWords, sentence

    stemmer = SnowballStemmer(language='french')
    nlp = spacy.load("fr_core_news_sm")
    token_fr_pontuation = []

    # Assign the default stop-word list to a variable
    STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
    stopWords = set(stopwords.words('french'))
    sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"

    lsc.update_stopWords(stopWords)
    lsc.update_token_fr_pontuation(token_fr_pontuation)

'''
1. Tokenisation
Tokenisation turns a text into a series of individual tokens.
It also drops accent-only tokens (à, é, etc.).
:return: a list
'''
def Ela_Normalize(sentence):
    sentence = str(sentence).replace(",", " ")
    sentence = str(sentence).replace(";", " ")
    sentence = str(sentence).replace(".", " ")
    sentence = str(sentence).replace("'", " ")
    return sentence
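
# Illustrative usage sketch (the example string is not from the original file):
# Ela_Normalize only swaps the separators , ; . ' for spaces.
def _demo_ela_normalize():
    assert Ela_Normalize("réseau,coupure;client.chez'eux") == "réseau coupure client chez eux"
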
'''
This function takes a word and returns
its French spelling correction.
'''
def correct_fr_word(word):
    try:
        mydata = {}
        print(" Function : correct_fr_word : '" + word + "' =======> " + spell_fr(word))
        mydata['mot'] = str(word)
        mydata['mot_corrected'] = str(spell_fr(word))
        mydata['date_update'] = str(datetime.now())
        mydata['treated'] = 0

        coll_name = dbname['correction_ortho']
        ret_val_tmp = coll_name.insert_one(mydata)

        # insert_one() returns an InsertOneResult; use its acknowledged flag
        if ret_val_tmp.acknowledged is False:
            mycommon.myprint(str(inspect.stack()[0][3]) + " - Unable to insert the word " + str(word) + " into correction_ortho ")

        return spell_fr(word)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False

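
# Hedged usage sketch (assumes a running local MongoDB and the French Speller
# loaded above; 'reseeau' is an illustrative typo, not from the original file):
def _demo_correct_fr_word():
    corrected = correct_fr_word("reseeau")  # also logs the (word, correction) pair in 'correction_ortho'
    print(corrected)  # expected: 'réseau', subject to the autocorrect dictionary
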
'''
Punctuation removal
'''
def Ela_remove_ponct(tokens):
    try:
        # 'tokens' avoids shadowing the builtin 'list'
        for tmp in token_fr_pontuation:
            while tmp in tokens:
                tokens.remove(tmp)
        return True, tokens
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_remove_ponct"

'''
This function removes noise tokens from lists,
such as " ", "]", etc.
'''
def Ela_Remove_Noise_from_list(tokens):
    list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']',
                   '{', '}', '-', '=', '°', '#', '/', '~', '&', '\\', '^', '$', '*', '+',
                   '|', '>', '<', '@', '"', ' ', '\u00a0', '\\n']
    # list.remove() raises ValueError when a value is absent, so each noise
    # token is drained with a guarded loop; '\u00a0' covers the non-breaking
    # spaces that were previously drained one by one.
    for noise in list_noises:
        while noise in tokens:
            tokens.remove(noise)
    return tokens
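
# Minimal sketch of the noise filter (illustrative token list):
def _demo_remove_noise():
    toks = ['réseau', '[', ']', ' ', '"', 'client']
    assert Ela_Remove_Noise_from_list(toks) == ['réseau', 'client']
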
'''
This function removes irrelevant patterns,
for example:
- 21h10
- 1er or 14ieme
'''
def Ela_Remove_Bad_Pattern(sentence):
    try:
        text = sentence.lower()  # lowercase the words

        # Patterns for tokens to strip out (times, ordinals, bare numbers)
        pattern2 = re.compile(r"^([0-9]+)[:]([a-zA-Z0-9èéêë])+$")
        pattern3 = re.compile(r"^([0-9]+)[hH]([0-9])+$")
        pattern4 = re.compile(r"^([0-9]+)[a-zA-Z0-9èéêë]+$")
        pattern5 = re.compile(r"^([0-9]+)+$")

        doc = nlp(text)
        final_text = ""
        for val in doc:
            val_str = str(val)
            val_str = re.sub(pattern2, ' ', val_str)
            val_str = re.sub(pattern3, ' ', val_str)
            val_str = re.sub(pattern4, ' ', val_str)
            val_str = re.sub(pattern5, ' ', val_str)
            final_text = str(final_text) + " " + str(val_str)

        return True, final_text
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_Remove_Bad_Pattern"

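
# Sketch: times such as '21h10', ordinals such as '1er' and bare numbers are
# blanked out token by token (the example sentence is illustrative):
def _demo_remove_bad_pattern():
    ok, cleaned = Ela_Remove_Bad_Pattern("rendez vous le 1er à 21h10")
    if ok:
        print(cleaned)  # '1er' and '21h10' are replaced by spaces
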
'''
This function replaces special characters and punctuation with spaces.
'''
def Ela_Remove_Ponct_Special_Caractere(sentence):
    try:
        text = sentence.lower()  # lowercase the words

        # Strip special characters
        text = re.sub(r"[,\!\?\%\(\)\/\"]", " ", text)
        text = re.sub(r"\&\S*\s", " ", text)
        text = re.sub(r"\-", " ", text)

        list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', '\'', '"',
                       '{', '}', '-', '=', '°', '#', '/', '~', '&', '\\', '^', '$', '*', '+', '\\n',
                       '|', '>', '<', '@']

        sentence = text
        for noise in list_noises:
            sentence = sentence.replace(str(noise), " ")

        return True, sentence
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_Remove_Ponct_Special_Caractere"

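
# Sketch: special characters and punctuation become spaces (illustrative input):
def _demo_remove_ponct_special():
    ok, cleaned = Ela_Remove_Ponct_Special_Caractere("réseau (fibre) / client !")
    if ok:
        print(cleaned)  # the parentheses, slash and '!' are now spaces
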
def Ela_Tokenize(sentence):
    try:
        status, sentence = Ela_Remove_Bad_Pattern(sentence)
        if status is False:
            # Always return a (status, value) pair so callers can unpack safely
            return False, []

        status, sentence = Ela_Remove_Ponct_Special_Caractere(sentence)
        if status is False:
            return False, []

        doc = nlp(str(sentence).lower())
        retval = []
        for X in doc:
            if len(str(unidecode(X.text)).strip()) > 0:
                retval.append(str(X.text).strip())

        # Return the text of each token
        return True, retval
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_Tokenize"

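
# Pipeline sketch: bad patterns, then punctuation are stripped before spaCy
# tokenisation; tokens that are empty after unidecode are dropped:
def _demo_tokenize():
    ok, toks = Ela_Tokenize("Coupure de réseau à Marseille, 21h10.")
    if ok:
        print(toks)  # e.g. ['coupure', 'de', 'réseau', 'à', 'marseille']
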
'''
2. Removing the most frequent words
Some words occur very frequently in French.
In English these are called "stop words".
Most of the time, they carry no information for the downstream tasks.
Example:
{'ai', 'aie', 'aient', 'aies', 'ait', 'as', etc.}
:input: a list of tokens
:return: a list
'''
def Ela_remove_stop_words(tab_tokens):
    try:
        clean_words = []
        for token in tab_tokens:
            if token not in stopWords:
                clean_words.append(token)
        return True, clean_words
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_remove_stop_words"

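
# Sketch: NLTK's French stop list filters out function words ('de', 'à', ...):
def _demo_remove_stop_words():
    ok, kept = Ela_remove_stop_words(['coupure', 'de', 'réseau', 'à', 'marseille'])
    if ok:
        print(kept)  # e.g. ['coupure', 'réseau', 'marseille']
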
'''
3. Removal of: ADP (adposition), DET (determiner), CCONJ (coordinating conjunction)
The full tag list is:
ADJ: adjective, ADP: adposition, ADV: adverb, AUX: auxiliary verb, CONJ: coordinating conjunction, DET: determiner,
INTJ: interjection, NOUN: noun, NUM: numeral, PART: particle, PRON: pronoun, PROPN: proper noun, PUNCT: punctuation,
SCONJ: subordinating conjunction, SYM: symbol, VERB: verb, X: other
'''
def Ela_remove_pronoun(tab_tokens):
    try:
        mywords = []
        for token in tab_tokens:
            mytok = nlp(str(token).lower())
            for token2 in mytok:
                if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP':
                    # Keep the sub-token text itself, so a token that spaCy
                    # splits into several parts is not duplicated wholesale
                    mywords.append(str(token2))
        return True, mywords
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_remove_pronoun"

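
# Sketch: determiners, coordinating conjunctions and adpositions are dropped
# (tags come from the fr_core_news_sm model, so results may vary slightly):
def _demo_remove_pronoun():
    ok, kept = Ela_remove_pronoun(['le', 'réseau', 'et', 'la', 'fibre'])
    if ok:
        print(kept)  # e.g. ['réseau', 'fibre']
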
'''
4. Stemming
Stemming reduces a word to its "root" form. The goal of stemming is to group
the many variants of a word as one and the same word. For example, once
stemming is applied to "Chiens" or "Chien", the resulting word is the same.
Important:
only French words are stemmed; the others are left as-is.
'''
def Ela_stemmize(tab_tokens):
    try:
        tab_ret_val = []
        for mot in tab_tokens:
            if mycommon.check_word_in_fr_dict(str(mot)):
                if type(mot) is str:
                    tab_ret_val.append(stemmer.stem(mot))
                else:
                    tab_ret_val.append(stemmer.stem(mot.text))
            else:
                tab_ret_val.append(mot)
        return tab_ret_val
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_stemmize"

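
# Sketch (assumes mycommon.check_word_in_fr_dict recognises both forms):
# stemming maps inflected variants onto one root.
def _demo_stemmize():
    print(Ela_stemmize(['chiens', 'chien']))  # e.g. ['chien', 'chien']
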
'''
When stemming a training course, the trainer's wording and the
training documents must not be altered.
==> Here, no spelling correction is applied before stemming.
We also deliberately choose not to stem certain words.
'''
def Ela_stemmize_Class(tab_tokens):
    try:
        tab_ret_val = []
        for mot in tab_tokens:
            if mycommon.Word_Not_Stemmize(str(mot)):
                tab_ret_val.append(unidecode(mot))
            else:
                print(" BEFORE STEM, WORD = " + str(mot))
                tab_ret_val.append(unidecode(str(stemmer.stem(mot))))
                print(" AFTER STEM, WORD = " + unidecode(str(stemmer.stem(mot))))
        return tab_ret_val
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_stemmize_Class"

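
# Sketch of class-side stemming: no spelling correction, accents stripped by
# unidecode after stemming (outputs depend on the Snowball French stemmer):
def _demo_stemmize_class():
    print(Ela_stemmize_Class(['formation', 'réseaux']))  # e.g. ['format', 'reseau']
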
'''
For a user search, chances are high that the input contains typos,
==> so before stemming the words, a spelling correction is applied.
We also deliberately choose not to stem certain words.
'''
def Ela_stemmize_search(tab_tokens):
    try:
        tab_ret_val = []
        print(" CHECK WHETHER THE WORDS MUST BE STEMMED: " + str(tab_tokens))
        for mot in tab_tokens:
            if mycommon.Word_Not_Stemmize(str(mot)):
                tab_ret_val.append(unidecode(mot))
            else:
                '''
                Spelling correction is applied first
                '''
                print("BEFORE SPELL CORRECTION = " + str(mot) + " ==> AFTER = " + unidecode(str(spell.correction(mot))))
                corrected_str = str(spell.correction(mot))
                tab_ret_val.append(unidecode(str(stemmer.stem(corrected_str))))

        print(" STEMMED LIST = " + str(tab_ret_val))
        return tab_ret_val
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_stemmize_search"

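
# Sketch of search-side stemming: the illustrative typo 'reseeau' is first
# corrected by pyspellchecker, then stemmed and unaccented:
def _demo_stemmize_search():
    print(Ela_stemmize_search(['reseeau', 'formations']))
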
'''
Indexing and saving of a token.
IMPORTANT: to share the process between indexing all fields and indexing
only certain fields (e.g. the 'title' field or the 'objectif' field), the
notion of 'source_field' is introduced:
for title indexing, 'source_field' = 'title'
for objectif indexing, 'source_field' = 'objectif'
'''
def ela_index_record_field(lines, class_id, source_field=""):
    try:
        '''
        Delete any existing index entries for this training course
        '''
        client = MongoClient(CONNECTION_STRING)
        dbname = client['cherifdb']
        coll_name = dbname['elaindex']

        # Check the default value of the source_field parameter
        if len(str(source_field)) == 0:
            source_field = 'default'

        myquery = {"id_formation": class_id, "source_field": source_field}
        delete_row = coll_name.delete_many(myquery)
        mycommon.myprint(" elaindex - " + str(delete_row.deleted_count) + " documents deleted. Training ==> " + str(class_id) + " ")

        '''
        Add the index entries
        '''
        status, tab_tokens = Ela_Tokenize(lines)
        if status is False:
            return False

        status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
        if status is False:
            return False

        status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
        if status is False:
            return False

        tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
        #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
        #tab_tokens4 = Ela_remove_ponct(tab_tokens4)

        # Save to the MongoDB database
        Ela_list_to_mongo(tab_tokens4, class_id, source_field)

        size_tab = len(tab_tokens4)
        print("size_tab = " + str(size_tab))
        occurrences = Counter(tab_tokens4)
        most_common = occurrences.most_common()
        print(most_common)
        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False

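
# End-to-end indexing sketch (assumes a local MongoDB; 'Tid_0001' and the
# sentence are illustrative): tokenize -> stop words -> POS filter -> stem,
# then persist the scored tokens in 'elaindex'.
def _demo_index_record_field():
    ela_index_record_field("Formation complète sur les réseaux fibre", "Tid_0001", "title")
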
def Ela_ntlk(mysentence, traning_id):
    # Each pipeline step returns (status, value); unpack both parts
    status, tab_tokens = Ela_Tokenize(mysentence)
    status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
    print("Ela_ntlk = " + str(tab_tokens4))

    ## Use pandas to handle averages, sums, etc.
    # list.sort() sorts in place and returns None, so sort first, then return
    tab_tokens4.sort()
    return tab_tokens4

def Ela_list_to_mongo(tab_tokens, traning_id, source_field):
    try:
        # Count the occurrences of each value
        my_file = open("ela_output_test_file_pandas_2.txt", "w")
        size_tab = len(tab_tokens)
        occurrences = Counter(tab_tokens)

        # Put the list into a set to deduplicate,
        # then convert the set back to a list
        list_set = set(tab_tokens)
        unique_list = list(list_set)

        final_lists = []
        for tmp in unique_list:
            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
            final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])

        print(final_lists)
        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_formation', 'source_field'))
        my_file.write(str(data))
        my_file.close()

        # Making a connection with MongoClient
        client = MongoClient("mongodb://localhost:27017/")
        # database
        db = client["cherifdb"]
        # collection
        collection = db["elaindex"]

        data.reset_index(inplace=True)
        data_dict = data.to_dict("records")
        print(data_dict)

        # Insert into the collection
        collection.insert_many(data_dict)
        data.to_csv("data_indexees.csv")
        return True
    except Exception as e:
        mycommon.myprint(e)
        return False

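
# Shape sketch: each unique token becomes one MongoDB record such as
# {'mots': 'reseau', 'occurence': 2, 'moyenne': '0.67',
#  'id_formation': 'Tid_0001', 'source_field': 'title'} (values illustrative):
def _demo_list_to_mongo():
    Ela_list_to_mongo(['reseau', 'reseau', 'fibre'], 'Tid_0001', 'title')
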
def test_ela_myntlk():
    tab = []
    status, tab_tokens = Ela_Tokenize(sentence)
    status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
    tab_tokens4.sort()
    # Ela_list_to_mongo requires a source_field; 'default' mirrors the
    # fallback used by the indexing functions
    Ela_list_to_mongo(tab_tokens4, 'Tid_3245', 'default')
    exit()
'''
my_file = open("ela_output_test_file_pandas.txt", "w")
# Recherche des occurence d'une valeur
size_tab = len(tab_tokens4)
print("size_tab = "+str(size_tab))
occurrences = Counter(tab_tokens4)
# insert the list to the set
list_set = set(tab_tokens4)
# convert the set to the list
unique_list = (list(list_set))
final_lists = []
print(" MOT ==> Occurrence ==> Moyenne ")
for tmp in unique_list :
moyenne = round(int(occurrences[str(tmp)])/size_tab, 2)
#print(str(tmp)+" ==> "+str(occurrences[str(tmp)])+" ==> "+str(moyenne))
list_tmp = [tmp, str(occurrences[str(tmp)]), str(moyenne) ]
#print(list_tmp)
final_lists.append([str(tmp),str(occurrences[str(tmp)]),str(moyenne)])
print(" Finals Liste ")
my_file.write("\nWorld\n")
print(final_lists)
data = pd.DataFrame(final_lists, columns={'mots',"occurence",'moyenne'})
my_file.write(str("\n############ occurrences ############# \n"))
my_file.write(str(data))
most_common = occurrences.most_common()
print(most_common)
my_file.close()
'''
def ela_index_article_avis_record_field(lines, article_avis_id, source_field=""):
    try:
        '''
        Delete any existing index entries for this article/review
        '''
        client = MongoClient(CONNECTION_STRING)
        dbname = client['cherifdb']
        coll_name = dbname['elaindex_article_avis']

        # Check the default value of the source_field parameter
        if len(str(source_field)) == 0:
            source_field = 'default'

        myquery = {"id_articles_avis": article_avis_id, "source_field": source_field}
        delete_row = coll_name.delete_many(myquery)
        mycommon.myprint(" elaindex Article AVIS : - " + str(delete_row.deleted_count) + " documents deleted. Article_Avis ==> " + str(article_avis_id) + " ")

        '''
        Add the index entries
        '''
        # Each pipeline step returns (status, value); unpack and check both
        status, tab_tokens = Ela_Tokenize(lines)
        if status is False:
            return False

        status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
        if status is False:
            return False

        status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
        if status is False:
            return False

        tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
        #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
        #tab_tokens4 = Ela_remove_ponct(tab_tokens4)

        # Save to the MongoDB database
        Ela_article_avis_list_to_mongo(tab_tokens4, article_avis_id, source_field)

        size_tab = len(tab_tokens4)
        occurrences = Counter(tab_tokens4)
        most_common = occurrences.most_common()
        print(most_common)
        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False

def Ela_article_avis_list_to_mongo(tab_tokens, traning_id, source_field):
    try:
        # Count the occurrences of each value
        my_file = open("ela_output_article_file_pandas_2.txt", "w")
        size_tab = len(tab_tokens)
        occurrences = Counter(tab_tokens)

        # Put the list into a set to deduplicate,
        # then convert the set back to a list
        list_set = set(tab_tokens)
        unique_list = list(list_set)

        final_lists = []
        for tmp in unique_list:
            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
            final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])

        print(final_lists)
        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_article_avis', 'source_field'))
        my_file.write(str(data))
        my_file.close()

        # Making a connection with MongoClient
        client = MongoClient("mongodb://localhost:27017/")
        # database
        db = client["cherifdb"]
        # collection
        collection = db["elaindex_article_avis"]

        data.reset_index(inplace=True)
        data_dict = data.to_dict("records")

        # Insert into the collection
        collection.insert_many(data_dict)
        data.to_csv("data_indexees_article_avis.csv")
        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False