27/04/22 - 13h30
parent 40ea13cfb2
commit 4d16f4039f

Ela_Spacy.py (90 lines changed)

@@ -131,7 +131,7 @@ def Ela_remove_ponct(list):
'''
This function removes noise items from lists,
such as " ", "]", etc.
'''

def Ela_Remove_Noise_from_list(list):

    list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']',

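The hunk above only shows the start of `list_noises`. As a rough, standalone illustration of what the docstring describes (dropping punctuation-like tokens from a token list), here is a minimal sketch; the names below are illustrative, not the module's actual code:

```python
# Minimal sketch (not the project's code): drop punctuation-like tokens from a list.
NOISE_TOKENS = {'...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', ' ', ''}

def remove_noise_tokens(tokens):
    # Keep only tokens that are not pure noise/punctuation.
    return [t for t in tokens if t.strip() not in NOISE_TOKENS]

print(remove_noise_tokens(['gestion', ',', ' ', 'comptabilite', ']', 'dcg']))
# -> ['gestion', 'comptabilite', 'dcg']
```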
@@ -166,7 +166,7 @@ def Ela_Remove_Noise_from_list(list):
            list.remove(' ')

    return list

'''
'''
This function removes the

@@ -174,29 +174,39 @@ irrelevant patterns, for example:
- 21h10
- 1er or 14ieme

This function also removes words with FEWER than 3 characters from the index.
'''
def Ela_Remove_Bad_Pattern(sentence):
    try:
        text = sentence.lower()  # put the words in lowercase
        # Remove the special characters:

        patter2 = re.compile(r"^([0-9]+)[:]([a-zA-Z0-9èéêë])+$")
        patter3 = re.compile(r"^([0-9]+)[hH]([0-9])+$")
        patter4 = re.compile(r"^([0-9]+)[a-zA-Z0-9èéêë]+$")
        patter5 = re.compile(r"^([0-9]+)+$")
        # Handle times such as: 3:30
        patter2 = re.compile(r"([0-9]+)[:]([a-zA-Z0-9èéêë])+")

        # Handle times such as: 3h30min
        patter3 = re.compile(r"([0-9]+)[hH]([0-9])+")

        # Handle ranks/ordinals such as: 1ere, 14ième, etc.
        patter4 = re.compile(r"([0-9]+)[a-zA-Z0-9èéêë]+")

        # Handle bare numbers, which are not indexed
        patter5 = re.compile(r"([0-9]+)+")

        doc = nlp(str(text).lower())
        final_text = ""
        for val in doc :
            print(" str(val) = "+str(val))
            #print(" str(val) = '"+str(val)+"' ")
            val_str = str(val)
            val_str = re.sub(patter2, ' ', val_str)
            val_str = re.sub(patter3, ' ', val_str)
            val_str = re.sub(patter4, ' ', val_str)
            val_str = re.sub(patter5, ' ', val_str)
            final_text = str(final_text) + " "+str(val_str)

            print("final_text = "+str(final_text))
            if( len(val_str) >= 3):
                val_str = re.sub(patter2, ' ', val_str)
                val_str = re.sub(patter3, ' ', val_str)
                val_str = re.sub(patter4, ' ', val_str)
                val_str = re.sub(patter5, ' ', val_str)
                final_text = str(final_text) + " "+str(val_str)

            #print("final_text = "+str(final_text))
        return True, final_text

    except Exception as e:

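The change drops the `^...$` anchors and only keeps tokens of 3+ characters. A self-contained sketch of the same idea, without the spaCy pipeline and with made-up helper names, assuming the goal is to blank out times, ordinals and bare numbers before indexing:

```python
import re

# Patterns equivalent in spirit to patter2..patter5 above (illustrative only).
time_colon = re.compile(r"([0-9]+)[:]([a-zA-Z0-9èéêë])+")   # e.g. 3:30
time_h     = re.compile(r"([0-9]+)[hH]([0-9])+")             # e.g. 21h10
ordinal    = re.compile(r"([0-9]+)[a-zA-Z0-9èéêë]+")         # e.g. 1er, 14ieme
number     = re.compile(r"([0-9]+)+")                        # bare digits

def drop_bad_patterns(tokens):
    kept = []
    for tok in tokens:
        for pat in (time_colon, time_h, ordinal, number):
            tok = pat.sub(' ', tok)
        tok = tok.strip()
        if len(tok) >= 3:          # words under 3 characters are not indexed
            kept.append(tok)
    return kept

print(drop_bad_patterns(['rendez-vous', '21h10', '1er', '2022', 'formation']))
# -> ['rendez-vous', 'formation']
```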
@@ -218,14 +228,16 @@ def Ela_Remove_Ponct_Special_Caractere(sentence):
        text = re.sub(r"\&\S*\s", " ", text)
        text = re.sub(r"\-", " ", text)

        list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', '\'', '"',
        list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', '\'', '"', '’', '`','©', '–',
                       '{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+','\\n',
                       '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@']
                       '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@','®', '™', '«', '»']

        sentence = text
        for noise in list_noises:
            #print(" removing: '"+str(noise)+"' ")
            sentence = sentence.replace(str(noise), " ")

        print(" AFTER REPLACE NOISES = "+str(sentence))
        return True, sentence

    except Exception as e:

@@ -238,6 +250,13 @@ def Ela_Remove_Ponct_Special_Caractere(sentence):
def Ela_Tokenize(sentence):
    try:
        #print(" Tokenaisee de du mot "+str(sentence))
        '''
        Special case:
        Punctuation is sometimes not respected, for example: blabla.Blabla,sdslk
        To work around this, every punctuation mark is replaced by "space"<punctuation>"space".
        Donc la fr

        '''
        status, sentence = Ela_Remove_Bad_Pattern(sentence)
        if( status is False ):
            return False

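A rough sketch of the "space&lt;punctuation&gt;space" idea described in the docstring above, so that strings like `blabla.Blabla,sdslk` split into separate tokens. The helper name and the plain `split()` (instead of the module's spaCy `nlp` pipeline) are assumptions:

```python
import re

def pad_punctuation(sentence):
    # Surround common punctuation with spaces so glued words split cleanly.
    return re.sub(r"([.,;:!?])", r" \1 ", sentence)

print(pad_punctuation("blabla.Blabla,sdslk").split())
# -> ['blabla', '.', 'Blabla', ',', 'sdslk']
```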
@@ -342,7 +361,7 @@ def Ela_stemmize(tab_tokens):
        #print(" STMISATION TAB = "+str(tab_ret_val))

        return tab_ret_val
        return True, tab_ret_val
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )

@@ -371,9 +390,9 @@ def Ela_stemmize_Class(tab_tokens):
            if( mycommon.Word_Not_Stemmize(str(mot)) ):
                tab_ret_val.append(unidecode(mot))
            else:
                print(" AVANT STEM MOT ="+str(mot))
                #print(" AVANT STEM MOT ="+str(mot))
                tab_ret_val.append( unidecode( str(stemmer.stem(mot))))
                print(" AVANT STEM MOT =" + unidecode( str(stemmer.stem(mot))))
                #print(" AVANT STEM MOT =" + unidecode( str(stemmer.stem(mot))))

        #print(" STMISATION TAB = "+str(tab_ret_val))

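A minimal reproduction of the stemming step shown above, assuming `stemmer` is NLTK's French Snowball stemmer and `unidecode` strips accents (both appear elsewhere in the module; exact stems depend on the stemmer version):

```python
from nltk.stem.snowball import FrenchStemmer
from unidecode import unidecode

stemmer = FrenchStemmer()

tokens = ['comptabilité', 'gestion', 'coûts']
stems = [unidecode(str(stemmer.stem(t))) for t in tokens]
print(stems)  # roughly: ['comptabilit', 'gestion', 'cout']
```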
@@ -411,7 +430,7 @@ def Ela_stemmize_search(tab_tokens):
        print(" STMISATION TAB = "+str(tab_ret_val))

        return tab_ret_val
        return True, tab_ret_val

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()

@@ -468,7 +487,9 @@ def ela_index_record_field(lines, class_id, source_field = ""):
    if (status is False):
        return False

    tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
    status, tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
    if (status is False):
        return False

    #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
    #tab_tokens4 = Ela_remove_ponct(tab_tokens4)

@@ -477,7 +498,7 @@ def ela_index_record_field(lines, class_id, source_field = ""):
    Ela_list_to_mongo(tab_tokens4,class_id, source_field)

    size_tab = len(tab_tokens4)
    print("size_tab = " + str(size_tab))
    #print("size_tab = " + str(size_tab))
    occurrences = Counter(tab_tokens4)
    most_common = occurrences.most_common()
    print(most_common)

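The `moyenne` column in the indexed output below appears to be each token's occurrence count divided by the total token count, which matches the `Counter` / `most_common()` calls above. A short sketch of that computation (variable names are illustrative):

```python
from collections import Counter

tokens = ['cout', 'cour', 'cout', 'gestion', 'cout', 'gea']
occurrences = Counter(tokens)
size_tab = len(tokens)

for mot, occ in occurrences.most_common():
    print(mot, occ, round(occ / size_tab, 2))   # word, occurence, moyenne
# cout 3 0.5
# cour 1 0.17
# ...
```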
@@ -490,7 +511,7 @@ def ela_index_record_field(lines, class_id, source_field = ""):
        return False


'''
def Ela_ntlk(mysentence, traning_id):
    tab_tokens = Ela_Tokenize(mysentence)
    tab_tokens2 = Ela_remove_stop_words(tab_tokens)

@@ -501,7 +522,7 @@ def Ela_ntlk(mysentence, traning_id):

    ## Use pandas to handle the averages, sums, etc.
    return tab_tokens4.sort()

'''

def Ela_list_to_mongo(tab_tokens, traning_id, source_field):
    try:

@@ -551,7 +572,7 @@ def Ela_list_to_mongo(tab_tokens, traning_id, source_field):



'''
def test_ela_myntlk():

    tab = []

@@ -563,6 +584,7 @@ def test_ela_myntlk():
    tab_tokens4.sort()
    Ela_list_to_mongo(tab_tokens4, 'Tid_3245')
    exit()
'''

'''
my_file = open("ela_output_test_file_pandas.txt", "w")

|
|||
'''
|
||||
Ajout des indexe
|
||||
'''
|
||||
tab_tokens = Ela_Tokenize(lines)
|
||||
tab_tokens2 = Ela_remove_stop_words(tab_tokens)
|
||||
tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
|
||||
tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
|
||||
status, tab_tokens = Ela_Tokenize(lines)
|
||||
if( status is False):
|
||||
return False
|
||||
|
||||
status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
|
||||
if (status is False):
|
||||
return False
|
||||
|
||||
status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
|
||||
if (status is False):
|
||||
return False
|
||||
|
||||
status, tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
|
||||
if (status is False):
|
||||
return False
|
||||
|
||||
|
||||
#tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
|
||||
#tab_tokens4 = Ela_remove_ponct(tab_tokens4)
|
||||
|
|
|
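The hunk above switches every pipeline step from returning a bare value to returning a `(status, value)` pair that the caller checks before continuing. A minimal sketch of that convention, using a placeholder step name rather than the project's actual helpers:

```python
def example_step(value):
    # Placeholder for Ela_Tokenize / Ela_remove_stop_words / ...:
    # return (False, None) on failure, (True, result) on success.
    if not value:
        return False, None
    return True, value.lower()

status, tokens = example_step("Quelques Mots")
if status is False:
    raise SystemExit("step failed, abort indexing")
print(tokens)  # quelques mots
```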
@@ -1,39 +1,13 @@
,index,mots,occurence,moyenne,id_formation,source_field
0,0,"
",8,0.15,mysy_ytubes_04,description
1,1,depart,1,0.02,mysy_ytubes_04,description
2,2,resultat,1,0.02,mysy_ytubes_04,description
3,3,dcg,1,0.02,mysy_ytubes_04,description
4,4,bts,1,0.02,mysy_ytubes_04,description
5,5,"

",1,0.02,mysy_ytubes_04,description
6,6,gratuit,1,0.02,mysy_ytubes_04,description
7,7,marg,1,0.02,mysy_ytubes_04,description
8,8,analys,1,0.02,mysy_ytubes_04,description
9,9,notion,1,0.02,mysy_ytubes_04,description
10,10,paris,1,0.02,mysy_ytubes_04,description
11,11,villetaneuse,1,0.02,mysy_ytubes_04,description
12,12,iut,1,0.02,mysy_ytubes_04,description
13,13,zambotto,1,0.02,mysy_ytubes_04,description
14,14,cout,3,0.06,mysy_ytubes_04,description
15,15,cour,3,0.06,mysy_ytubes_04,description
16,16,different,1,0.02,mysy_ytubes_04,description
17,17,licenc,1,0.02,mysy_ytubes_04,description
18,18,calcul,1,0.02,mysy_ytubes_04,description
19,19,professeur,1,0.02,mysy_ytubes_04,description
20,20,corinne,1,0.02,mysy_ytubes_04,description
21,21,lign,1,0.02,mysy_ytubes_04,description
22,22,universit,1,0.02,mysy_ytubes_04,description
23,23,charg,1,0.02,mysy_ytubes_04,description
24,24,nord,1,0.02,mysy_ytubes_04,description
25,25,comptabilit,4,0.08,mysy_ytubes_04,description
26,26,sorbonne,1,0.02,mysy_ytubes_04,description
27,27,general,1,0.02,mysy_ytubes_04,description
28,28,debut,1,0.02,mysy_ytubes_04,description
29,29,stmg,1,0.02,mysy_ytubes_04,description
30,30,niveau,1,0.02,mysy_ytubes_04,description
31,31,prix,1,0.02,mysy_ytubes_04,description
32,32,incorporees,1,0.02,mysy_ytubes_04,description
33,33,gea,2,0.04,mysy_ytubes_04,description
34,34,gestion,3,0.06,mysy_ytubes_04,description
0,0,regl,1,0.08,8866,objectif
1,1,const,1,0.08,8866,objectif
2,2,object,1,0.08,8866,objectif
3,3,publiqu,1,0.08,8866,objectif
4,4,format,1,0.08,8866,objectif
5,5,comptabl,1,0.08,8866,objectif
6,6,impos,1,0.08,8866,objectif
7,7,evolu,1,0.08,8866,objectif
8,8,reglement,1,0.08,8866,objectif
9,9,appliqu,1,0.08,8866,objectif
10,10,maitris,1,0.08,8866,objectif
11,11,princip,1,0.08,8866,objectif

@@ -440,7 +440,9 @@ def ela_recherche_tokens(sentence):
    '''
    print(" VERIF : "+str(tab_tokens3))

    tab_tokens4 = ls.Ela_stemmize_search(tab_tokens3)
    status, tab_tokens4 = ls.Ela_stemmize_search(tab_tokens3)
    if( status is False):
        return False

    print(" VERIF APRES STEMISATION : " + str(tab_tokens4))

@@ -504,10 +506,24 @@ def ela_recherche_tokens_source_field(sentence, source_fied=""):

    print(" ici: sentence = "+sentence+", -- source_fied ="+source_fied)
    tab_training_id = []
    tab_tokens = ls.Ela_Tokenize(sentence)
    tab_tokens2 = ls.Ela_remove_stop_words(tab_tokens)
    tab_tokens3 = ls.Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = ls.Ela_stemmize(tab_tokens3)
    status, tab_tokens = ls.Ela_Tokenize(sentence)
    if( status is False):
        return False

    status, tab_tokens2 = ls.Ela_remove_stop_words(tab_tokens)
    if (status is False):
        return False

    status, tab_tokens3 = ls.Ela_remove_pronoun(tab_tokens2)
    if (status is False):
        return False

    status, tab_tokens4 = ls.Ela_stemmize_search(tab_tokens3)
    if (status is False):
        return False


    tab_tokens4.sort()

    '''

@@ -518,7 +534,6 @@ def ela_recherche_tokens_source_field(sentence, source_fied=""):

    collection = db["elaindex"]

    for token in tab_tokens4:
        print(" #### Token rechercher dans l'index est : '"+str(token)+"' et le source_field = '"+str(source_fied)+"' ")
        for doc in collection.find({"mots":token, "source_field":source_fied}):

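A minimal sketch of the lookup performed here: for each stemmed token, query the `elaindex` collection for documents whose `mots` and `source_field` match, and collect the training ids. The connection string and the `id_formation` field name follow what appears elsewhere in these files; the accumulation into `tab_training_id` is an assumption about what the elided body does:

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost/cherifdb")
db = client["cherifdb"]
collection = db["elaindex"]

tab_training_id = []
tab_tokens4 = ["comptabilit", "gestion"]
source_fied = "description"

for token in tab_tokens4:
    for doc in collection.find({"mots": token, "source_field": source_fied}):
        # assumed: each index document carries the training id it points to
        tab_training_id.append(doc.get("id_formation"))

print(tab_training_id)
```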
@@ -651,7 +666,9 @@ def ela_recherche_article_avis_tokens(sentence):

    print("corrected word = " + str(tab_corrected_word))

    tab_tokens4 = ls.Ela_stemmize(tab_corrected_word)
    status, tab_tokens4 = ls.Ela_stemmize(tab_corrected_word)
    if( status is False):
        return False

    print(" VERIF APRES STEMISATION : " + str(tab_tokens4))

@@ -1,36 +1,13 @@
mots occurence moyenne id_formation source_field
0 \n 8 0.15 mysy_ytubes_04 description
1 depart 1 0.02 mysy_ytubes_04 description
2 resultat 1 0.02 mysy_ytubes_04 description
3 dcg 1 0.02 mysy_ytubes_04 description
4 bts 1 0.02 mysy_ytubes_04 description
5 \n\n 1 0.02 mysy_ytubes_04 description
6 gratuit 1 0.02 mysy_ytubes_04 description
7 marg 1 0.02 mysy_ytubes_04 description
8 analys 1 0.02 mysy_ytubes_04 description
9 notion 1 0.02 mysy_ytubes_04 description
10 paris 1 0.02 mysy_ytubes_04 description
11 villetaneuse 1 0.02 mysy_ytubes_04 description
12 iut 1 0.02 mysy_ytubes_04 description
13 zambotto 1 0.02 mysy_ytubes_04 description
14 cout 3 0.06 mysy_ytubes_04 description
15 cour 3 0.06 mysy_ytubes_04 description
16 different 1 0.02 mysy_ytubes_04 description
17 licenc 1 0.02 mysy_ytubes_04 description
18 calcul 1 0.02 mysy_ytubes_04 description
19 professeur 1 0.02 mysy_ytubes_04 description
20 corinne 1 0.02 mysy_ytubes_04 description
21 lign 1 0.02 mysy_ytubes_04 description
22 universit 1 0.02 mysy_ytubes_04 description
23 charg 1 0.02 mysy_ytubes_04 description
24 nord 1 0.02 mysy_ytubes_04 description
25 comptabilit 4 0.08 mysy_ytubes_04 description
26 sorbonne 1 0.02 mysy_ytubes_04 description
27 general 1 0.02 mysy_ytubes_04 description
28 debut 1 0.02 mysy_ytubes_04 description
29 stmg 1 0.02 mysy_ytubes_04 description
30 niveau 1 0.02 mysy_ytubes_04 description
31 prix 1 0.02 mysy_ytubes_04 description
32 incorporees 1 0.02 mysy_ytubes_04 description
33 gea 2 0.04 mysy_ytubes_04 description
34 gestion 3 0.06 mysy_ytubes_04 description
mots occurence moyenne id_formation source_field
0 regl 1 0.08 8866 objectif
1 const 1 0.08 8866 objectif
2 object 1 0.08 8866 objectif
3 publiqu 1 0.08 8866 objectif
4 format 1 0.08 8866 objectif
5 comptabl 1 0.08 8866 objectif
6 impos 1 0.08 8866 objectif
7 evolu 1 0.08 8866 objectif
8 reglement 1 0.08 8866 objectif
9 appliqu 1 0.08 8866 objectif
10 maitris 1 0.08 8866 objectif
11 princip 1 0.08 8866 objectif

@@ -14,6 +14,8 @@ import inspect
import sys
from datetime import datetime
from pymongo import ReturnDocument
from unidecode import unidecode


TOKEN_SIZE = 25
CONNECTION_STRING = "mongodb://localhost/cherifdb"

@@ -428,3 +430,39 @@ def check_source_ipv4(source_ip=None):
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False


'''
For a search, the expression typed by the user in the search bar
must be cleaned and processed before entering the pipeline.

The sentence is then returned in "unicode" form.
'''
def Parse_Clean_Search_Text(sentence=None):
    try:
        if (len(str(sentence)) <= 0 ):
            return False, ""

        '''
        /!\ : We remove all "special" characters and punctuation EXCEPT
        - ":", which is needed to identify the patterns, and
        - '"', which is needed to identify the patterns
        '''
        list_noises = ['...', '.', ';', ',', '!', '?', ')', '(', '[', ']', '\'', '’', '`', '©', '–',
                       '{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+', '\\n',
                       '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@', '®', '™', '«', '»']

        for noise in list_noises:
            # print(" removing: '"+str(noise)+"' ")
            sentence = sentence.replace(str(noise), " ")

        unicode_sentence = unidecode(sentence)

        return True, unicode_sentence

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, ""

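A short usage sketch of the new helper (the sample input is made up); since it returns a `(status, text)` pair, callers are expected to unpack it:

```python
# Illustrative call, assuming Parse_Clean_Search_Text from the hunk above is in scope.
status, cleaned = Parse_Clean_Search_Text('prix: formation «comptabilité» (DCG)!')
if status:
    # ':' and '"' are kept, other punctuation becomes spaces,
    # and unidecode strips the accents.
    print(cleaned)
```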

wrapper.py (12 lines changed)

@@ -18,6 +18,7 @@ import inspect
import sys, os
import csv
import pandas as pd
from unidecode import unidecode

@@ -380,9 +381,16 @@ def recherche_text_simple(diction):
    If so, we are dealing with a search by type

    '''
    regexp = r"[\w\.-]+:\"[\w\s]*\""

    tips = re.findall(regexp, search_text, re.MULTILINE)
    cleaned_search_text = mycommon.Parse_Clean_Search_Text(search_text)

    print(" NOT CLEANED search_text = " + str(search_text))
    print(" CLEANED search_text = "+str(cleaned_search_text))


    regexp = r"[\w\.-]+:\"[\w\s]*\""
    tips = re.findall(regexp, str(cleaned_search_text), re.MULTILINE)
    nb_tips = len(tips)
    final_message3 = {}

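For reference, a standalone sketch of what the `tips` extraction matches: `field:"value"` pairs used by the search-by-type path (the sample query string is made up). Note that `mycommon.Parse_Clean_Search_Text` returns a `(status, text)` pair, so a caller wanting only the cleaned string would unpack the tuple rather than pass it through `str()`.

```python
import re

regexp = r"[\w\.-]+:\"[\w\s]*\""

# Hypothetical search-bar input:
search_text = 'title:"gestion comptable" certification:"DCG"'

tips = re.findall(regexp, search_text, re.MULTILINE)
print(tips)       # ['title:"gestion comptable"', 'certification:"DCG"']
print(len(tips))  # nb_tips = 2
```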