import datetime
import pandas as pd
import numpy as np
import pymongo
from pymongo import MongoClient
from collections import Counter

import prj_common as mycommon
from unidecode import unidecode
import inspect
import sys, os
from autocorrect import Speller
from datetime import datetime
import re
import GlobalVariable as MYSY_GV


#print("token_fr_pontuation")
|
||
#print(token_fr_pontuation)
|
||
|
||
'''
|
||
initialisation
|
||
|
||
'''
|
||
|
||
|
||
'''
|
||
1. Tokenisation
|
||
La tokenisation cherche à transformer un texte en une série de tokens individuels.
|
||
Egalement il supprime les mots avec des accents (à, é, etc)
|
||
|
||
:return a tab
|
||
'''
|
||
|
||
def Ela_Normalize(sentence):

    sentence = str(sentence).replace(",", " ")
    sentence = str(sentence).replace(";", " ")
    sentence = str(sentence).replace(".", " ")
    sentence = str(sentence).replace("'", " ")

    return sentence

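'''
Illustrative usage (a sketch only, not part of the original module; the exact
spacing of the result depends on the input):
    Ela_Normalize("python, avancé; module 1. c'est parti")
    # -> "python  avancé  module 1  c est parti"
'''
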
'''
This function takes a word and returns
its French spelling correction.
'''
def correct_fr_word(word):

    try:
        mydata = {}

        print(" Fonction : correct_fr_word : '"+word+"' =======> "+MYSY_GV.spell_fr(word))
        mydata['mot'] = str(word)
        mydata['mot_corrected'] = str(MYSY_GV.spell_fr(word))
        mydata['date_update'] = str(datetime.now())
        mydata['treated'] = 0
        coll_name = MYSY_GV.dbname['correction_ortho']
        ret_val_tmp = coll_name.insert_one(mydata)

        # insert_one() returns an InsertOneResult, never False: check the inserted_id instead
        if ret_val_tmp.inserted_id is None:
            mycommon.myprint(str(inspect.stack()[0][3]) + " - Impossible d'inserer le mot "+str(word)+" dans correction_ortho ")

        return MYSY_GV.spell_fr(word)

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False

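'''
Sketch of the expected behaviour (illustrative only; the actual correction depends on the
speller configured in MYSY_GV.spell_fr):
    correct_fr_word("formattion")
    # returns the corrected spelling (e.g. "formation") and stores a trace document
    # in the 'correction_ortho' collection
'''
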
'''
Punctuation removal
'''
def Ela_remove_ponct(token_list):
    try:
        # remove from the list every punctuation token known to MYSY_GV.token_fr_pontuation
        for tmp in MYSY_GV.token_fr_pontuation:
            while tmp in token_list:
                token_list.remove(tmp)

        return True, token_list
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_remove_ponct"

'''
This function removes noise entries from lists,
such as " ", "]", etc.

def Ela_Remove_Noise_from_list(list):

    list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']',
                   '{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+',
                   '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@']

    for noise in list_noises:
        list.remove(noise)

    while ' ' in list:
        list.remove(' ')

    while '[' in list:
        list.remove('[')

    while ']' in list:
        list.remove(']')

    while '\\n' in list:
        list.remove('\\n')

    while '"' in list:
        list.remove('"')

    return list
'''

'''
This function removes irrelevant patterns, for example:
- 21h10
- 1er or 14ieme

It also drops from indexing any word with FEWER than 3 characters.
'''
def Ela_Remove_Bad_Pattern(sentence):
    try:
        text = sentence.lower()  # lower-case all words
        # Strip the special patterns below:

        # Times such as: 3:30
        patter2 = re.compile(r"([0-9]+)[:]([a-zA-Z0-9èéêë])+")

        # Times such as: 3h30min
        patter3 = re.compile(r"([0-9]+)[hH]([0-9])+")

        # Ranks such as: 1ere, 14ième, etc.
        patter4 = re.compile(r"([0-9]+)[a-zA-Z0-9èéêë]+")

        # Plain numbers, which are not indexed
        patter5 = re.compile(r"([0-9]+)+")

        doc = MYSY_GV.nlp(str(text).lower())
        final_text = ""
        for val in doc:
            #print(" str(val) = '"+str(val)+"' ")
            val_str = str(val)

            if len(val_str) >= 3:
                val_str = re.sub(patter2, ' ', val_str)
                val_str = re.sub(patter3, ' ', val_str)
                val_str = re.sub(patter4, ' ', val_str)
                val_str = re.sub(patter5, ' ', val_str)
                final_text = str(final_text) + " " + str(val_str)

        #print("final_text = "+str(final_text))
        return True, final_text

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_Remove_Bad_Pattern"

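'''
Illustrative sketch (not in the original module) of what Ela_Remove_Bad_Pattern filters,
assuming MYSY_GV.nlp is a loaded French spaCy pipeline:
    status, cleaned = Ela_Remove_Bad_Pattern("Session du 14ieme groupe a 21h10, duree 3:30")
    # tokens such as '14ieme', '21h10', '3:30', bare numbers and words shorter than
    # 3 characters are dropped; the remaining words come back as a single string
'''
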
'''
This function replaces special characters and punctuation with spaces.
'''
def Ela_Remove_Ponct_Special_Caractere(sentence):
    try:
        text = sentence.lower()  # lower-case all words

        # Strip special characters:
        text = re.sub(r"[,\!\?\%\(\)\/\"]", " ", text)
        text = re.sub(r"\&\S*\s", " ", text)
        text = re.sub(r"\-", " ", text)

        list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', '\'', '"', '’', '`', '©', '–',
                       '{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+', '\\n', '\n',
                       '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@', '®', '™', '«', '»']

        sentence = text
        for noise in list_noises:
            #print(" suppression de : '"+str(noise)+"' ")
            sentence = sentence.replace(str(noise), " ")

        #print(" AFTER REPLACE NOISES = "+str(sentence))
        return True, sentence

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_Remove_Ponct_Special_Caractere"


def Ela_Tokenize(sentence):
    try:
        #print(" Tokenising: "+str(sentence))
        '''
        Special case:
        Punctuation is sometimes not used properly, e.g.: blabla.Blabla,sdslk
        To cope with this, every punctuation mark is replaced by "space"<punctuation>"space".
        '''
        status, sentence = Ela_Remove_Bad_Pattern(sentence)
        if status is False:
            return False

        #print(" AFTER Ela_Remove_Bad_Pattern " + str(sentence))
        status, sentence = Ela_Remove_Ponct_Special_Caractere(sentence)
        if status is False:
            return False

        doc = MYSY_GV.nlp(str(sentence).lower())

        #print(" Tokenize = '" + str(doc)+"' ")

        retval = []
        for X in doc:
            if len(str(unidecode(X.text)).strip()) > 0:
                retval.append(str(X.text).strip())

        # Return the text of each token
        return True, retval

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_Tokenize"

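'''
Illustrative usage (sketch only; the exact tokens depend on the spaCy model behind MYSY_GV.nlp):
    status, tokens = Ela_Tokenize("Apprendre Python.Les bases, et plus")
    # status is True and tokens is a plain Python list of lower-cased words,
    # with bad patterns and special characters already stripped
'''
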
'''
2. Removing the most frequent words
Some words occur very frequently in French.
In English they are called "stop words".
Most of the time these words carry no useful information for the downstream tasks.
Example:
{'ai', 'aie', 'aient', 'aies', 'ait', 'as', etc}

:input: list of tokens
:return: a list
'''
def Ela_remove_stop_words(tab_tokens):

    try:
        clean_words = []

        for token in tab_tokens:
            if token not in MYSY_GV.stopWords:
                clean_words.append(token)

        return True, clean_words

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_remove_stop_words"


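'''
Illustrative usage (sketch only; MYSY_GV.stopWords is the French stop-word set loaded at start-up):
    status, kept = Ela_remove_stop_words(['les', 'bases', 'de', 'python'])
    # -> (True, ['bases', 'python']) assuming 'les' and 'de' are in MYSY_GV.stopWords
'''
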
'''
3. Removal of: ADP (adposition), DET (determiner), CCONJ (coordinating conjunction)
The full tag list is:

ADJ: adjective, ADP: adposition, ADV: adverb, AUX: auxiliary verb, CONJ: coordinating conjunction, DET: determiner
INTJ: interjection, NOUN: noun, NUM: numeral, PART: particle, PRON: pronoun, PROPN: proper noun, PUNCT: punctuation
SCONJ: subordinating conjunction, SYM: symbol, VERB: verb, X: other
'''


def Ela_remove_pronoun(tab_tokens):
    try:
        mywords = []
        for token in tab_tokens:
            mytok = MYSY_GV.nlp(str(token).lower())
            for token2 in mytok:
                if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP':
                    mywords.append(str(mytok))
        return True, mywords
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_remove_pronoun"

'''
4. Stemming
Stemming reduces a word to its "root" form. The goal is to group the many variants
of a word under one single word. For instance, once stemming is applied to "Chiens"
or "Chien", the resulting word is the same.

Important:
only French words are stemmed; the others are left untouched.
'''
def Ela_stemmize(tab_tokens):

    try:
        tab_ret_val = []
        #print(" CHECKING WHETHER THE WORD IS FRENCH: " + str(tab_tokens))
        for mot in tab_tokens:
            if mycommon.check_word_in_fr_dict(str(mot)):

                if type(mot) is str:
                    tab_ret_val.append(MYSY_GV.stemmer.stem(mot))
                else:
                    tab_ret_val.append(MYSY_GV.stemmer.stem(mot.text))
            else:
                tab_ret_val.append(mot)

        #print(" STEMMED TAB = "+str(tab_ret_val))

        return True, tab_ret_val
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible Ela_stemmize"


#return [stemmer.stem(token.text) for token in tab_tokens]


'''
When stemming a training (class) record, the trainer's words or the words of the
training document must not be altered.
==> No spelling correction here before stemming.

Also, we deliberately choose not to stem certain words.
'''

def Ela_stemmize_Class(tab_tokens):

    try:
        tab_ret_val = []
        #print(" CHECKING WHETHER THE WORD MUST BE STEMMED _ CLASS: " + str(tab_tokens))
        for mot in tab_tokens:
            if mycommon.Word_Not_Stemmize(str(mot)):
                tab_ret_val.append(unidecode(mot))
            else:
                #print(" BEFORE STEMMING, WORD ="+str(mot))
                tab_ret_val.append(unidecode(str(MYSY_GV.stemmer.stem(mot))))
                #print(" AFTER STEMMING, WORD =" + unidecode( str(stemmer.stem(mot))))

        #print(" STEMMED TAB = "+str(tab_ret_val))

        return True, tab_ret_val
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible de Ela_stemmize_Class"


#return [stemmer.stem(token.text) for token in tab_tokens]


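'''
Illustrative usage (sketch only; assumes MYSY_GV.stemmer is the French snowball stemmer
and mycommon.Word_Not_Stemmize flags words excluded from stemming):
    status, stems = Ela_stemmize_Class(['formations', 'excel'])
    # -> (True, ['format', 'excel']) if 'formations' gets stemmed and 'excel' is kept as-is
'''
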
'''
For a user search there is a good chance the user mistyped something,
==> so before stemming the words, a spelling correction is applied.

Also, we deliberately choose not to stem certain words.
'''

def Ela_stemmize_search(tab_tokens):
    try:
        tab_ret_val = []
        #print(" CHECKING WHETHER THE WORD MUST BE STEMMED: " + str(tab_tokens))
        for mot in tab_tokens:
            if mycommon.Word_Not_Stemmize(str(mot)):
                tab_ret_val.append(unidecode(mot))
            else:
                '''
                Spelling correction is applied first
                '''
                #print("BEFORE SPELLING CORRECTION ="+str(mot)+" ==> AFTER = "+unidecode(str(MYSY_GV.spell.correction(mot))))
                corrected_str = str(MYSY_GV.spell.correction(mot))

                '''
                Change of 02/05/22:
                Stemming is dropped; instead we look up the masculine singular form of the word,
                then check whether that masculine singular exists in the "elaindex" collection.
                '''
                corrected_str_unicode = unidecode(corrected_str)
                status, corrected_str_masc_sing = mycommon.GetMasculinSingulier(corrected_str_unicode)
                if status is True:
                    tab_ret_val.append(corrected_str_masc_sing)

                '''
                This line is commented out to avoid stemming.
                Change of 02/05/22:
                tab_ret_val.append( unidecode (str(MYSY_GV.stemmer.stem(corrected_str))))
                '''

        #print(" STEMMED TAB = "+str(tab_ret_val))

        return True, tab_ret_val

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
        return False, " Impossible de Ela_stemmize_search"


#return [stemmer.stem(token.text) for token in tab_tokens]


'''
Indexing and storing of a token
IMPORTANT: To share the same process between indexing all fields and indexing only
some fields (e.g. indexing the 'title' field or the 'objectif' field), the notion of
'source_field' is introduced:
    for title indexing, 'source_field' = 'title'
    for objectif indexing, 'source_field' = 'objectif'
'''

def ela_index_record_field(lines, class_id, source_field=""):

    try:
        '''
        ## Delete every existing index entry for this training
        '''
        coll_name = MYSY_GV.dbname['elaindex']
        coll_name_StopWord = MYSY_GV.dbname['mysystopwords']

        # check default value of parameter : source_field
        if len(str(source_field)) == 0:
            source_field = 'default'

        myquery = {"id_formation": class_id, "source_field": source_field}
        delete_row = coll_name.delete_many(myquery)
        #mycommon.myprint(" elaindex - "+str(delete_row.deleted_count)+" documents deleted. Training ==> "+str(class_id)+" ")

        '''
        Adding the index entries.
        Ela_Tokenize performs the following steps:
        - Ela_Remove_Bad_Pattern
        - Ela_Remove_Ponct_Special_Caractere
        - Unidecode
        '''
        status, tab_tokens = Ela_Tokenize(lines)
        if status is False:
            return False

        status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
        if status is False:
            return False

        status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
        if status is False:
            return False

        #print(" BEFORE STEMMING " + str(tab_tokens3))
        # Convert to plain unicode and drop the extra stop words
        tab_tokens3_unicode = []
        for val in tab_tokens3:
            val_unicode = unidecode(val)
            # print(' val_unicode = '+str(val_unicode))

            '''
            Check that the word is not in the new stop-word collection
            '''
            val_tmp = coll_name_StopWord.count_documents({'stop_word': str(val_unicode)})
            if val_tmp <= 0:
                '''
                Get the masculine singular form of the word
                '''
                status, newword = mycommon.GetMasculinSingulier(str(val_unicode))
                if status is True:
                    tab_tokens3_unicode.append(newword)

        #print("TTTTT " + str(tab_tokens3_unicode))

        status, tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
        if status is False:
            return False

        #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
        #tab_tokens4 = Ela_remove_ponct(tab_tokens4)

        # Store the result in MongoDB
        Ela_list_to_mongo(tab_tokens3_unicode, class_id, source_field)

        size_tab = len(tab_tokens4)
        #print("size_tab = " + str(size_tab))
        occurrences = Counter(tab_tokens4)
        most_common = occurrences.most_common()
        #print(most_common)

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False


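'''
Illustrative end-to-end usage (sketch only; MYSY_GV.dbname must already point to the
application database, and the training id below is just a hypothetical example):
    ela_index_record_field("Apprenez les bases de Python et de pandas", "Tid_1234", "title")
    # deletes the previous 'title' index entries for this training, then rebuilds them in 'elaindex'
'''
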
'''
Version Youtubes
'''
def YTUBES_ela_index_record_field(lines, class_id, source_field=""):

    try:
        '''
        ## Delete every existing index entry for this training
        '''
        coll_name = MYSY_GV.dbname['elaindex']
        coll_name_StopWord = MYSY_GV.dbname['mysystopwords']

        # check default value of parameter : source_field
        if len(str(source_field)) == 0:
            source_field = 'default'

        myquery = {"id_formation": class_id, "source_field": source_field}

        #print(" Mydelete Query = "+str(myquery))
        delete_row = coll_name.delete_many(myquery)
        #mycommon.myprint(" elaindex - "+str(delete_row.deleted_count)+" documents deleted. Training ==> "+str(class_id)+" ")

        '''
        Adding the index entries.
        Ela_Tokenize performs the following steps:
        - Ela_Remove_Bad_Pattern
        - Ela_Remove_Ponct_Special_Caractere
        - Unidecode
        '''
        status, tab_tokens = Ela_Tokenize(lines)
        if status is False:
            return False

        '''
        Ela_remove_stop_words removes the stop words.
        This step is deprecated and is replaced further below by a lookup
        in the "mysystopwords" collection.
        '''
        status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
        if status is False:
            return False

        status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
        if status is False:
            return False

        print(" AVANT STEM "+str(tab_tokens3))

        # Convert to plain unicode and drop the extra stop words
        tab_tokens3_unicode = []
        for val in tab_tokens3:
            val_unicode = unidecode(val)
            #print(' val_unicode = '+str(val_unicode))

            '''
            Check that the word is not in the new stop-word collection
            '''
            val_tmp = coll_name_StopWord.count_documents({'stop_word': str(val_unicode)})
            if val_tmp <= 0:
                '''
                Get the masculine singular form of the word
                '''
                status, newword = mycommon.GetMasculinSingulier(str(val_unicode))
                if status is True:
                    tab_tokens3_unicode.append(newword)

        print("TTTTT "+str(tab_tokens3_unicode))
        status, tab_tokens4 = Ela_stemmize_Class(tab_tokens3_unicode)
        if status is False:
            return False

        #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
        #tab_tokens4 = Ela_remove_ponct(tab_tokens4)

        # Store the result in MongoDB
        YTUBES_Ela_list_to_mongo(tab_tokens3_unicode, class_id, source_field)

        size_tab = len(tab_tokens4)
        #print("size_tab = " + str(size_tab))
        occurrences = Counter(tab_tokens4)
        most_common = occurrences.most_common()
        #print(most_common)

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False


'''
def Ela_ntlk(mysentence, traning_id):
    tab_tokens = Ela_Tokenize(mysentence)
    tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize_Class(tab_tokens3)

    print("Ela_ntlk = "+str(tab_tokens4))

    ## Use pandas to handle the averages, sums, etc.
    return tab_tokens4.sort()
'''

def Ela_list_to_mongo(tab_tokens, traning_id, source_field):
    try:
        # clear / truncate elaindex
        #db.elaindex.remove({})

        # Count the occurrences of each value
        my_file = open("ela_output_test_file_pandas_2.txt", "w")
        size_tab = len(tab_tokens)

        occurrences = Counter(tab_tokens)

        # insert the list into a set
        list_set = set(tab_tokens)
        # convert the set back to a list
        unique_list = list(list_set)

        final_lists = []
        for tmp in unique_list:
            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
            final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])

        print(final_lists)
        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_formation', 'source_field'))
        my_file.write(str(data))
        my_file.close()

        collection = MYSY_GV.dbname["elaindex"]

        data.reset_index(inplace=True)
        data_dict = data.to_dict("records")

        #print(data_dict)
        # Insert into the collection
        collection.insert_many(data_dict)
        data.to_csv("data_indexees.csv")

        return True
    except Exception as e:
        mycommon.myprint(e)
        return False


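'''
Shape of the documents written to 'elaindex' (illustrative sketch; the field values below are examples):
    { "index": 0, "mots": "python", "occurence": 3, "moyenne": "0.12",
      "id_formation": "Tid_1234", "source_field": "title" }
# the 'index' field comes from data.reset_index(inplace=True) before insert_many
'''
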
'''
Version Youtubes
'''
def YTUBES_Ela_list_to_mongo(tab_tokens, traning_id, source_field):
    try:
        # clear / truncate elaindex
        #db.elaindex.remove({})

        # Count the occurrences of each value
        my_file = open("ela_output_test_file_pandas_2.txt", "w")
        size_tab = len(tab_tokens)

        occurrences = Counter(tab_tokens)

        # insert the list into a set
        list_set = set(tab_tokens)
        # convert the set back to a list
        unique_list = list(list_set)

        final_lists = []
        '''
        To enforce the minimum occurrence rule (>= 5), a parameter called 'seuil' (threshold) is defined.
        '''
        seuil = round(int(str(MYSY_GV.INDEX_MIN_OCCURENCE)) / size_tab, 3)
        print(" SEUIL = "+str(seuil))
        for tmp in unique_list:
            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 3)
            if moyenne >= seuil:
                #print(str(moyenne)+ " CMP "+str(seuil))
                final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])

        #print(final_lists)
        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_formation', 'source_field'))
        my_file.write(str(data))
        my_file.close()

        print(" YTUBES INSERT MAY")

        '''
        YTUBES_CONNECTION_STRING = "mongodb://localhost:27017/"
        YTUBES_client = MongoClient(YTUBES_CONNECTION_STRING)
        YTUBES_dbname = YTUBES_client['mysyvideodb']

        collection = YTUBES_dbname["mysyindex"]
        '''

        collection = MYSY_GV.dbname["elaindex"]

        data.reset_index(inplace=True)
        data_dict = data.to_dict("records")

        #print(data_dict)
        # Insert into the collection
        collection.insert_many(data_dict)
        #data.to_csv("data_indexees.csv")

        return True
    except Exception as e:
        mycommon.myprint(e)
        return False


'''
def test_ela_myntlk():

    tab = []
    tab_tokens = Ela_Tokenize(sentence)
    tab_tokens2 = Ela_remove_stop_words(tab_tokens)
    tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
    tab_tokens4 = Ela_stemmize_Class(tab_tokens3)

    tab_tokens4.sort()
    Ela_list_to_mongo(tab_tokens4, 'Tid_3245')
    exit()
'''

'''
my_file = open("ela_output_test_file_pandas.txt", "w")

# Count the occurrences of each value
size_tab = len(tab_tokens4)
print("size_tab = "+str(size_tab))
occurrences = Counter(tab_tokens4)

# insert the list into a set
list_set = set(tab_tokens4)
# convert the set back to a list
unique_list = (list(list_set))

final_lists = []
print(" MOT ==> Occurrence ==> Moyenne ")
for tmp in unique_list:
    moyenne = round(int(occurrences[str(tmp)])/size_tab, 2)
    #print(str(tmp)+" ==> "+str(occurrences[str(tmp)])+" ==> "+str(moyenne))
    list_tmp = [tmp, str(occurrences[str(tmp)]), str(moyenne)]
    #print(list_tmp)
    final_lists.append([str(tmp), str(occurrences[str(tmp)]), str(moyenne)])


print(" Finals Liste ")
my_file.write("\nWorld\n")
print(final_lists)
data = pd.DataFrame(final_lists, columns={'mots', "occurence", 'moyenne'})
my_file.write(str("\n############ occurrences ############# \n"))
my_file.write(str(data))

most_common = occurrences.most_common()
print(most_common)
my_file.close()
'''

def ela_index_article_avis_record_field(lines, article_avis_id, source_field=""):

    try:
        '''
        ## Delete every existing index entry for this article/avis
        '''
        client = MongoClient(MYSY_GV.CONNECTION_STRING)
        MYSY_GV.dbname = client['cherifdb']
        coll_name = MYSY_GV.dbname['elaindex_article_avis']

        # check default value of parameter : source_field
        if len(str(source_field)) == 0:
            source_field = 'default'

        myquery = {"id_articles_avis": article_avis_id, "source_field": source_field}
        delete_row = coll_name.delete_many(myquery)
        mycommon.myprint(" elaindex Article AVIS : - "+str(delete_row.deleted_count)+" documents deleted. Article_Avis ==> "+str(article_avis_id)+" ")

        '''
        Adding the index entries
        '''
        status, tab_tokens = Ela_Tokenize(lines)
        if status is False:
            return False

        status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
        if status is False:
            return False

        status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
        if status is False:
            return False

        status, tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
        if status is False:
            return False

        #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
        #tab_tokens4 = Ela_remove_ponct(tab_tokens4)

        # Store the result in MongoDB
        Ela_article_avis_list_to_mongo(tab_tokens4, article_avis_id, source_field)

        size_tab = len(tab_tokens4)

        occurrences = Counter(tab_tokens4)
        most_common = occurrences.most_common()
        print(most_common)

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False


def Ela_article_avis_list_to_mongo(tab_tokens, traning_id, source_field):
    try:
        # clear / truncate elaindex
        #db.elaindex.remove({})

        # Count the occurrences of each value
        my_file = open("ela_output_article_file_pandas_2.txt", "w")
        size_tab = len(tab_tokens)

        occurrences = Counter(tab_tokens)

        # insert the list into a set
        list_set = set(tab_tokens)
        # convert the set back to a list
        unique_list = list(list_set)

        final_lists = []
        for tmp in unique_list:
            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 2)
            final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])

        print(final_lists)
        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_article_avis', 'source_field'))
        my_file.write(str(data))
        my_file.close()

        # Making a connection with MongoClient
        client = MongoClient("mongodb://localhost:27017/")
        # database
        db = client["cherifdb"]
        # collection
        collection = db["elaindex_article_avis"]

        data.reset_index(inplace=True)
        data_dict = data.to_dict("records")

        # Insert into the collection
        collection.insert_many(data_dict)
        data.to_csv("data_indexees_article_avis.csv")

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False