28/04/22 - 13h30
parent 4d16f4039f
commit 934b21de5a
@@ -23,6 +23,7 @@ import logging
 from datetime import datetime
 import prj_common as mycommon
 import secrets
+import GlobalVariable as MYSY_GV


 class JSONEncoder(json.JSONEncoder):
@@ -31,7 +32,6 @@ class JSONEncoder(json.JSONEncoder):
             return str(o)
         return json.JSONEncoder.default(self, o)

-CONNECTION_STRING = "mongodb://localhost/cherifdb"


 '''
@@ -40,7 +40,7 @@ Cette fonction supprimer tous les tokens d'un user
 '''
 def Dev_del_user_token(diction):
     try:
-        client = MongoClient(CONNECTION_STRING)
+        client = MongoClient(MYSY_GV.CONNECTION_STRING)

         '''
         # Verification que les champs reçus dans l'API sont bien dans la liste des champs autorisés
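For context on the JSONEncoder subclass touched above: its default() falls back to str() for types the standard encoder cannot serialize, which is how Mongo ObjectId and datetime values survive json encoding throughout this codebase. A minimal self-contained sketch, not the commit's code; the isinstance guard is an illustration, and the bson import ships with pymongo:

import json
from datetime import datetime
from bson import ObjectId  # bundled with pymongo

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        # render Mongo-specific types as plain strings
        if isinstance(o, (ObjectId, datetime)):
            return str(o)
        return json.JSONEncoder.default(self, o)

doc = {"_id": ObjectId(), "date_update": datetime.now()}
print(JSONEncoder().encode(doc))  # both fields come out as strings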
Ela_Spacy.py
@@ -1,15 +1,10 @@
 import spacy
 from spacy import displacy
 import nltk
 from nltk.corpus import stopwords
 from nltk.stem.snowball import SnowballStemmer
 import datetime
 import pandas as pd
 import numpy as np
 import pymongo
 from pymongo import MongoClient
 from collections import Counter
 import ela_spacy_common as lsc

 import prj_common as mycommon
 from unidecode import unidecode
 import inspect
@@ -17,32 +12,11 @@ import sys, os
 from autocorrect import Speller
 from datetime import datetime
 import re
 from spellchecker import SpellChecker

-CONNECTION_STRING = "mongodb://localhost/cherifdb"

-## Gle Variables
-stemmer = SnowballStemmer(language='french')
-nlp = spacy.load("fr_core_news_sm")
-spell = SpellChecker(language='fr')
-token_fr_pontuation = []
+import GlobalVariable as MYSY_GV



-#assign the default stopwords list to a variable
-STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
-stopWords = set(stopwords.words('french'))
-sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"
-lsc.update_stopWords(stopWords)

-#print(type(stopWords))

-lsc.update_token_fr_pontuation(token_fr_pontuation)
-spell_fr = Speller(lang='fr')

-CONNECTION_STRING = "mongodb://localhost/cherifdb"
-client = MongoClient(CONNECTION_STRING)
-dbname = client['cherifdb']

 #print("token_fr_pontuation")
 #print(token_fr_pontuation)
@@ -51,21 +25,6 @@ dbname = client['cherifdb']
 initialisation

 '''
-def init_ch():
-    stemmer = SnowballStemmer(language='french')
-    nlp = spacy.load("fr_core_news_sm")
-    token_fr_pontuation = []

-    # assign the default stopwords list to a variable
-    STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
-    stopWords = set(stopwords.words('french'))
-    sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"
-    lsc.update_stopWords(stopWords)

-    # print(type(stopWords))

-    lsc.update_token_fr_pontuation(token_fr_pontuation)



 '''
@@ -94,19 +53,19 @@ def correct_fr_word(word):
     try:
         mydata = {}

-        print(" Fonction : correct_fr_word : '"+word+"' =======> "+spell_fr(word))
+        print(" Fonction : correct_fr_word : '"+word+"' =======> "+MYSY_GV.spell_fr(word))
         mydata['mot'] = str(word)
-        mydata['mot_corrected'] = str(spell_fr(word))
+        mydata['mot_corrected'] = str(MYSY_GV.spell_fr(word))
         mydata['date_update'] = str(datetime.now())
         mydata['treated'] = 0
-        coll_name = dbname['correction_ortho']
+        coll_name = MYSY_GV.dbname['correction_ortho']
         ret_val_tmp = coll_name.insert_one(mydata)

         if (ret_val_tmp is False):
             mycommon.myprint(str(inspect.stack()[0][3]) + " - Impossbile d'inserer le mot "+str(word)+" dans correction_ortho ")


-        return spell_fr(word)
+        return MYSY_GV.spell_fr(word)

     except Exception as e:
         exc_type, exc_obj, exc_tb = sys.exc_info()
@@ -118,7 +77,7 @@ Suppression des ponctuations
 '''
 def Ela_remove_ponct(list):
     try:
-        for tmp in token_fr_pontuation:
+        for tmp in MYSY_GV.token_fr_pontuation:
             while tmp in list:
                 list.remove(tmp)

@@ -193,7 +152,7 @@ def Ela_Remove_Bad_Pattern(sentence):
         # Gestion des chiffres, car non indexés
         patter5 = re.compile(r"([0-9]+)+")

-        doc = nlp(str(text).lower())
+        doc = MYSY_GV.nlp(str(text).lower())
         final_text = ""
         for val in doc :
             #print(" str(val) = '"+str(val)+"' ")
@@ -237,7 +196,7 @@ def Ela_Remove_Ponct_Special_Caractere(sentence):
             #print(" suppression de : '"+str(noise)+"' ")
             sentence = sentence.replace(str(noise), " ")

-        print(" AFTER REPLACE NOISES = "+str(sentence))
+        #print(" AFTER REPLACE NOISES = "+str(sentence))
         return True, sentence

     except Exception as e:
@@ -266,7 +225,7 @@ def Ela_Tokenize(sentence):
         if (status is False):
             return False

-        doc = nlp(str(sentence).lower())
+        doc = MYSY_GV.nlp(str(sentence).lower())

         #print(" Tokenize = '" + str(doc)+"' ")

@@ -300,7 +259,7 @@ def Ela_remove_stop_words(tab_tokens):
         clean_words = []

         for token in tab_tokens:
-            if token not in stopWords:
+            if token not in MYSY_GV.stopWords:
                 clean_words.append(token)

         return True, clean_words
@@ -325,7 +284,7 @@ def Ela_remove_pronoun(tab_tokens):
     try:
         mywords = []
         for token in tab_tokens:
-            mytok = nlp(str(token).lower())
+            mytok = MYSY_GV.nlp(str(token).lower())
             for token2 in mytok:
                 if token2.pos_ != 'DET' and token2.pos_ != 'CCONJ' and token2.pos_ != 'ADP':
                     mywords.append(str(mytok))
@@ -353,9 +312,9 @@ def Ela_stemmize(tab_tokens):
             if( mycommon.check_word_in_fr_dict(str(mot)) ):

                 if( type(mot) is str ):
-                    tab_ret_val.append(stemmer.stem(mot))
+                    tab_ret_val.append(MYSY_GV.stemmer.stem(mot))
                 else:
-                    tab_ret_val.append(stemmer.stem(mot.text))
+                    tab_ret_val.append(MYSY_GV.stemmer.stem(mot.text))
             else:
                 tab_ret_val.append(mot)

@@ -391,12 +350,12 @@ def Ela_stemmize_Class(tab_tokens):
                 tab_ret_val.append(unidecode(mot))
             else:
                 #print(" AVANT STEM MOT ="+str(mot))
-                tab_ret_val.append( unidecode( str(stemmer.stem(mot))))
+                tab_ret_val.append( unidecode( str(MYSY_GV.stemmer.stem(mot))))
                 #print(" AVANT STEM MOT =" + unidecode( str(stemmer.stem(mot))))

         #print(" STMISATION TAB = "+str(tab_ret_val))

-        return tab_ret_val
+        return True, tab_ret_val
     except Exception as e:
         exc_type, exc_obj, exc_tb = sys.exc_info()
         mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e)+" - Line : "+ str(exc_tb.tb_lineno) )
@@ -424,9 +383,9 @@ def Ela_stemmize_search(tab_tokens):
             '''
             On fait la correction orthographe avant
             '''
-            print("AVANT COORECTION ORH ="+str(mot)+" ==> APRES = "+unidecode(str(spell.correction(mot))))
-            corrected_str = str(spell.correction(mot))
-            tab_ret_val.append( unidecode (str(stemmer.stem(corrected_str))))
+            print("AVANT COORECTION ORH ="+str(mot)+" ==> APRES = "+unidecode(str(MYSY_GV.spell.correction(mot))))
+            corrected_str = str(MYSY_GV.spell.correction(mot))
+            tab_ret_val.append( unidecode (str(MYSY_GV.stemmer.stem(corrected_str))))

         print(" STMISATION TAB = "+str(tab_ret_val))

@@ -457,9 +416,7 @@ def ela_index_record_field(lines, class_id, source_field = ""):
         ## Suppression de toutes les indexation qui existe de cette formation
         '''

-        client = MongoClient(CONNECTION_STRING)
-        dbname = client['cherifdb']
-        coll_name = dbname['elaindex']
+        coll_name = MYSY_GV.dbname['elaindex']

         # check default value of parameter : source_field
         if len(str(source_field)) == 0 :
@@ -467,7 +424,7 @@ def ela_index_record_field(lines, class_id, source_field = ""):

         myquery = {"id_formation": class_id, "source_field":source_field}
         delete_row = coll_name.delete_many(myquery)
-        mycommon.myprint(" elaindex - "+str(delete_row.deleted_count)+" documents deleted. Training ==> "+str(class_id)+" ")
+        #mycommon.myprint(" elaindex - "+str(delete_row.deleted_count)+" documents deleted. Training ==> "+str(class_id)+" ")

         '''
         Ajout des indexe
@@ -501,7 +458,7 @@ def ela_index_record_field(lines, class_id, source_field = ""):
         #print("size_tab = " + str(size_tab))
         occurrences = Counter(tab_tokens4)
         most_common = occurrences.most_common()
-        print(most_common)
+        #print(most_common)


         return True
@@ -511,6 +468,83 @@ def ela_index_record_field(lines, class_id, source_field = ""):
         return False


+'''
+Version Youtubes
+'''
+def YTUBES_ela_index_record_field(lines, class_id, source_field = ""):
+
+    try:
+        '''
+        ## Suppression de toutes les indexation qui existe de cette formation
+        '''
+
+        coll_name = MYSY_GV.YTUBES_dbname['mysyindex']
+        coll_name_StopWord = MYSY_GV.YTUBES_dbname['mysystopwords']
+
+        print("lolololo")
+        # check default value of parameter : source_field
+        if len(str(source_field)) == 0 :
+            source_field = 'default'
+
+        myquery = {"id_formation": class_id, "source_field":source_field}
+        delete_row = coll_name.delete_many(myquery)
+        #mycommon.myprint(" elaindex - "+str(delete_row.deleted_count)+" documents deleted. Training ==> "+str(class_id)+" ")
+
+        '''
+        Ajout des indexe
+        '''
+        status, tab_tokens = Ela_Tokenize(lines)
+        if( status is False):
+            return False
+
+        #print(" AFFICHAGE TAB TOKEN")
+        #print(tab_tokens)
+        #print(" FINNN TAB TOKEN")
+        status, tab_tokens2 = Ela_remove_stop_words(tab_tokens)
+        if (status is False):
+            return False
+
+        status, tab_tokens3 = Ela_remove_pronoun(tab_tokens2)
+        if (status is False):
+            return False
+
+        print(" AVANT STEM ")
+
+        # Mettre en unicode et virer d'autre stop word
+        tab_tokens3_unicode = []
+        for val in tab_tokens3:
+            val_unicode = unidecode(val)
+            val_tmp = coll_name_StopWord.count_documents({'stop_word': str(val_unicode)})
+            if( val_tmp <= 0 ):
+                tab_tokens3_unicode.append(val_unicode)
+
+
+        #print(str(tab_tokens3_unicode))
+        status, tab_tokens4 = Ela_stemmize_Class(tab_tokens3)
+        if (status is False):
+            return False
+
+        #tab_tokens4 = Ela_Remove_Noise_from_list(tab_tokens4)
+        #tab_tokens4 = Ela_remove_ponct(tab_tokens4)
+
+        # Enregistrement dans la base mongodb
+        YTUBES_Ela_list_to_mongo(tab_tokens3_unicode,class_id, source_field)
+
+        size_tab = len(tab_tokens4)
+        #print("size_tab = " + str(size_tab))
+        occurrences = Counter(tab_tokens4)
+        most_common = occurrences.most_common()
+        #print(most_common)
+
+
+        return True
+    except Exception as e:
+        exc_type, exc_obj, exc_tb = sys.exc_info()
+        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
+        return False
+
+
+
 '''
 def Ela_ntlk(mysentence, traning_id):
     tab_tokens = Ela_Tokenize(mysentence)
@@ -550,17 +584,72 @@ def Ela_list_to_mongo(tab_tokens, traning_id, source_field):
         my_file.write(str(data))
         my_file.close()

-        # Making a Connection with MongoClient
-        client = MongoClient("mongodb://localhost:27017/")
-        # database
-        db = client["cherifdb"]
-        # collection
-        collection = db["elaindex"]
+        collection = MYSY_GV.dbname["elaindex"]

         data.reset_index(inplace=True)
         data_dict = data.to_dict("records")

-        print(data_dict)
+        #print(data_dict)
         # Insert collection
         collection.insert_many(data_dict)
         data.to_csv("data_indexees.csv")

         return True
     except Exception as e :
         mycommon.myprint(e)
         return False


+'''
+Version Youtubes
+'''
+def YTUBES_Ela_list_to_mongo(tab_tokens, traning_id, source_field):
+    try:
+        # clear / truncate elaindex
+        #db.elaindex.remove({})
+
+        # Recherche des occurence d'une valeur
+        my_file = open("ela_output_test_file_pandas_2.txt", "w")
+        size_tab = len(tab_tokens)
+
+        occurrences = Counter(tab_tokens)
+
+        # insert the list to the set
+        list_set = set(tab_tokens)
+        # convert the set to the list
+        unique_list = (list(list_set))
+
+        final_lists = []
+        '''
+        Pour gerer la notion de limite d'occurrence >= 5, on definit un paramettre appelé : seuil
+        '''
+        seuil = round(int(str(MYSY_GV.INDEX_MIN_OCCURENCE)) / size_tab, 3)
+        print(" SEUIL = "+str(seuil))
+        for tmp in unique_list:
+            moyenne = round(int(occurrences[str(tmp)]) / size_tab, 3)
+            if( moyenne >= seuil ):
+                print(str(moyenne)+ " CMP "+str(seuil))
+                final_lists.append([str(tmp), int(str(occurrences[str(tmp)])), str(moyenne), str(traning_id), str(source_field)])
+
+        #print(final_lists)
+        data = pd.DataFrame(final_lists, columns=('mots', "occurence", 'moyenne', 'id_formation', 'source_field'))
+        my_file.write(str(data))
+        my_file.close()
+
+        print(" YTUBES INSERT MAY")
+
+        YTUBES_CONNECTION_STRING = "mongodb://localhost:27017/"
+        YTUBES_client = MongoClient(YTUBES_CONNECTION_STRING)
+        YTUBES_dbname = YTUBES_client['mysyvideodb']
+
+
+        collection = YTUBES_dbname["mysyindex"]
+
+        data.reset_index(inplace=True)
+        data_dict = data.to_dict("records")
+
+        #print(data_dict)
+        # Insert collection
+        collection.insert_many(data_dict)
+        data.to_csv("data_indexees.csv")

@@ -628,9 +717,9 @@ def ela_index_article_avis_record_field(lines, article_avis_id, source_field = "
         '''
         ## Suppression de toutes les indexation qui existe de cette formation
         '''
-        client = MongoClient(CONNECTION_STRING)
-        dbname = client['cherifdb']
-        coll_name = dbname['elaindex_article_avis']
+        client = MongoClient(MYSY_GV.CONNECTION_STRING)
+        MYSY_GV.dbname = client['cherifdb']
+        coll_name = MYSY_GV.dbname['elaindex_article_avis']

         # check default value of parameter : source_field
         if len(str(source_field)) == 0 :
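A note on the "seuil" logic added above: both seuil and moyenne are counts normalized by the total token count, so the filter moyenne >= seuil is equivalent (rounding aside) to keeping tokens that occur at least INDEX_MIN_OCCURENCE times. A minimal sketch of that filter, with the constant hard-coded for illustration:

from collections import Counter

INDEX_MIN_OCCURENCE = 5  # same meaning as MYSY_GV.INDEX_MIN_OCCURENCE

def filter_by_seuil(tokens):
    # moyenne >= seuil boils down to count >= INDEX_MIN_OCCURENCE
    size_tab = len(tokens)
    occurrences = Counter(tokens)
    seuil = round(INDEX_MIN_OCCURENCE / size_tab, 3)
    return [(mot, n, round(n / size_tab, 3))
            for mot, n in occurrences.items()
            if round(n / size_tab, 3) >= seuil]

print(filter_by_seuil(["prix"] * 10 + ["vendu"] * 8 + ["vis"] * 2))
# only "prix" and "vendu" pass the 5-occurrence threshold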
GlobalVariable.py
@@ -0,0 +1,73 @@
+'''
+Ce fichier contient les variables globales du systeme
+'''
+from pymongo import MongoClient
+from spellchecker import SpellChecker
+from nltk.stem.snowball import SnowballStemmer
+import spacy
+import logging
+from autocorrect import Speller
+from nltk.corpus import stopwords
+import ela_spacy_common as lsc
+
+CONNECTION_STRING = "mongodb://localhost:27017/cherifdb"
+client = MongoClient(CONNECTION_STRING)
+dbname = client['cherifdb']
+
+
+
+
+YTUBES_CONNECTION_STRING = "mongodb://localhost:27017/"
+YTUBES_client = MongoClient(YTUBES_CONNECTION_STRING)
+YTUBES_dbname = YTUBES_client['mysyvideodb']
+
+
+
+'''
+Cette Variable defini le nombre minimum
+de frequence d'un mot pour qu'il soit indexé.
+
+en gros, dans une text, les mots apparaissent moins de 5 fois sont consideré
+comme des mots à faible raisonnance'''
+INDEX_MIN_OCCURENCE = 5
+
+
+
+## Gle Variables
+stemmer = SnowballStemmer(language='french')
+nlp = spacy.load("fr_core_news_sm")
+spell = SpellChecker(language='fr')
+token_fr_pontuation = []
+
+
+'''
+La taille maximal d'un champs à retourner.
+Au dela de 300 caractère, le système rame
+'''
+MAX_CARACT = 300
+MAX_CARACT_DEDUIT = 50
+TOKEN_SIZE = 25
+
+
+
+DEBUG_LEVEL = logging.DEBUG
+LOG_FILE_NAME = "./Log/log_file.log"
+
+
+upload_folder = "./Data/"
+ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
+logging.basicConfig( level=DEBUG_LEVEL, filename=LOG_FILE_NAME)
+AUTORIZED_SOURCE_IPV4=["127.0.0.1", "localhost", "88.170.110.220", "192.168.1.21", "192.168.1.48"]
+
+
+#assign the default stopwords list to a variable
+STOP_WORDS = spacy.lang.fr.stop_words.STOP_WORDS
+stopWords = set(stopwords.words('french'))
+sentence = "Bouygues a eu une coupure de réseau à Marseille chez ses clients marseillais et son couteau"
+lsc.update_stopWords(stopWords)
+
+#print(type(stopWords))
+
+lsc.update_token_fr_pontuation(token_fr_pontuation)
+spell_fr = Speller(lang='fr')
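The recurring change in this commit follows from this new module: every file imports it once and reuses the shared Mongo client instead of opening a connection per function, which also matches PyMongo's model of a single pooled, thread-safe MongoClient per process. A hedged before/after sketch (collection name taken from the diff):

import GlobalVariable as MYSY_GV

# Before: each function paid for its own client.
# from pymongo import MongoClient
# coll = MongoClient("mongodb://localhost/cherifdb")['cherifdb']['myclass']

# After: one process-wide client, shared through the module.
coll = MYSY_GV.dbname['myclass']
print(coll.count_documents({}))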
articles_avis.py
@@ -22,6 +22,7 @@ import inspect
 import sys, os
 import csv
 import pandas as pd
+import GlobalVariable as MYSY_GV



@@ -37,13 +38,6 @@ CONNECTION_STRING = "mongodb://localhost/cherifdb"
-client = MongoClient(CONNECTION_STRING)
-dbname = client['cherifdb']

-'''
-La taille maximal d'un champs à retourner.
-Au dela de 300 caractère, le système rame
-'''
-MAX_CARACT = 300
-MAX_CARACT_DEDUIT = 50


 '''
@@ -79,18 +73,18 @@ def get_all_articles_avis(diction):
                 [("title_formation",pymongo.ASCENDING), ("date_avis",pymongo.ASCENDING)]):
             # mycommon.myprint("AVANT ==> "+str(x['description']))
             val = x['qualite']
-            if (len(x['qualite']) > MAX_CARACT):
-                x['qualite'] = val[:MAX_CARACT] + " ..."
+            if (len(x['qualite']) > MYSY_GV.MAX_CARACT):
+                x['qualite'] = val[:MYSY_GV.MAX_CARACT] + " ..."

             else:
-                x['qualite'] = val[:MAX_CARACT]
+                x['qualite'] = val[:MYSY_GV.MAX_CARACT]

             val = x['avis']
-            if (len(x['avis']) > MAX_CARACT):
-                x['avis'] = val[:MAX_CARACT] + " ..."
+            if (len(x['avis']) > MYSY_GV.MAX_CARACT):
+                x['avis'] = val[:MYSY_GV.MAX_CARACT] + " ..."

             else:
-                x['avis'] = val[:MAX_CARACT]
+                x['avis'] = val[:MYSY_GV.MAX_CARACT]

             # mycommon.myprint("APRES ==> " + str(x['description']))

@@ -485,18 +479,18 @@ def recherche_articles_avis(diction):

             val = x['qualite']
-            if (len(x['qualite']) > MAX_CARACT):
-                x['qualite'] = val[:MAX_CARACT] + " ..."
+            if (len(x['qualite']) > MYSY_GV.MAX_CARACT):
+                x['qualite'] = val[:MYSY_GV.MAX_CARACT] + " ..."

             else:
-                x['qualite'] = val[:MAX_CARACT]
+                x['qualite'] = val[:MYSY_GV.MAX_CARACT]

             val = x['avis']
-            if (len(x['avis']) > MAX_CARACT):
-                x['avis'] = val[:MAX_CARACT] + " ..."
+            if (len(x['avis']) > MYSY_GV.MAX_CARACT):
+                x['avis'] = val[:MYSY_GV.MAX_CARACT] + " ..."

             else:
-                x['avis'] = val[:MAX_CARACT]
+                x['avis'] = val[:MYSY_GV.MAX_CARACT]

             insertObject.append(JSONEncoder().encode(user))
@@ -678,18 +672,18 @@ def get_article_avis_alaune(diction):
             [("date_avis", pymongo.ASCENDING), ("title_formation", pymongo.ASCENDING)]):

             val = x['qualite']
-            if (len(x['qualite']) > MAX_CARACT_DEDUIT):
-                x['qualite'] = val[:MAX_CARACT_DEDUIT] + " ..."
+            if (len(x['qualite']) > MYSY_GV.MAX_CARACT_DEDUIT):
+                x['qualite'] = val[:MYSY_GV.MAX_CARACT_DEDUIT] + " ..."

             else:
-                x['qualite'] = val[:MAX_CARACT_DEDUIT]
+                x['qualite'] = val[:MYSY_GV.MAX_CARACT_DEDUIT]

             val = x['avis']
-            if (len(x['avis']) > MAX_CARACT_DEDUIT):
-                x['avis'] = val[:MAX_CARACT_DEDUIT] + " ..."
+            if (len(x['avis']) > MYSY_GV.MAX_CARACT_DEDUIT):
+                x['avis'] = val[:MYSY_GV.MAX_CARACT_DEDUIT] + " ..."

             else:
-                x['avis'] = val[:MAX_CARACT_DEDUIT]
+                x['avis'] = val[:MYSY_GV.MAX_CARACT_DEDUIT]

             user = x
             insertObject.append(JSONEncoder().encode(user))
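The same four-line truncation pattern repeats for every field above. A small hypothetical helper (not in the commit) makes the behaviour it implements explicit: cap a field at MAX_CARACT characters and flag truncation with a trailing ellipsis:

MAX_CARACT = 300  # mirrors MYSY_GV.MAX_CARACT

def truncate_field(val, limit=MAX_CARACT):
    # fields longer than the limit are cut and flagged with " ..."
    if len(val) > limit:
        return val[:limit] + " ..."
    return val[:limit]

assert truncate_field("abc", limit=5) == "abc"
assert truncate_field("abcdefgh", limit=5) == "abcde ..."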
data_indexees.csv
@@ -1,13 +1,21 @@
 ,index,mots,occurence,moyenne,id_formation,source_field
-0,0,regl,1,0.08,8866,objectif
-1,1,const,1,0.08,8866,objectif
-2,2,object,1,0.08,8866,objectif
-3,3,publiqu,1,0.08,8866,objectif
-4,4,format,1,0.08,8866,objectif
-5,5,comptabl,1,0.08,8866,objectif
-6,6,impos,1,0.08,8866,objectif
-7,7,evolu,1,0.08,8866,objectif
-8,8,reglement,1,0.08,8866,objectif
-9,9,appliqu,1,0.08,8866,objectif
-10,10,maitris,1,0.08,8866,objectif
-11,11,princip,1,0.08,8866,objectif
+0,0,dynamiques,7,0.012,DmZxdIvG3YQ,video_text
+1,1,valeurs,5,0.009,DmZxdIvG3YQ,video_text
+2,2,emporter,5,0.009,DmZxdIvG3YQ,video_text
+3,3,colonne,6,0.011,DmZxdIvG3YQ,video_text
+4,4,graphique,6,0.011,DmZxdIvG3YQ,video_text
+5,5,nombre,5,0.009,DmZxdIvG3YQ,video_text
+6,6,vendu,8,0.014,DmZxdIvG3YQ,video_text
+7,7,prix,10,0.018,DmZxdIvG3YQ,video_text
+8,8,croises,6,0.011,DmZxdIvG3YQ,video_text
+9,9,somme,5,0.009,DmZxdIvG3YQ,video_text
+10,10,cellules,5,0.009,DmZxdIvG3YQ,video_text
+11,11,fonction,5,0.009,DmZxdIvG3YQ,video_text
+12,12,groupe,7,0.012,DmZxdIvG3YQ,video_text
+13,13,tableaux,7,0.012,DmZxdIvG3YQ,video_text
+14,14,modifier,6,0.011,DmZxdIvG3YQ,video_text
+15,15,tableau,7,0.012,DmZxdIvG3YQ,video_text
+16,16,donnees,8,0.014,DmZxdIvG3YQ,video_text
+17,17,vis,6,0.011,DmZxdIvG3YQ,video_text
+18,18,produits,12,0.021,DmZxdIvG3YQ,video_text
+19,19,besoin,6,0.011,DmZxdIvG3YQ,video_text
ela_index_bdd_classes.py
@@ -6,11 +6,7 @@ from pymongo import ReturnDocument
 import inspect
 import sys, os
 from operator import itemgetter, attrgetter

-client = MongoClient("mongodb://localhost:27017/")
-# database
-db = client["cherifdb"]
-# collection
+import GlobalVariable as MYSY_GV

 '''
 Cette fonction indexe une formation
@@ -22,7 +18,7 @@ Par defaut sont pris en compte :
 def ela_index_class(external_code="", source_field = ""):
     try:

-        collection = db["myclass"]
+        collection = MYSY_GV.dbname["myclass"]

         if( len(str(source_field)) == 0):
             source_field = 'default'
@@ -156,6 +152,30 @@ def ela_index_class(external_code="", source_field = ""):
                 else:
                     return False

+        # Indexation du champ : video_text , donc la transcription d'une video
+        elif (str(source_field) == "video_text"):
+            chaine = doc["video_text"]
+            # mycommon.myprint(" external_code = "+str(doc["external_code"])+" ==> "+str(chaine))
+            retval = ls.ela_index_record_field(chaine, str(doc["external_code"]), source_field)
+
+            '''
+            # Apres indexation, on met a jour la formation pour
+            # mettre le champ "indexe" a 1.
+            '''
+            if (retval):
+                # mise à jour du champ "indexed"
+                myobjectif = {}
+                myobjectif['indexed_video_text'] = '1'
+
+                ret_val2 = collection.find_and_modify(query={"external_code": external_code},
+                                                      update={"$set": myobjectif}
+                                                      )
+
+                if (ret_val2 and ret_val2['_id']):
+                    mycommon.myprint("La formation indexée (champs video_text) =" + str(ret_val2['_id']))
+                    return True
+                else:
+                    return False

         return True
     except Exception as e:
@@ -172,7 +192,7 @@ def ela_index_all_classes():
     # Making a Connection with MongoClient
     try:
         # collection
-        collection = db["myclass"]
+        collection = MYSY_GV.dbname["myclass"]
         '''
         ATTENTION ATTENTION
         /!\ : avant l'indexation, on vide la table des indexes.
@@ -223,7 +243,7 @@ ex : mot1;mot2;mot3
 '''
 def ela_index_all_classes_keywords():
     try:
-        collection = db["myclass"]
+        collection = MYSY_GV.dbname["myclass"]
         nb_class_indexed = 0
         for doc in collection.find({'indexed_keyword': '0'}):
             if ("mots_cle" in doc.keys()):
@@ -257,7 +277,7 @@ def ela_index_all_classes_title():
     # Making a Connection with MongoClient
     try:

-        collection = db["myclass"]
+        collection = MYSY_GV.dbname["myclass"]
         '''
         ATTENTION ATTENTION
         /!\ : avant l'indexation, on vide la table des indexes.
@@ -312,7 +332,7 @@ def ela_index_all_classes_desc():
         # database
         db = client["cherifdb"]
         # collection
-        collection = db["myclass"]
+        collection = MYSY_GV.dbname["myclass"]
         '''
         ATTENTION ATTENTION
         /!\ : avant l'indexation, on vide la table des indexes.
@@ -364,7 +384,7 @@ def ela_index_all_classes_obj():
     # Making a Connection with MongoClient
     try:
         # collection
-        collection = db["myclass"]
+        collection = MYSY_GV.dbname["myclass"]
         '''
         ATTENTION ATTENTION
         /!\ : avant l'indexation, on vide la table des indexes.
@@ -407,6 +427,61 @@ def ela_index_all_classes_obj():
         return False, "Impossible d'indexer la BDD des formations ==> KO"


+'''
+Cette fonction lire la transcription d'une video et
+l'indexer'''
+
+def ela_index_all_classes_video_text():
+    # Making a Connection with MongoClient
+    try:
+        # collection
+        collection = MYSY_GV.dbname["myclass"]
+        '''
+        ATTENTION ATTENTION
+        /!\ : avant l'indexation, on vide la table des indexes.
+        cette logique est tres gourmande.
+        ce quil faudrait faire :
+        - si une formation est indexée, mettre un fag pour dire : indexé => OK
+        - si on met à jour une formation ==> Mettre le flag à KO
+        - si on ajoute une formation, mettre le flag à KO
+
+        ex : indexation du champ 'title' ou du champ 'objectif', je vais introduitre la notion de 'source_fied'
+        pour l'indexation title, 'source_fied' = 'title'
+        pour l'indexation objectif, 'source_fied' = 'objectif'
+
+        apres pour l'indexation, il ne faudra indexer que les formation dont le flag est KO.
+        aussi, avant d'indexer une formation, supprimer tous les index de la dite formation pour remplacer tous les anciens et eviter un doublon
+        '''
+
+        nb_class_indexed = 0
+        for doc in collection.find({'indexed_video_text': '0'}):
+            if ("video_text" in doc.keys()):
+                chaine = doc["video_text"]
+                #mycommon.myprint(" video_texte = " + str(doc["external_code"]) + " ==> " + str(chaine))
+                retval = ela_index_class(str(doc["external_code"]), "video_text")
+                #print(" apres : ela_index_all_classes_video_texte retval = " + str(retval))
+
+                if (retval is False):
+                    mycommon.myprint("IMPOSSIBLE d'indexer la formation = " + str(doc["external_code"]))
+                else:
+                    nb_class_indexed = nb_class_indexed + 1
+
+                # retval = ls.ela_index_record_field(chaine, str(doc["external_code"]))
+
+        mycommon.myprint(str(nb_class_indexed) + " ont été indexes - champ video_text")
+        return True, str(nb_class_indexed) + " ont été indexes - champ video_text "
+
+    except Exception as e:
+        exc_type, exc_obj, exc_tb = sys.exc_info()
+        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
+        return False, "Impossible d'indexer video_text la BDD des formations ==> KO"


 '''
 Cette fonction recherche une chaine de caractère
 dans la base de données.
@@ -455,7 +530,7 @@ def ela_recherche_tokens(sentence):
     '''

     # collection
-    collection = db["elaindex"]
+    collection = MYSY_GV.dbname["elaindex"]

     print(" mot recherché tab_tokens4 = "+str(tab_tokens4))

@@ -532,7 +607,7 @@ def ela_recherche_tokens_source_field(sentence, source_fied=""):
     a quoi cela pourrait correspondre.
     '''

-    collection = db["elaindex"]
+    collection = MYSY_GV.dbname["elaindex"]

     for token in tab_tokens4:
         print(" #### Token rechercher dans l'index est : '"+str(token)+"' et le source_field = '"+str(source_fied)+"' ")
@@ -560,7 +635,7 @@ def ela_index_all_articles_avis():
     # Making a Connection with MongoClient
     try:
         # collection
-        collection = db["articles_avis"]
+        collection = MYSY_GV.dbname["articles_avis"]
         nb_class_indexed = 0
         for doc in collection.find({'indexed': '0'}):
             if ("title_formation" in doc.keys()):
@@ -596,7 +671,7 @@ Par defaut sont pris en compte :
 def ela_index_articles_avis(titre_formation="", source_field = ""):
     try:

-        collection = db["articles_avis"]
+        collection = MYSY_GV.dbname["articles_avis"]

         if( len(str(source_field)) == 0):
             source_field = 'default'
@@ -681,7 +756,7 @@ def ela_recherche_article_avis_tokens(sentence):
     '''

     # collection
-    collection = db["elaindex_article_avis"]
+    collection = MYSY_GV.dbname["elaindex_article_avis"]

     print(" mot recherché tab_tokens4 = " + str(tab_tokens4))
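The ATTENTION note above describes the intended incremental scheme: only documents whose flag is still '0' get (re)indexed, and the flag flips to '1' on success so a full re-run stays cheap. A compact sketch of that loop under those assumptions (collection and field names taken from the diff; the tokenize/store step is elided):

import GlobalVariable as MYSY_GV

def index_pending_video_text():
    # only documents still flagged '0' are (re)indexed
    collection = MYSY_GV.dbname["myclass"]
    indexed = 0
    for doc in collection.find({'indexed_video_text': '0'}):
        if "video_text" not in doc:
            continue
        # ... tokenize/stem doc["video_text"] and store it in 'elaindex' ...
        collection.update_one({"external_code": doc["external_code"]},
                              {"$set": {"indexed_video_text": '1'}})
        indexed += 1
    return indexed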
ela_output_test_file_pandas_2.txt
@@ -1,13 +1,21 @@
-          mots  occurence moyenne id_formation source_field
-0         regl          1    0.08         8866     objectif
-1        const          1    0.08         8866     objectif
-2       object          1    0.08         8866     objectif
-3      publiqu          1    0.08         8866     objectif
-4       format          1    0.08         8866     objectif
-5     comptabl          1    0.08         8866     objectif
-6        impos          1    0.08         8866     objectif
-7        evolu          1    0.08         8866     objectif
-8    reglement          1    0.08         8866     objectif
-9      appliqu          1    0.08         8866     objectif
-10     maitris          1    0.08         8866     objectif
-11     princip          1    0.08         8866     objectif
+          mots  occurence moyenne id_formation source_field
+0   dynamiques          7   0.012  DmZxdIvG3YQ   video_text
+1      valeurs          5   0.009  DmZxdIvG3YQ   video_text
+2     emporter          5   0.009  DmZxdIvG3YQ   video_text
+3      colonne          6   0.011  DmZxdIvG3YQ   video_text
+4    graphique          6   0.011  DmZxdIvG3YQ   video_text
+5       nombre          5   0.009  DmZxdIvG3YQ   video_text
+6        vendu          8   0.014  DmZxdIvG3YQ   video_text
+7         prix         10   0.018  DmZxdIvG3YQ   video_text
+8      croises          6   0.011  DmZxdIvG3YQ   video_text
+9        somme          5   0.009  DmZxdIvG3YQ   video_text
+10    cellules          5   0.009  DmZxdIvG3YQ   video_text
+11    fonction          5   0.009  DmZxdIvG3YQ   video_text
+12      groupe          7   0.012  DmZxdIvG3YQ   video_text
+13    tableaux          7   0.012  DmZxdIvG3YQ   video_text
+14    modifier          6   0.011  DmZxdIvG3YQ   video_text
+15     tableau          7   0.012  DmZxdIvG3YQ   video_text
+16     donnees          8   0.014  DmZxdIvG3YQ   video_text
+17         vis          6   0.011  DmZxdIvG3YQ   video_text
+18    produits         12   0.021  DmZxdIvG3YQ   video_text
+19      besoin          6   0.011  DmZxdIvG3YQ   video_text
main.py
@@ -9,7 +9,7 @@ from datetime import timedelta
 import Ela_Spacy as ls
 import user_message_mgt as um
 import user_session as us
-import logging
+
 import partners as pa
 import prj_common as mycommon
 import ela_index_bdd_classes as ela_index_class
@@ -21,23 +21,15 @@ from OpenSSL import SSL
 from flask import redirect
 from flask import Flask, render_template, request, redirect, url_for
 import articles_avis as aa
+import GlobalVariable as MYSY_GV
+import youtubes_analyse as YTA


-DEBUG_LEVEL = logging.DEBUG
-LOG_FILE_NAME = "./Log/log_file.log"


-upload_folder = "./Data/"
-ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}


-logging.basicConfig( level=DEBUG_LEVEL, filename=LOG_FILE_NAME)


 app = Flask(__name__)
 cors = CORS(app, resources={r"/foo": {"origins": "*"}})
 app.config['CORS_HEADERS'] = 'Content-Type'
-app.config['UPLOAD_FOLDER'] = upload_folder
+app.config['UPLOAD_FOLDER'] = MYSY_GV.upload_folder

 @app.before_request
 def before_request():
@@ -605,6 +597,39 @@ def ela_index_all_classes_title():
     status, retval = ela_index_class.ela_index_all_classes_title()
     return jsonify(status=status, message=retval)



+'''
+Cette API index la base donnée en se basant sur les videos qui ont une transcription
+le champs en question sera : "video_text".
+La fonction "ela_index_all_classes" va aller chercher chaque cours dans la BDD
+et créer les mots clées qui sont ensuite enregister dans la table "elaindex"
+
+'''
+@app.route('/myclass/api/ela_index_all_classes_video_text/', methods=['POST','GET'])
+@crossdomain(origin='*')
+def ela_index_all_classes_video_text():
+    # On recupere le corps (payload) de la requete
+    payload = request.form.to_dict()
+    print(" ### payload = ",payload)
+    status, retval = ela_index_class.ela_index_all_classes_video_text()
+    return jsonify(status=status, message=retval)
+
+
+
+''' Analyse Youtubes
+'''
+@app.route('/myclass/api/YTUBES_RedText/', methods=['POST','GET'])
+@crossdomain(origin='*')
+def YTUBES_RedText():
+    # On recupere le corps (payload) de la requete
+    payload = request.form.to_dict()
+    print(" ### payload = ",payload)
+    status, retval = YTA.YTUBES_RedText()
+    return jsonify(status=status, message=retval)



 '''
 Cette API index la base donnée en se basant sur le description.
 La fonction "ela_index_all_classes" va aller chercher chaque cours dans la BDD
@@ -801,7 +826,7 @@ par exemple avec un fichier CSV

 def allowed_file(filename):
     return '.' in filename and \
-           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
+           filename.rsplit('.', 1)[1].lower() in MYSY_GV.ALLOWED_EXTENSIONS


 @app.route('/myclass/api/add_class_mass/', methods=['POST','GET'])
@@ -975,6 +1000,7 @@ def send_mail_delete_user():
 if __name__ == '__main__':
     print(" debut api")
+

     context = SSL.Context(SSL.SSLv23_METHOD)
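For reference, both new routes accept POST or GET, ignore their payload beyond logging it, and reply with {status, message} JSON. A hedged client-side sketch; the host and port are assumptions (Flask's defaults), only the route paths come from the diff, and requests is just one possible HTTP client:

import requests  # assumed available; any HTTP client works

BASE = "http://localhost:5000"  # host/port are an assumption, not in the diff

# trigger indexing of every course whose video transcription is not yet indexed
r = requests.post(BASE + "/myclass/api/ela_index_all_classes_video_text/")
print(r.json())  # e.g. {"status": true, "message": "N ont été indexes - champ video_text "}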
prj_common.py
@@ -15,15 +15,9 @@ import sys
 from datetime import datetime
 from pymongo import ReturnDocument
 from unidecode import unidecode
+import GlobalVariable as MYSY_GV


-TOKEN_SIZE = 25
-CONNECTION_STRING = "mongodb://localhost/cherifdb"
-client = MongoClient(CONNECTION_STRING)
-dbname = client['cherifdb']

-AUTORIZED_SOURCE_IPV4=["127.0.0.1", "localhost", "88.170.110.220", "192.168.1.21", "192.168.1.48"]


 def myprint(message = ""):
     logging.info(str(datetime.now()) + " : "+str(message) )
@@ -31,11 +25,11 @@ def myprint(message = ""):


 def create_token_urlsafe():
-    return secrets.token_urlsafe(TOKEN_SIZE)
+    return secrets.token_urlsafe(MYSY_GV.TOKEN_SIZE)


 def create_user_recid():
-    return secrets.token_hex(TOKEN_SIZE)
+    return secrets.token_hex(MYSY_GV.TOKEN_SIZE)


 '''
@@ -88,7 +82,7 @@ puis verifie si la validité du trile (email, token, statut)
 def check_token_validity(email="", token=""):
     try :

-        coll_token = dbname['user_token']
+        coll_token = MYSY_GV.dbname['user_token']

         tmp_count = coll_token.count_documents({ 'token': str(token), 'valide': '1'})
         if (tmp_count <= 0):
@@ -108,7 +102,7 @@ puis verifie si le compte utilisateur est actif
 def check_user_validity(email="", token=""):
     try :

-        coll_token = dbname['user_account']
+        coll_token = MYSY_GV.dbname['user_account']
         message = {}
         ret = True

@@ -142,7 +136,7 @@ donc la table partner_token
 '''
 def check_partner_token_validity(email="", token=""):
     try :
-        coll_token = dbname['partner_token']
+        coll_token = MYSY_GV.dbname['partner_token']
         tmp_count = coll_token.count_documents({ 'token': str(token), 'locked':'0', 'valide': '1'})

         #tmp_count = coll_token.find({ 'token': str(token), 'locked':'0', 'valide': '1'}).count()
@@ -166,7 +160,7 @@ def get_user_recid_from_token(token = ""):
         myprint(" Le token est vide")
         return False

-    coll_token = dbname['user_token']
+    coll_token = MYSY_GV.dbname['user_token']
     tmp_val = coll_token.find({'token': str(token), 'valide': '1'})
     user_recid = tmp_val[0]['recid']
     return user_recid
@@ -180,7 +174,7 @@ def get_user_email_from_token(token = ""):
         myprint(" Le token est vide")
         return False

-    coll_token = dbname['user_token']
+    coll_token = MYSY_GV.dbname['user_token']
     tmp_val = coll_token.find({'token': str(token), 'valide': '1'})
     user_email = tmp_val[0]['email']
     return user_email
@@ -196,7 +190,7 @@ def get_user_email_from_recid(recid = ""):
         myprint(" Le recid est vide")
         return False

-    coll_token = dbname['user_account']
+    coll_token = MYSY_GV.dbname['user_account']
     tmp_val = coll_token.find({'recid': str(recid), 'active': '1'})

     if( tmp_val and tmp_val[0] and tmp_val[0]['email']):
@@ -219,7 +213,7 @@ def get_parnter_recid_from_token(token = ""):
         myprint(" Le token partner est vide")
         return False

-    coll_token = dbname['partner_token']
+    coll_token = MYSY_GV.dbname['partner_token']
     tmp_val = coll_token.find({'token': str(token), 'valide': '1', 'locked':'0'})
     user_recid = tmp_val[0]['recid']
     return user_recid
@@ -233,7 +227,7 @@ def get_user_recid_from_email(email = ""):
         return False


-    coll_user = dbname['user_account']
+    coll_user = MYSY_GV.dbname['user_account']
     tmp_val = coll_user.find({'email': str(email), 'active': '1'})
     user_recid = tmp_val[0]['recid']

@@ -337,7 +331,7 @@ stemisable à traver la tabla "word_not_stem
 '''
 def Word_Not_Stemmize(word = None):
     try:
-        coll_not_stem = dbname["word_not_stem"]
+        coll_not_stem = MYSY_GV.dbname["word_not_stem"]
         val_tmp = coll_not_stem.count_documents({'mot': str(word)})


@@ -360,8 +354,8 @@ Utilisé dans le cas de l'indexation d'une formation
 def check_word_in_fr_dict(mot=None):
     try:
         print("#### analyse du mot "+str(mot))
-        col_name = dbname["list_mots_fr"]
-        col_name_not_fr = dbname["list_mots_not_fr"]
+        col_name = MYSY_GV.dbname["list_mots_fr"]
+        col_name_not_fr = MYSY_GV.dbname["list_mots_not_fr"]
         mydata = {}
         val_tmp = col_name.count_documents({'mot': str(mot)})

@@ -397,7 +391,7 @@ recherche d'un utilisateur '''
 def recherche_check_word_in_fr_dict(mot=None):
     try:
         print("#### analyse du mot "+str(mot))
-        col_name = dbname["list_mots_fr"]
+        col_name = MYSY_GV.dbname["list_mots_fr"]
         val_tmp = col_name.count_documents({'mot': str(mot)})

         if (val_tmp <= 0):
@@ -419,7 +413,7 @@ est autorisé ou pas.
 '''
 def check_source_ipv4(source_ip=None):
     try:
-        if source_ip in AUTORIZED_SOURCE_IPV4:
+        if source_ip in MYSY_GV.AUTORIZED_SOURCE_IPV4:
             myprint(" Security check : IP adresse '"+str(source_ip)+"' connected")
             return True
         else:
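The two token helpers above wrap Python's standard secrets module; with TOKEN_SIZE = 25, token_urlsafe returns roughly 34 URL-safe characters (25 random bytes, base64url-encoded) and token_hex exactly 50 hex characters. A standalone sketch:

import secrets

TOKEN_SIZE = 25  # mirrors MYSY_GV.TOKEN_SIZE

session_token = secrets.token_urlsafe(TOKEN_SIZE)  # ~34 URL-safe chars from 25 bytes
user_recid = secrets.token_hex(TOKEN_SIZE)         # exactly 50 hex chars

print(len(user_recid))  # 50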
wrapper.py
@@ -19,6 +19,7 @@ import sys, os
 import csv
 import pandas as pd
 from unidecode import unidecode
+import GlobalVariable as MYSY_GV



@@ -30,15 +31,7 @@ class JSONEncoder(json.JSONEncoder):
         print(o)
         return json.JSONEncoder.default(self, o)

-CONNECTION_STRING = "mongodb://localhost/cherifdb"
-client = MongoClient(CONNECTION_STRING)
-dbname = client['cherifdb']

-'''
-La taille maximal d'un champs à retourner.
-Au dela de 300 caractère, le système rame
-'''
-MAX_CARACT = 300

 def get_recherche_gle_class(sentence):
     try:
@@ -53,7 +46,7 @@ def get_recherche_gle_class(sentence):
         mycommon.myprint(tab_training)


-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         insertObject = []

@@ -80,10 +73,8 @@ def get_class_by_list_attr(attribut, list_values):
         ela_array = []
         ela_array = list_values.split(",")
         mycommon.myprint(" attribut"+ attribut+" ==> list_values = "+ela_array)
-        client = MongoClient(CONNECTION_STRING)

-        dbname = client['cherifdb']
-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         insertObject = []
         for x in coll_name.find({attribut:{ "$in":ela_array}},{"_id": 0, "indexed": 0, "indexed_desc": 0, "indexed_obj": 0, "indexed_title": 0,
@@ -105,10 +96,8 @@ def get_class_by_list_attr(attribut, list_values):
 def update_class_by_attribut(objId, attribut, value):
     try:
         mycommon.myprint("objId = "+objId+" attribut "+attribut+" value = "+value)
-        client = MongoClient(CONNECTION_STRING)

-        dbname = client['cherifdb']
-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         insertObject = []
         for x in coll_name.find({attribut: value}, {"_id": 0, "indexed": 0, "indexed_desc": 0, "indexed_obj": 0,
@@ -186,7 +175,7 @@ def get_all_class(diction):

         print(" My Data ="+str(mydata))

-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         val_tmp = 1
         insertObject = []
@@ -194,11 +183,11 @@ def get_all_class(diction):
                                  "valide": 0, "locked": 0, "partner_owner_recid": 0, }):
             #mycommon.myprint("AVANT ==> "+str(x['description']))
             val = x['description']
-            if( len(x['description']) > MAX_CARACT ):
-                x['description'] = val[:MAX_CARACT]+" ..."
+            if( len(x['description']) > MYSY_GV.MAX_CARACT ):
+                x['description'] = val[:MYSY_GV.MAX_CARACT]+" ..."

             else:
-                x['description'] = val[:MAX_CARACT]
+                x['description'] = val[:MYSY_GV.MAX_CARACT]

             #mycommon.myprint("APRES ==> " + str(x['description']))

@@ -350,7 +339,7 @@ def recherche_text_simple(diction):
         '''

         user_recid = "None"
-        coll_search_result = dbname['user_recherche_result']
+        coll_search_result = MYSY_GV.dbname['user_recherche_result']

         # Verification de la validité du token/mail dans le cas des user en mode connecté
         if ( len(str(token)) > 0 ):
@@ -433,7 +422,7 @@ def recherche_text_simple(diction):
             #print( " unique = "+str(t))
             final_message2.append(t)

-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         print(" Dans la recherch TIPS , les critère = " + str(certif_crit) + " la langue = " + str(
             lang_crit) + " price = " + str(price_crit)+" support = "+str(support_crit),
@@ -445,7 +434,7 @@ def recherche_text_simple(diction):
         #store_recherche_Id
         find_result = {'find_result':str(final_message2)}

-        tab_user_recherche = dbname['user_recherche']
+        tab_user_recherche = MYSY_GV.dbname['user_recherche']
         # seules les formation avec locked = 0 et valide=1 sont modifiables
         #print(str(inspect.stack()[0][3]) + " ENREG DES RESULT :"+str(store_recherche_Id)+" --- "+str(final_message2))

@@ -493,10 +482,10 @@ def recherche_text_simple(diction):

             user = x
             val = x['description']
-            if (len(x['description']) > MAX_CARACT):
-                x['description'] = val[:MAX_CARACT] + " ..."
+            if (len(x['description']) > MYSY_GV.MAX_CARACT):
+                x['description'] = val[:MYSY_GV.MAX_CARACT] + " ..."
             else:
-                x['description'] = val[:MAX_CARACT]
+                x['description'] = val[:MYSY_GV.MAX_CARACT]

             insertObject.append(JSONEncoder().encode(user))

@@ -528,7 +517,7 @@ def recherche_text_simple(diction):
         mycommon.myprint(" pour phrase : #" + search_text + "#, voici la liste des formations")
         mycommon.myprint(tab_training)

-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']


         '''
@@ -564,10 +553,10 @@ def recherche_text_simple(diction):

             user = x
             val = x['description']
-            if (len(x['description']) > MAX_CARACT):
-                x['description'] = val[:MAX_CARACT] + " ..."
+            if (len(x['description']) > MYSY_GV.MAX_CARACT):
+                x['description'] = val[:MYSY_GV.MAX_CARACT] + " ..."
             else:
-                x['description'] = val[:MAX_CARACT]
+                x['description'] = val[:MYSY_GV.MAX_CARACT]
             insertObject.append(JSONEncoder().encode(user))

         #print(insertObject)
@@ -732,7 +721,7 @@ def store_recherche(diction, user_recid=""):
         mydata['valide'] = diction['valide']


-        coll_name = dbname['user_recherche']
+        coll_name = MYSY_GV.dbname['user_recherche']

         # Si le champ "id" est renseigné, il s'agit d'une mis jour
         mycommon.myprint(str(inspect.stack()[0][3]) + " on va stocker "+str(mydata)+" id = "+str(mydata_id))
@@ -892,7 +881,7 @@ def recherche_tips_ret_ref(diction):
         #mycommon.myprint(" pour phrase : #" + str(chaine[1]).lower() + "# , Pour le tips #"+ str(my_tips)+"#, voici la liste des formations")
         #mycommon.myprint(tab_training)

-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         return True, tab_training

@@ -1034,7 +1023,7 @@ def recherche_tips(diction):
         mycommon.myprint(" pour phrase : #" + str(chaine[1]).lower() + "# , Pour le tips #"+ str(my_tips)+"#, voici la liste des formations")
         mycommon.myprint(tab_training)

-        coll_name = dbname['myclass']
+        coll_name = MYSY_GV.dbname['myclass']

         insertObject = []
         for x in coll_name.find({"external_code": {"$in": tab_training}}, {"_id": 0, "indexed": 0, "indexed_desc": 0, "indexed_obj": 0, "indexed_title": 0,
@@ -1042,10 +1031,10 @@ def recherche_tips(diction):
             #mycommon.myprint(x)
             user = x
             val = x['description']
-            if (len(x['description']) > MAX_CARACT):
-                x['description'] = val[:MAX_CARACT] + " ..."
+            if (len(x['description']) > MYSY_GV.MAX_CARACT):
+                x['description'] = val[:MYSY_GV.MAX_CARACT] + " ..."
             else:
-                x['description'] = val[:MAX_CARACT]
+                x['description'] = val[:MYSY_GV.MAX_CARACT]
             insertObject.append(JSONEncoder().encode(user))

         # mycommon.myprint(" insertObject = ", insertObject)
@@ -1125,7 +1114,7 @@ def get_stored_recherche(diction):
             return False, " Impossible de recuperer l'historique de recherche"


-        coll_name = dbname['user_recherche']
+        coll_name = MYSY_GV.dbname['user_recherche']

         RetObject = []
         for retVal in coll_name.find({'user_recid': user_recid, 'valide':'1'}):
youtubes_analyse.py
@@ -0,0 +1,96 @@
+import GlobalVariable as MYSY_GV
+from pymongo import MongoClient
+from collections import Counter
+
+import prj_common as mycommon
+from unidecode import unidecode
+import inspect
+import sys, os
+from autocorrect import Speller
+from datetime import datetime
+import re
+import Ela_Spacy as ls
+
+
+
+def YTUBES_ela_index_class(external_code="", source_field = ""):
+    try:
+
+        collection = MYSY_GV.YTUBES_dbname["mysyclass"]
+
+        print(" lalalalal1")
+        if( len(str(source_field)) == 0):
+            source_field = 'default'
+
+        print(" external_code = "+str(external_code)+ " source_field = "+str(source_field))
+        for doc in collection.find({"external_code":external_code}):
+            print(" 3333")
+            # Indexation du champ : video_text , donc la transcription d'une video
+            if (str(source_field) == "video_text"):
+                print(" lalalalal")
+                chaine = doc["video_text"]
+                # mycommon.myprint(" external_code = "+str(doc["external_code"])+" ==> "+str(chaine))
+                retval = ls.YTUBES_ela_index_record_field(chaine, str(doc["external_code"]), source_field)
+
+                '''
+                # Apres indexation, on met a jour la formation pour
+                # mettre le champ "indexe" a 1.
+                '''
+                if (retval):
+                    # mise à jour du champ "indexed"
+                    myobjectif = {}
+                    myobjectif['indexed_video_text'] = '0'
+
+                    ret_val2 = collection.find_and_modify(query={"external_code": external_code},
+                                                          update={"$set": myobjectif}
+                                                          )
+
+                    if (ret_val2 and ret_val2['_id']):
+                        mycommon.myprint("La formation indexée YTUBES (champs video_text) =" + str(ret_val2['_id']))
+                        return True
+                    else:
+                        return False
+
+        return True
+    except Exception as e:
+        exc_type, exc_obj, exc_tb = sys.exc_info()
+        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
+        return False
+
+
+def YTUBES_RedText():
+    try:
+
+        print(" on est dans ytubes read")
+        Y_collection = MYSY_GV.YTUBES_dbname["mysyclass"]
+
+
+        nb_class_indexed = 0
+        for doc in Y_collection.find({'indexed_video_text': '0'}):
+            if ("video_text" in doc.keys()):
+                chaine = doc["video_text"]
+                #mycommon.myprint(" video_texte = " + str(doc["external_code"]) + " ==> " + str(chaine))
+
+
+                retval = YTUBES_ela_index_class(str(doc["external_code"]), "video_text")
+                # print(" apres : ela_index_all_classes_video_texte retval = " + str(retval))
+
+                if (retval is False):
+                    mycommon.myprint("IMPOSSIBLE d'indexer la formation = " + str(doc["external_code"]))
+                else:
+                    nb_class_indexed = nb_class_indexed + 1
+
+                # retval = ls.ela_index_record_field(chaine, str(doc["external_code"]))
+
+        mycommon.myprint(str(nb_class_indexed) + " ont été indexes YTUBES- champ video_text")
+        return True, str(nb_class_indexed) + " ont été indexes YTUBES - champ video_text "
+
+
+        return True
+
+    except Exception as e:
+        exc_type, exc_obj, exc_tb = sys.exc_info()
+        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
+        return False
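Two observations on the flag handling in this new file: find_and_modify was deprecated and later removed from PyMongo, and the YTUBES variant writes indexed_video_text = '0' after indexing, so YTUBES_RedText will pick the same document up again on the next run (the non-YTUBES path writes '1'). A hedged equivalent using the current PyMongo API, assuming '1' is the intended flag value:

from pymongo import ReturnDocument
import GlobalVariable as MYSY_GV

collection = MYSY_GV.YTUBES_dbname["mysyclass"]

# find_one_and_update is the supported replacement for find_and_modify
ret_val2 = collection.find_one_and_update(
    {"external_code": "DmZxdIvG3YQ"},       # example id taken from the indexed data
    {"$set": {"indexed_video_text": "1"}},  # '1' marks the document as indexed
    return_document=ReturnDocument.AFTER,
)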