import hashlib
from pymongo import MongoClient, ReturnDocument
import pymongo
from difflib import SequenceMatcher
import textdistance
from datetime import datetime
import logging
import secrets
from bson import ObjectId
import inspect
from werkzeug.utils import secure_filename
import time
import os
import csv
import sys
import pandas as pd
from unidecode import unidecode
import GlobalVariable as MYSY_GV
from serpapi import GoogleSearch
import prj_common as mycommon
import re
import email_mgt
import random

def myprint(message=""):
    logging.info(str(datetime.now()) + " : " + str(message))
    print(str(datetime.now()) + " : " + str(message))

def create_token_urlsafe():
    return secrets.token_urlsafe(MYSY_GV.TOKEN_SIZE)


def create_user_recid():
    return secrets.token_hex(MYSY_GV.TOKEN_SIZE)

'''
This function receives an uploaded file and saves it
in the "./Data/" folder.

It returns the name of the saved file.
'''
def Upload_Save_CSV_File(file=None, Folder=None):
    try:
        basename = os.path.basename(file.filename)
        basename2 = basename.split(".")

        '''
        Check that the file really is a csv file and that its name contains no extra "."
        '''
        if (len(basename2) != 2):
            myprint(str(inspect.stack()[0][3]) + " - : Le nom du fichier est incorrect")
            return False, None

        if (str(basename2[1]).lower() != "csv"):
            myprint(str(inspect.stack()[0][3]) + " - : Ce n'est pas un fichier csv")
            return False, None

        timestr = time.strftime("%Y%m%d%H%M%S")
        new_file_name = str(basename2[0]) + "_" + str(timestr) + "." + str(basename2[1])
        file.filename = new_file_name
        file.save(os.path.join(str(Folder), secure_filename(file.filename)))

        Global_file_name = "./Data/" + file.filename

        return True, Global_file_name

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, None

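# --- Hedged usage sketch (not part of the original module) ------------------
# Upload_Save_CSV_File expects an object exposing ``filename`` and ``save``
# (typically a Werkzeug/Flask FileStorage taken from ``request.files``). The
# stand-in class below only mimics those two members so the call can be
# exercised without a web server; names and paths are illustrative only.
def _demo_upload_save_csv_file(folder="./Data/"):
    class _FakeUpload:
        def __init__(self, filename):
            self.filename = filename

        def save(self, path):
            with open(path, "w") as f:
                f.write("col1;col2\n")

    status, saved_name = Upload_Save_CSV_File(_FakeUpload("clients.csv"), folder)
    print(status, saved_name)
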
'''
This function takes an email and a token,
then checks the validity of the (email, token, status) triple.
'''
def check_token_validity(email="", token=""):
    try:
        coll_token = MYSY_GV.dbname['user_token']

        tmp_count = coll_token.count_documents({'token': str(token), 'valide': '1'})
        if (tmp_count <= 0):
            myprint("Le token n'est pas valide")
            return False
        return True

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False

'''
This function takes an email and a token,
then checks whether the user account is active.
'''
def check_user_validity(email="", token=""):
    try:
        coll_token = MYSY_GV.dbname['user_account']
        message = {}
        ret = True

        for retVal in coll_token.find({'token': str(token)}):
            user = retVal
            if (user['valide'] == '0'):
                print(" le compte avec le token : " + token + " n'est pas valide")
                message['valide'] = '0'
                ret = False

            if (user['locked'] == '1'):
                print(" le compte avec le token : " + token + " est verrouillé")
                message['locked'] = '1'
                ret = False

        return ret, message

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, message

'''
This function takes a token and checks the validity of the
(email, token, status) triple EXCLUSIVELY FOR PARTNERS,
i.e. against the partner_token collection.
'''
def check_partner_token_validity(email="", token=""):
    try:
        coll_token = MYSY_GV.dbname['partner_token']
        tmp_count = coll_token.count_documents({'token': str(token), 'locked': '0', 'valide': '1'})

        if (tmp_count <= 0):
            myprint("Le token du partenaire n'est pas valide")
            return False
        return True

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False

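# --- Hedged usage sketch (assumes a reachable MongoDB behind MYSY_GV.dbname) -
# Typical guard at the top of a partner API handler; the variable names are
# illustrative and not taken from the original code base.
def _demo_partner_token_guard(token_from_request=""):
    if check_partner_token_validity("", token_from_request) is False:
        return False, "Le token du partenaire n'est pas valide"
    partner_recid = get_parnter_recid_from_token(token_from_request)
    return True, partner_recid
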
'''
Retrieve the user's recid from a token.
'''
def get_user_recid_from_token(token=""):
    if len(str(token)) <= 0:
        myprint(" Le token est vide")
        return False

    coll_token = MYSY_GV.dbname['user_token']
    tmp_val = coll_token.find({'token': str(token), 'valide': '1'})
    user_recid = tmp_val[0]['recid']
    return user_recid

'''
Retrieve the user's email from a token.
'''
def get_user_email_from_token(token=""):
    if len(str(token)) <= 0:
        myprint(" Le token est vide")
        return False

    coll_token = MYSY_GV.dbname['user_token']
    tmp_val = coll_token.find({'token': str(token), 'valide': '1'})
    user_email = tmp_val[0]['email']
    return user_email

'''
Retrieve the user's email from a recid.
'''
def get_user_email_from_recid(recid=""):
    try:
        if len(str(recid)) <= 0:
            myprint(" Le recid est vide")
            return False

        coll_token = MYSY_GV.dbname['user_account']
        tmp_val = coll_token.find({'recid': str(recid), 'active': '1'})

        if (tmp_val and tmp_val[0] and tmp_val[0]['email']):
            user_email = tmp_val[0]['email']
        else:
            return False, "Impossible de supprimer le compte utilisateur"

        return user_email

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, "Impossible de supprimer le compte utilisateur"

'''
Retrieve the partner's recid from a token.
'''
def get_parnter_recid_from_token(token=""):
    if len(str(token)) <= 0:
        myprint(" Le token partner est vide")
        return False

    coll_token = MYSY_GV.dbname['partner_token']
    tmp_val = coll_token.find({'token': str(token), 'valide': '1', 'locked': '0'})
    user_recid = tmp_val[0]['recid']
    return user_recid

def get_user_recid_from_email(email=""):
    if len(str(email)) <= 0:
        myprint(" L'email est vide")
        return False

    coll_user = MYSY_GV.dbname['user_account']
    tmp_val = coll_user.find({'email': str(email), 'active': '1'})
    user_recid = tmp_val[0]['recid']

    return user_recid

'''
This function creates the internal reference of a training course.
'''
def Create_internal_call_ref():
    now = datetime.now()
    retval = "Mysy_" + str(now)
    return retval

def textdist():
    val = textdistance.mra("doe", "dough")
    #print(" mra = " + str(val))

    val2 = textdistance.editex("doe", "dough")
    #print(" editex = " + str(val2))

def similaire():
    mots = ["Durand est present", "Meyer", "Dupond", "Dopon", "DUPON", "Nguyen", "Toto"]
    ratio = 0.8

    for mot in mots:
        #print(" CMP de 'Dupont' et '" + mot + "'")
        my_ratio = SequenceMatcher(None, "Dupont", mot).ratio()
        #print(" ## RATIO = " + str(my_ratio))

    #resultat = [mot for mot in mots if SequenceMatcher(None, "Dupont", mot).ratio() >= ratio]
    #print(resultat)

    return

def levenshtein(mot1, mot2):
    try:
        # ligne_i is an array such that, throughout the algorithm,
        # ligne_i[k] holds the Levenshtein distance between the first k letters of mot1
        # and the first i letters of mot2.
        # At the start, i=0, and the distance between the first k letters of mot1 and the
        # empty string is of course k (k deletions are needed to go from the first k
        # letters of mot1 to the empty string).
        ligne_i = [k for k in range(len(mot1) + 1)]
        # i then ranges from 1 to len(mot2)
        for i in range(1, len(mot2) + 1):
            # i has just been incremented; keep the previous row (number i-1) in ligne_prec
            ligne_prec = ligne_i
            # Build the new row: its first element (element number 0) must be the
            # Levenshtein distance between the empty string ("") and the first i letters
            # of mot2, i.e. i (i insertions are needed to go from the empty string to the
            # first i letters of mot2).
            ligne_i = [i] * (len(mot1) + 1)
            # Fill the rest of row i, i.e. compute ligne_i[k] for k from 1 to len(mot1)
            for k in range(1, len(ligne_i)):
                # cout is 0 when the k-th letter of mot1 equals the i-th letter of mot2, 1 otherwise.
                # The k-th letter of mot1 is mot1[k-1] since indices start at 0.
                cout = int(mot1[k - 1] != mot2[i - 1])
                # The heart of the algorithm: compute ligne_i[k] for any i and k,
                # knowing ligne_prec[k-1], ligne_prec[k] and ligne_i[k-1]
                ligne_i[k] = min(ligne_i[k - 1] + 1, ligne_prec[k] + 1, ligne_prec[k - 1] + cout)
        # When the loop exits, i equals len(mot2).
        # The value we want is the Levenshtein distance between the len(mot1) first letters
        # of mot1 and the len(mot2) first letters of mot2, stored in ligne_i[len(mot1)]
        return ligne_i[len(mot1)]

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, None

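# --- Hedged usage sketch (not part of the original module) ------------------
# A minimal check of the pure-Python Levenshtein implementation above; the
# word pairs are illustrative only.
def _demo_levenshtein():
    # One insertion: "chat" -> "chats"
    assert levenshtein("chat", "chats") == 1
    # One substitution: "formation" -> "formution"
    assert levenshtein("formation", "formution") == 1
    print(levenshtein("niche", "chien"))
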
'''
Check whether a string can be read as a float and convert it.
Returns 0 on error.
'''
def tryFloat(val):
    try:
        val = str(val).replace(",", ".")
        myfloat = float(val)
        return myfloat
    except ValueError:
        return 0

'''
Check whether a string can be read as an int and convert it.
Returns 0 on error.
'''
def tryInt(val):
    try:
        val = str(val).replace(",", ".")
        tab_val = val.split(".")
        myfloat = int(tab_val[0])
        return myfloat
    except ValueError:
        return 0

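# --- Hedged usage sketch (not part of the original module) ------------------
# tryFloat / tryInt accept French decimal commas and fall back to 0 on
# unparseable input; the values below are illustrative only.
def _demo_try_conversions():
    print(tryFloat("12,5"))   # 12.5
    print(tryInt("12,5"))     # 12
    print(tryFloat("abc"))    # 0
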
'''
Check whether the word must NOT be stemmed,
using the "word_not_stem" collection.
'''
def Word_Not_Stemmize(word=None):
    try:
        coll_not_stem = MYSY_GV.dbname["word_not_stem"]
        val_tmp = coll_not_stem.count_documents({'mot': str(word)})

        if (val_tmp > 0):
            return True
        else:
            return False

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible de verifier Word_Not_Stemmize"

'''
This function checks whether a word is in the French dictionary (an internal collection).
If not, the word is stored in a dedicated collection for later processing.
Used when indexing a training course.
'''
def check_word_in_fr_dict(mot=None):
    try:
        print("#### analyse du mot " + str(mot))
        col_name = MYSY_GV.dbname["list_mots_fr"]
        col_name_not_fr = MYSY_GV.dbname["list_mots_not_fr"]
        mydata = {}
        val_tmp = col_name.count_documents({'mot': str(mot)})

        if (val_tmp <= 0):
            myprint(" Le mot '" + mot + "' n'existe pas dans le dictionnaire")

            mydata['mot'] = mot
            mydata['treated'] = int("0")
            mydata['update_date'] = datetime.now()

            ret_val = col_name_not_fr.find_one_and_update(
                {'mot': str(mot)}, {"$set": mydata}, upsert=True,
                return_document=ReturnDocument.AFTER
            )

            if (ret_val is None or '_id' not in ret_val):
                print(" Impossible d'enregistrer le '" + mot + "'")
                return False

            return False

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False

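# --- Hedged illustration (not part of the original module) ------------------
# The function above upserts a document of this shape into ``list_mots_not_fr``
# when a word is missing from ``list_mots_fr``; the word used here is made up.
def _demo_not_fr_document(mot="blockchain"):
    return {'mot': mot, 'treated': int("0"), 'update_date': datetime.now()}
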
'''
This function checks whether a word is in the French dictionary,
in the context of a user search.
'''
def recherche_check_word_in_fr_dict(mot=None):
    try:
        print("#### analyse du mot " + str(mot))
        col_name = MYSY_GV.dbname["list_mots_fr"]
        val_tmp = col_name.count_documents({'mot': str(mot)})

        if (val_tmp <= 0):
            myprint(" Le mot '" + mot + "' n'existe pas dans le dictionnaire")
            return False

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False

'''
Security: requester source IP.
This function checks whether the source IP address is authorized or not.
'''
def check_source_ipv4(source_ip=None):
    try:
        if source_ip in MYSY_GV.AUTORIZED_SOURCE_IPV4:
            myprint(" Security check : IP adresse '" + str(source_ip) + "' connected")
            return True
        else:
            myprint(" Security check : IP adresse '" + str(source_ip) + "' is not autorized")
            return False

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False

'''
In the search flow, the expression typed by the user in the search bar
must be cleaned and pre-processed before entering the pipeline.

The sentence is then returned in "unicode" (accent-stripped) form.
'''
def Parse_Clean_Search_Text(sentence=None):
    try:
        if (len(str(sentence)) <= 0):
            return False, ""

        '''
        /!\ : All "special" characters and punctuation are removed EXCEPT
        - the ":" which is needed to identify patterns, and
        - the ' " ' which is needed to identify patterns
        '''
        list_noises = ['...', '.', ';', ',', '!', '?', ')', '(', '[', ']', '\'', '’', '`', '©', '–',
                       '{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+', '\\n',
                       '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@', '®', '™', '«', '»']

        for noise in list_noises:
            # print(" removing : '" + str(noise) + "' ")
            sentence = sentence.replace(str(noise), " ")

        unicode_sentence = unidecode(sentence)

        return True, unicode_sentence

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, ""

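# --- Hedged usage sketch (not part of the original module) ------------------
# Parse_Clean_Search_Text only needs ``unidecode``, so it can be exercised
# without a database; the query string below is illustrative only.
def _demo_parse_clean_search_text():
    status, cleaned = Parse_Clean_Search_Text("Formation (Excel) : débutant !")
    # Expected: status is True and cleaned reads like "Formation  Excel  : debutant"
    print(status, cleaned)
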
'''
This function fetches the extended search results
using the "serpapi" API.
'''
def Get_Extended_Result(sentence=None):
    try:
        list_extended = []
        external_code_prefixe = str(datetime.now().timestamp()).replace(".", "")
        print("external_code_prefixe = " + str(external_code_prefixe))

        status = RunSearchAPI(sentence, external_code_prefixe)
        if (status is False):
            return False, list_extended

        return True, external_code_prefixe
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        myprint(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, []

'''
This function calls the API and stores the result in the database.
'''
def RunSearchAPI(search_text=None, external_code_prefixe=None):
    try:
        if (len(str(search_text).strip()) <= 0):
            return True

        params = {
            "q": str(search_text),
            "hl": "fr",
            "gl": "fr",
            "num": "20",
            "safe": "active",
            "google_domain": "google.com",
            "api_key": "596cb9a468f8292fcefa6f297444db9c12478685d8734b52efdf8aa53c54fd55"
        }

        mycommon.myprint("######## PARAM RunSearchAPI = " + str(params))

        '''
        Early "return False" to avoid consuming the API credits.
        '''
        return False

        search = GoogleSearch(params)
        results = search.get_dict()
        organic_results = results['organic_results']

        my_collection = MYSY_GV.YTUBES_dbname['mysyserpapi']

        cmpt = 0
        for val in organic_results:
            cmpt = cmpt + 1
            mydata = {}

            mydata['external_code'] = external_code_prefixe + "_" + str(cmpt)
            mydata['title'] = str(val['title'])
            mydata['url'] = str(val['link'])

            if ("snippet" in val.keys()):
                if val['snippet']:
                    mydata['description'] = str(val['snippet'])

            if ("snippet_highlighted_words" in val.keys()):
                if val['snippet_highlighted_words']:
                    mydata['tags'] = str(val['snippet_highlighted_words'])

            mydata['update_date'] = str(datetime.now())

            if ("position" in val.keys()):
                if val['position']:
                    mydata['rang'] = str(val['position'])

            mydata['orign_search_text'] = str(search_text)
            mydata['valide'] = "1"
            mydata['treated'] = "0"

            ret_val = my_collection.find_one_and_update({'url': str(mydata['url'])},
                                                        {"$set": mydata},
                                                        upsert=True,
                                                        return_document=ReturnDocument.AFTER
                                                        )

            if ret_val and ret_val['_id']:
                mycommon.myprint(" Le document de la recherche étendu a bien été ajouté = " + str(ret_val['_id']))

            else:
                mycommon.myprint(" WARNING : Impossible d'ajouter le document de la recherche étentue " + str(mydata['url']))

        return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False

'''
This function takes a word
and returns its masculine / singular form.
It will be enriched over time.

/!\ This function takes a single word, not a sentence.
'''
def GetMasculinSingulier(word=None):
    try:
        mot = str(word).lower()  # lowercase the word
        # Strip plural / feminine endings:

        # words ending in "ees"
        patter2 = re.compile(r"\w+(ees)+$")

        # words ending in "s"
        patter3 = re.compile(r"\w+(s)+$")

        # words ending in "x"
        patter4 = re.compile(r"\w+(x)+$")

        if (len(str(mot)) > 3):
            if (re.match(patter2, str(mot))):
                neword = mot[:-2]
                return True, neword

            if (re.match(patter3, str(mot))):
                neword = mot[:-1]
                return True, neword

            if (re.match(patter4, str(mot))):
                neword = mot[:-1]
                return True, neword

        return True, str(mot)

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        mycommon.myprint(str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, ""

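# --- Hedged usage sketch (not part of the original module) ------------------
# GetMasculinSingulier only uses ``re``, so it can be tried directly; the
# sample words are illustrative only.
def _demo_get_masculin_singulier():
    print(GetMasculinSingulier("formations"))   # expected: (True, 'formation')
    print(GetMasculinSingulier("travaux"))      # expected: (True, 'travau')
    print(GetMasculinSingulier("chat"))         # expected: (True, 'chat')
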
'''
This function creates a unique external code based on the current timestamp.
'''
def CreateMyCode():
    try:
        mycode = str(datetime.now().timestamp()).replace(".", '').replace(',', '')
        return True, mycode

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, '-1'

'''
This function fetches all messages waiting to be sent
from the 'user_message' collection and triggers the
appropriate processing.
'''
def CronUSerMessage_Mail():
    try:
        nb_message = 0
        coll_message = MYSY_GV.dbname['user_message']
        my_today = datetime.today()

        for val in coll_message.find({'sent': '0', 'valide': '1', 'type': 'email'}):
            ismail_traited = '1'
            error_message = ""
            nb_message = nb_message + 1
            print(" traitement du message : " + str(val))

            mail_recever = ""
            if ("recever_mail" in val.keys()):
                if val['recever_mail']:
                    mail_recever = str(val['recever_mail']).strip()

            mail_object = ""
            if ("object" in val.keys()):
                if val['object']:
                    mail_object = str(val['object']).strip()

            message = ""
            if ("message" in val.keys()):
                if val['message']:
                    message = str(val['message']).strip()

            # email pattern
            patter_mail = re.compile(r"^[\w\.]+@([\w-]+\.)+[\w]{2,4}$")

            if (re.match(patter_mail, str(mail_recever))):
                if (len(mail_object) == 0 or len(message) == 0):
                    myprint(" WARNING : Impossible d'envoyer le mail au destinataire. l'objet ou le message sont vides : mail_object = "
                            + str(mail_object) + " ou message = " + str(message))
                    ismail_traited = 'error'
                    error_message = "l'objet ou le message sont vides"

                else:
                    # send the email
                    if (email_mgt.SendGenericEmail(mail_recever, mail_object, message) is False):
                        ismail_traited = 'error'
                        error_message = " Erreur SMTP "

            else:
                myprint(" WARNING : Impossible d'envoyer le mail au destinataire. format incorrecte: " + str(mail_recever))
                ismail_traited = 'error'
                error_message = "format du mail receveur est incorrecte "

            '''
            Update the message status.
            '''
            ret_val = coll_message.find_one_and_update({'_id': ObjectId(val['_id'])},
                                                       {"$set": {'sent': str(ismail_traited),
                                                                 'error_message': str(error_message)}},
                                                       return_document=ReturnDocument.AFTER
                                                       )

            if (ret_val and ret_val['_id']):
                nb_doc = str(ret_val['_id'])
                myprint(" le message = " + str(nb_doc) + " a bien été mis à jour")

            else:
                myprint(
                    str(inspect.stack()[0][3]) + " WARNING : Impossible de mettre à jour le user_message = " + str(
                        val['_id']))

        return True, str(my_today), str(nb_message) + " traite (s) : OK"
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, str(datetime.today()), " Impossible de traiter les demandes d'envoi d'email "

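# --- Hedged illustration (not part of the original module) ------------------
# Minimal shape of a 'user_message' document that CronUSerMessage_Mail would
# pick up; field names come from the loop above, the values are made up.
def _demo_user_message_document():
    return {
        'type': 'email',
        'sent': '0',
        'valide': '1',
        'recever_mail': 'jean.dupont@example.com',
        'object': 'Bienvenue',
        'message': 'Votre compte est actif.',
    }
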
'''
This function takes an array (cursor) and returns the same
elements in a random order.
/!\ this function is too slow, I cannot use it.
'''
def RendomizeTab(table):
    try:
        if (table.count() <= 0):
            print(" impossible de rendomize")
            return False, []

        taille = table.count() - 1
        new_tab = []
        new_tab_cpt = 0
        i = 0

        while (new_tab_cpt <= taille):
            cpt = random.randint(0, taille)
            #print("cpt = " + str(cpt) + " - len(table) = " + str(table.count()))
            #print(" ANALYSE DE " + str(table[cpt]['_id']))

            if (table[cpt] not in new_tab):
                #print(" AJOUT DE " + str(table[cpt]['_id']))
                new_tab.append(table[cpt])
                new_tab_cpt = new_tab_cpt + 1

        return True, new_tab
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, []

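# --- Hedged alternative sketch (not part of the original module) ------------
# The comment above notes that RendomizeTab is too slow. A common way to
# shuffle a pymongo cursor is to materialize it once and let random.shuffle
# do the work in O(n); this sketch assumes the result fits in memory.
def _demo_shuffle_cursor(cursor):
    rows = list(cursor)
    random.shuffle(rows)
    return rows
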
def Reordertab():
    try:
        coll_name = MYSY_GV.dbname['myclass']
        table = coll_name.find({'trainer': 'PYRAMYD'},
                               {"external_code": 1, "title": 1, "_id": 1, "url": 1, })

        print(str(table.count()))
        taille = table.count() - 1
        new_tab = []
        new_tab_cpt = 0
        i = 0

        while (new_tab_cpt <= taille):
            cpt = random.randint(0, taille)
            #print("cpt = " + str(cpt) + " - len(table) = " + str(table.count()))
            #print(" ANALYSE DE " + str(table[cpt]['_id']))

            if (table[cpt] not in new_tab):
                #print(" AJOUT DE " + str(table[cpt]['_id']))
                new_tab.append(table[cpt])
                new_tab_cpt = new_tab_cpt + 1

        return True, new_tab
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, []

'''
To be able to display only 100 of the X training courses stored in the
database, an "aLaUne" (front page) flag system is used.
A cron job, at a given periodicity, randomly picks the courses
to display on the front page.
'''
def FormationAlaUne():
    try:
        coll_name = MYSY_GV.dbname['myclass']
        df = pd.DataFrame(list(coll_name.find({'valide': '1'}, {'_id': 1, 'external_code': 1})))
        df['isalaune'] = '0'

        taille2 = df.shape[0]
        affectation = 0
        while (affectation < MYSY_GV.MAINPAGE_QUERY_LIMIT_ROW):
            affectation = affectation + 1
            cpt = random.randint(0, taille2 - 1)
            df.at[cpt, 'isalaune'] = '1'

        i = 0
        tab_id = []
        while (i < taille2):
            if (str(df.at[i, 'isalaune']) == "1"):
                #print(str(df.at[i, 'external_code']) + " = " + str(df.at[i, 'isalaune']))
                tab_id.append(str(df.at[i, '_id']))
            i = i + 1

        '''
        Reset the previous "a la une" flags.
        '''
        result = coll_name.update_many(
            {"isalaune": "1"},
            {
                "$set": {"isalaune": '0'}
            })
        print("raw:", result.raw_result)
        print("acknowledged:", result.acknowledged)
        print("matched_count:", result.matched_count)

        for val in tab_id:
            print(" val =" + val)
            ret_val = coll_name.find_one_and_update(
                {'_id': ObjectId(str(val)), 'valide': '1', 'locked': '0'},
                {"$set": {'isalaune': '1'}},
                return_document=ReturnDocument.AFTER
            )

        return True, "ok"
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, "KO"

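# --- Hedged alternative sketch (not part of the original module) ------------
# The random-pick loop above can select the same row twice, so fewer than
# MAINPAGE_QUERY_LIMIT_ROW courses may end up flagged. pandas can draw the
# sample without replacement in a single call; this is only a sketch.
def _demo_pick_front_page_ids(df, limit):
    picked = df.sample(n=min(limit, df.shape[0]))
    return [str(v) for v in picked['_id']]
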
'''
This function replaces special characters and punctuation with spaces.
'''
def local_Remove_Ponct_Special_Caractere(sentence):
    try:
        text = sentence.lower()  # lowercase the words

        # Remove special characters:
        text = re.sub(r"[,\!\?\%\(\)\/\"]", " ", text)
        text = re.sub(r"\&\S*\s", " ", text)
        text = re.sub(r"\-", " ", text)

        list_noises = ['...', '.', ';', ',', ':', '!', '?', ')', '(', '[', ']', '\'', '"', '’', '`', '©', '–',
                       '{', '}', '-', '=', '°', '#', '-', '/', '~', '&', '\\', '.', '^', '$', '*', '+', '\\n', '\n',
                       '?', '{', '}', '[', ']', '|', '(', ')', '-', '>', '<', '@', '®', '™', '«', '»']

        sentence = text
        for noise in list_noises:
            #print(" removing : '" + str(noise) + "' ")
            sentence = sentence.replace(str(noise), " ")

        #print(" AFTER REPLACE NOISES = " + str(sentence))
        return True, sentence

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - Line : " + str(exc_tb.tb_lineno))
        return False, " Impossible Ela_Remove_Ponct_Special_Caractere"

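# --- Hedged usage sketch (not part of the original module) ------------------
# local_Remove_Ponct_Special_Caractere is a pure string transformation; the
# sample title below is illustrative only.
def _demo_remove_ponct():
    status, cleaned = local_Remove_Ponct_Special_Caractere("Python & Django : niveau 2 (avancé) !")
    # Expected: status is True and cleaned reads like "python   django   niveau 2  avancé"
    print(status, cleaned)
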
def Migration_internal_url():
    try:
        coll_name = MYSY_GV.dbname['myclass']
        for retVal in coll_name.find({}):
            user = retVal

            my_internal_url = str(user['title'])
            local_status, my_internal_url = mycommon.local_Remove_Ponct_Special_Caractere(my_internal_url)
            my_internal_url = unidecode(my_internal_url.lower())
            my_internal_url = my_internal_url.replace(" ", "-")
            my_internal_url = my_internal_url.replace("/", "-")
            if (my_internal_url.startswith('-')):
                my_internal_url = my_internal_url[1:]

            if (my_internal_url.endswith('-')):
                my_internal_url = my_internal_url[:-1]

            suffix = hashlib.md5(my_internal_url.encode()).hexdigest()

            new_internal_url = str(my_internal_url) + "-" + str(suffix[-3:])

            new_internal_url = new_internal_url.replace("---", "-")
            new_internal_url = new_internal_url.replace("--", "-")

            print('new_internal_url = ' + new_internal_url)

            result = coll_name.update_many(
                {'_id': ObjectId(str(user['_id']))},
                {
                    "$set": {"internal_url": str(new_internal_url)}
                })
            '''print("raw:", result.raw_result)
            print("acknowledged:", result.acknowledged)
            print("matched_count:", result.matched_count)'''

        return True, "ok"

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, "KO"

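# --- Hedged usage sketch (not part of the original module) ------------------
# The slug-building part of Migration_internal_url, applied to a single title
# without touching the database; the course title is illustrative only.
def _demo_build_internal_url(title="Formation Python : les bases / niveau 1"):
    status, slug = local_Remove_Ponct_Special_Caractere(title)
    slug = unidecode(slug.lower()).replace(" ", "-").replace("/", "-")
    slug = slug.strip("-")
    suffix = hashlib.md5(slug.encode()).hexdigest()
    slug = (slug + "-" + suffix[-3:]).replace("---", "-").replace("--", "-")
    return slug
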
'''
This function takes a word of X characters
and returns the top X indexed words containing those characters.
/!\ this function is also used for empty searches.
'''
def GetMotFromElaIndex(diction):
    try:
        '''
        # Check that the fields received by the API are in the list of allowed fields.
        # This prevents a third party from adding unknown values to the API.
        # All possible fields of the collection (mandatory or not) must be listed in
        # field_list.
        '''
        field_list = ['mot', 'domaine']

        incom_keys = diction.keys()
        for val in incom_keys:
            if val not in field_list:
                mycommon.myprint(str(inspect.stack()[0][
                                         3]) + " - Le champ '" + val + "' n'existe pas, Creation formation annulée")
                return False, []

        '''
        Once all the keys sent to the API have been checked (previous step),
        check that the mandatory fields are present in the list.
        '''
        field_list_obligatoire = ['mot']

        for val in field_list_obligatoire:
            if val not in diction:
                mycommon.myprint(
                    str(inspect.stack()[0][3]) + " - La valeur '" + val + "' n'est pas presente dans liste ")
                return False, []

        coll_name = MYSY_GV.dbname['elaindex']
        training_mots = []

        mot = ""
        if ("mot" in diction.keys()):
            if diction['mot']:
                mot = diction['mot']

        if (len(mot) < 3):
            return True, []

        search = str(mot)
        search_expr = re.compile(f".*{search}.*", re.I)

        print(" #### mot recu " + mot + " search_expr = " + str(search_expr))
        for x in coll_name.find({'mots': {'$regex': search_expr}}). \
                sort([("occurence", pymongo.DESCENDING)]). \
                limit(MYSY_GV.HELP_WORD_QUERY_LIMIT):
            if (x['mots'] not in training_mots):
                training_mots.append(x['mots'])

        return True, training_mots

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, []

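# --- Hedged usage sketch (not part of the original module) ------------------
# Illustration of the substring regex used by GetMotFromElaIndex for the
# autocomplete lookup; re.escape is added here because the user input is
# interpolated into the pattern (the function above does not escape it).
def _demo_elaindex_pattern(mot="exce"):
    pattern = re.compile(f".*{re.escape(mot)}.*", re.I)
    print(bool(pattern.match("Formation Excel avancé")))   # True
    print(bool(pattern.match("Gestion de projet")))        # False
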
'''
Fix for the mymooc.com title error.
'''
def Migration_mooc_title():
    try:
        coll_name = MYSY_GV.dbname['myclass']
        for retVal in coll_name.find({'owner': 'mymooc.com'}):

            new_title = str(retVal['title'])

            new_title = new_title.replace("-", " ")
            if (new_title.startswith('-')):
                new_title = new_title[1:]

            if (new_title.endswith('-')):
                new_title = new_title[:-1]

            print(" new_title = " + new_title)
            result = coll_name.update_many(
                {'_id': ObjectId(str(retVal['_id']))},
                {
                    "$set": {"title": str(new_title)}
                })

        return True, "ok"

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(str(inspect.stack()[0][3]) + " -" + str(e) + " - ERRORRRR AT Line : " + str(exc_tb.tb_lineno))
        return False, "KO"