# -*- coding: utf-8 -*-

import matplotlib

# Use a non-interactive backend so figures can be written to disk on a headless server.
matplotlib.use('Agg')

import time
import init
from datetime import datetime
import corporization
import preprocessing
import topicModeling
import cleaning

from miscellaneous import *

# Run on the server, detached from the terminal, with all output redirected to a log file:
# ssh madonna "nohup /usr/bin/python3 -u /home/jannis.grundmann/PycharmProjects/topicModelingTickets/main.py &> /home/jannis.grundmann/PycharmProjects/topicModelingTickets/log/printout_main.log &"

start = time.time()  # wall-clock start of the whole pipeline run

# idea: http://bigartm.org/
# idea: http://wiki.languagetool.org/tips-and-tricks
# idea: https://en.wikipedia.org/wiki/Noisy_text_analytics
# idea: https://gate.ac.uk/family/

# idea: drop frequent n-grams, e.g. "Damen und Herren" (see the sketch below)
# idea: merge similar LLDA topics (see the sketch below)
# idea: train the LDA so that the term <-> topic assignment does not get too weak,
#       while keeping as many topics as possible (see the sketch below)
# question: which employees handled which topics? idea: replace topics with employee numbers
# idea: check each word against a semantic net first: if it is too far away, ignore it (see the sketch below)

# todo: test the models
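

# --- Sketch for the "drop frequent n-grams" idea above (illustration only). ---
# Not wired into the pipeline; the real cleaning lives in cleaning.py / preprocessing.py.
# It counts how many documents each n-gram occurs in and strips n-grams above a
# document-frequency threshold (boilerplate such as "Damen und Herren").
from collections import Counter


def drop_frequent_ngrams(token_docs, n=2, max_doc_freq=0.5):
    """token_docs: list of token lists. Removes n-grams occurring in more than
    max_doc_freq of all documents."""
    doc_freq = Counter()
    for tokens in token_docs:
        doc_freq.update({tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)})
    threshold = max_doc_freq * len(token_docs)
    frequent = {ngram for ngram, count in doc_freq.items() if count > threshold}

    cleaned = []
    for tokens in token_docs:
        keep = [True] * len(tokens)
        for i in range(len(tokens) - n + 1):
            if tuple(tokens[i:i + n]) in frequent:
                keep[i:i + n] = [False] * n
        cleaned.append([tok for tok, k in zip(tokens, keep) if k])
    return cleaned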
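

# --- Sketch for the "merge similar LLDA topics" idea above (illustration only). ---
# Assumes a (num_topics x vocab_size) topic-word matrix, which is NOT necessarily
# what topicModeling.py produces. Near-duplicate topics (cosine similarity above a
# threshold) are greedily folded together by averaging their word distributions.
import numpy as np


def merge_similar_topics(topic_word, threshold=0.8):
    """Return a reduced topic-word matrix with near-duplicate topics merged."""
    topics = [row / row.sum() for row in np.asarray(topic_word, dtype=float)]
    merged = []
    for topic in topics:
        for i, existing in enumerate(merged):
            cosine = float(np.dot(topic, existing) /
                           (np.linalg.norm(topic) * np.linalg.norm(existing)))
            if cosine >= threshold:
                merged[i] = (existing + topic) / 2.0  # running average of duplicates
                break
        else:
            merged.append(topic)
    return np.vstack(merged)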
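

# --- Sketch for the "as many topics as possible, but not too weak" idea above. ---
# One way to operationalise it: sweep the number of topics and keep the largest k
# whose coherence is still close to the best score. This assumes gensim is available;
# the actual model code lives in topicModeling.py and may work differently.
def pick_num_topics(texts, candidate_ks=(10, 20, 30, 50), tolerance=0.95):
    """texts: list of token lists. Returns the chosen number of topics."""
    from gensim.corpora import Dictionary
    from gensim.models import LdaModel, CoherenceModel

    dictionary = Dictionary(texts)
    corpus = [dictionary.doc2bow(doc) for doc in texts]

    scores = {}
    for k in candidate_ks:
        lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=k,
                       passes=5, random_state=42)
        coherence = CoherenceModel(model=lda, texts=texts, dictionary=dictionary,
                                   coherence='c_v')
        scores[k] = coherence.get_coherence()

    best = max(scores.values())
    # largest k that still reaches `tolerance` of the best coherence score
    return max(k for k, score in scores.items() if score >= tolerance * best)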
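

# --- Sketch for the "check words against a semantic net" idea above (illustration only). ---
# Instead of a full semantic net it uses word-vector similarity (a gensim KeyedVectors
# model is assumed) against a small set of domain anchor terms and drops tokens that
# are too far away from all of them. Anchors and threshold are placeholders.
def filter_by_semantic_distance(tokens, vectors, anchors, min_similarity=0.3):
    """Keep only tokens whose best similarity to any anchor term is high enough."""
    kept = []
    for tok in tokens:
        if tok not in vectors:
            continue  # out-of-vocabulary: ignore, as suggested in the idea above
        best = max((vectors.similarity(tok, anchor) for anchor in anchors
                    if anchor in vectors), default=0.0)
        if best >= min_similarity:
            kept.append(tok)
    return kept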
logprint("main.py started at {}".format(datetime.now()))
"""
|
|
init.main()
|
|
logprint("")
|
|
|
|
corporization.main()
|
|
logprint("")
|
|
|
|
cleaning.main()
|
|
logprint("")
|
|
|
|
preprocessing.main()
|
|
logprint("")
|
|
"""

# Topic modeling: only the LDA run is currently enabled.
#topicModeling.main(algorithm="lsa")
logprint("")

#topicModeling.main(algorithm="nmf")
logprint("")

#topicModeling.main(algorithm="llda")
logprint("")

topicModeling.main(algorithm="lda")
logprint("")


end = time.time()

logprint("main.py finished at {}".format(datetime.now()))
logprint("Total Time Elapsed: {0} min".format((end - start) / 60))