# -*- coding: utf-8 -*-

import functools
import logging
import re
import sys
import warnings
import xml.etree.ElementTree as ET

import spacy
import textacy

# Assumed module-level setup (adjust to your environment): a German spaCy
# parser and the path to the lemmatization list used by lemmatizeWord below.
DE_PARSER = spacy.load('de')
LEMMAS = "lemmatization-de.txt"  # dataset from http://www.lexiconista.com/datasets/lemmatization/


############# misc

def printlog(string, level="INFO"):
    """Print and log a message."""
    print(string)
    if level == "INFO":
        logging.info(string)
    elif level == "DEBUG":
        logging.debug(string)
    elif level == "WARNING":
        logging.warning(string)

printlog("Load functions")


def compose(*functions):
    """Compose the given functions right-to-left: compose(f, g)(x) == f(g(x))."""
    def compose2(f, g):
        return lambda x: f(g(x))
    return functools.reduce(compose2, functions, lambda x: x)


def get_calling_function():
    """Finds the calling function in many decent cases.
    https://stackoverflow.com/questions/39078467/python-how-to-get-the-calling-function-not-just-its-name
    """
    fr = sys._getframe(1)  # inspect.stack()[1][0]
    co = fr.f_code
    for get in (
            lambda: fr.f_globals[co.co_name],
            lambda: getattr(fr.f_locals['self'], co.co_name),
            lambda: getattr(fr.f_locals['cls'], co.co_name),
            lambda: fr.f_back.f_locals[co.co_name],  # nested
            lambda: fr.f_back.f_locals['func'],      # decorators
            lambda: fr.f_back.f_locals['meth'],
            lambda: fr.f_back.f_locals['f'],
    ):
        try:
            func = get()
        except (KeyError, AttributeError):
            pass
        else:
            if func.__code__ == co:
                return func
    raise AttributeError("func not found")


def printRandomDoc(textacyCorpus):
    """Print a randomly chosen document (text and metadata) from the corpus."""
    import random
    print()
    printlog("len(textacyCorpus) = %i" % len(textacyCorpus))
    randIndex = int((len(textacyCorpus) - 1) * random.random())
    printlog("Index: {0} ; Text: {1} ; Metadata: {2}".format(
        randIndex, textacyCorpus[randIndex].text, textacyCorpus[randIndex].metadata))
    print()


############# load xml

def generateMainTextfromTicketXML(path2xml, main_textfield='Description'):
    """
    Generates strings from XML.
    :param path2xml: path to the ticket XML file
    :param main_textfield: tag of the field holding the main text
    :yields: strings
    """
    tree = ET.parse(path2xml, ET.XMLParser(encoding="utf-8"))
    root = tree.getroot()
    for ticket in root:
        for field in ticket:
            if field.tag == main_textfield:
                yield field.text


def generateMetadatafromTicketXML(path2xml, leave_out=['Description']):
    """Yields one metadata dict per ticket, skipping the tags listed in leave_out."""
    tree = ET.parse(path2xml, ET.XMLParser(encoding="utf-8"))
    root = tree.getroot()
    for ticket in root:
        metadata = {}
        for field in ticket:
            if field.tag not in leave_out:
                metadata[field.tag] = field.text
        yield metadata


############# load csv

def csv_to_contentStream(path2csv: str, content_collumn_name: str):
    """
    :param path2csv: string
    :param content_collumn_name: string
    :return: string-generator
    """
    stream = textacy.fileio.read_csv(path2csv, delimiter=";")  # ,encoding='utf8')
    content_collumn = 0  # default value
    for i, lst in enumerate(stream):
        if i == 0:
            # look for the desired column in the header row
            for j, col in enumerate(lst):
                if col == content_collumn_name:
                    content_collumn = j
        else:
            yield lst[content_collumn]


def csv_to_metaStream(path2csv: str, metalist: [str]):
    """
    :param path2csv: string
    :param metalist: list of strings
    :return: dict-generator
    """
    stream = textacy.fileio.read_csv(path2csv, delimiter=";")  # ,encoding='utf8')
    metaindices = []
    metadata_temp = {}
    for i, lst in enumerate(stream):
        if i == 0:
            # map each requested column name to its index
            # (could surely be done more efficiently, but it only runs once)
            for j, col in enumerate(lst):
                for key in metalist:
                    if key == col:
                        metaindices.append(j)
            metadata_temp = dict(zip(metalist, metaindices))  # e.g. {'Subject': 1, 'categoryName': 3, 'Solution': 10}
        else:
            metadata = metadata_temp.copy()
            for key, value in metadata.items():
                metadata[key] = lst[value]
            yield metadata
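
# Usage sketch (illustrative): "tickets.csv" and the column names below are
# hypothetical placeholders; substitute the actual file and header names.
#
# for text in csv_to_contentStream("tickets.csv", "Description"):
#     print(text)
#
# for meta in csv_to_metaStream("tickets.csv", ["Subject", "categoryName", "Solution"]):
#     print(meta)  # e.g. {'Subject': '...', 'categoryName': '...', 'Solution': '...'}
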
############################################ Preprocessing ##############################################

############# on str-gen

def processTokens(tokens, funclist, parser):
    # in: token list, function list; out: token list.
    # Dispatch on each function's return annotation: bool -> filter tokens,
    # str -> map tokens (then re-parse), spacy.tokens.doc.Doc -> run on the whole Doc.
    # idea: sort funclist so that all string functions run first, then the text
    # is re-parsed, then token-level functions run, then (optionally) functions
    # on the whole Doc.
    for f in funclist:
        if 'bool' in str(f.__annotations__):
            tokens = list(filter(f, tokens))
        elif 'str' in str(f.__annotations__):
            tokens = list(map(f, tokens))     # plain strings now
            doc = parser(" ".join(tokens))    # re-parse
            tokens = [tok for tok in doc]     # tokens only
        elif 'spacy.tokens.doc.Doc' in str(f.__annotations__):
            # TODO: feels hacky
            doc = parser(" ".join(tok.lower_ for tok in tokens))  # parsed
            tokens = f(doc)
            doc = parser(" ".join(tokens))    # parsed again
            tokens = [tok for tok in doc]     # tokens only
        else:
            warnings.warn("Unknown annotation while preprocessing. Function: {0}".format(str(f)))
    return tokens


def processTextstream(textstream, funclist, parser=DE_PARSER):
    """
    :param textstream: string-gen
    :param funclist: [func]
    :param parser: spacy-parser
    :return: string-gen
    """
    # input: str-stream, output: str-stream
    pipe = parser.pipe(textstream)
    for doc in pipe:
        tokens = [tok for tok in doc]
        tokens = processTokens(tokens, funclist, parser)
        yield " ".join([tok.lower_ for tok in tokens])


def processDictstream(dictstream, funcdict, parser=DE_PARSER):
    """
    :param dictstream: dict-gen
    :param funcdict:
            clean_in_meta = {
                "Solution": funclist,
                ...
            }
    :param parser: spacy-parser
    :return: dict-gen
    """
    for dic in dictstream:
        result = {}
        for key, value in dic.items():
            if key in funcdict:
                doc = parser(value)
                tokens = [tok for tok in doc]
                funclist = funcdict[key]
                tokens = processTokens(tokens, funclist, parser)
                result[key] = " ".join([tok.lower_ for tok in tokens])
            else:
                result[key] = value
        yield result


############# return bool
# Each factory below returns a token predicate and copies its own return
# annotation onto the lambda, so processTokens can dispatch on it.

def keepPOS(pos_list) -> bool:
    ret = lambda tok: tok.pos_ in pos_list
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def removePOS(pos_list) -> bool:
    ret = lambda tok: tok.pos_ not in pos_list
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def removeWords(words, keep=None) -> bool:
    if hasattr(keep, '__iter__'):
        words = list(words)  # copy, so the caller's list is not mutated
        for k in keep:
            try:
                words.remove(k)
            except ValueError:
                pass
    ret = lambda tok: tok.lower_ not in words
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def keepENT(ent_list) -> bool:
    ret = lambda tok: tok.ent_type_ in ent_list
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def removeENT(ent_list) -> bool:
    ret = lambda tok: tok.ent_type_ not in ent_list
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def remove_words_containing_Numbers() -> bool:
    ret = lambda tok: not bool(re.search(r'\d', tok.lower_))
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def remove_words_containing_specialCharacters() -> bool:
    ret = lambda tok: not bool(re.search(r'[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,./<>?]', tok.lower_))
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def remove_words_containing_topLVL() -> bool:
    ret = lambda tok: not bool(re.search(r'\.[a-z]{2,3}(\.[a-z]{2,3})?', tok.lower_))
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def lemmatizeWord(word, filepath=LEMMAS):
    """Look the word up in the lemma list from http://www.lexiconista.com/datasets/lemmatization/"""
    # Note: re-reads the file on every call; cache the list if this becomes a bottleneck.
    for line in list(textacy.fileio.read_file_lines(filepath=filepath)):
        if word.lower() == line.split()[1].strip().lower():
            return line.split()[0].strip().lower()
    return word.lower()  # fallback if no lemma was found


def lemmatize() -> str:
    ret = lambda tok: lemmatizeWord(tok.lower_)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret
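
# Pipeline sketch (illustrative; the function choices and the toy stop-word
# list are examples, not a fixed configuration): a single funclist can mix
# predicates and token mappers, and processTokens dispatches on their annotations.
#
# clean_in_content = [
#     removePOS(["PUNCT", "SPACE"]),       # -> bool: drop punctuation/whitespace
#     removeWords(["der", "die", "das"]),  # -> bool: toy stop-word list
#     remove_words_containing_Numbers(),   # -> bool
#     lemmatize(),                         # -> str: map tokens, then re-parse
# ]
#
# for cleaned in processTextstream(csv_to_contentStream("tickets.csv", "Description"),
#                                  clean_in_content):
#     print(cleaned)
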
############# return strings

mentionFinder = re.compile(r"@[a-z0-9_]{1,15}", re.IGNORECASE)
emailFinder = re.compile(r"\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b", re.IGNORECASE)
urlFinder = re.compile(r"^(?:https?:\/\/)?(?:www\.)?[a-zA-Z0-9./]+$", re.IGNORECASE)
topLVLFinder = re.compile(r'\.[a-z]{2,3}(\.[a-z]{2,3})?', re.IGNORECASE)
specialFinder = re.compile(r'[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,./>?]', re.IGNORECASE)
hardSFinder = re.compile(r'[ß]', re.IGNORECASE)


def replaceEmails(replace_with="EMAIL") -> str:
    ret = lambda tok: emailFinder.sub(replace_with, tok.lower_)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def replaceURLs(replace_with="URL") -> str:
    ret = lambda tok: textacy.preprocess.replace_urls(tok.lower_, replace_with=replace_with)
    # ret = lambda tok: urlFinder.sub(replace_with, tok.lower_)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def replaceSpecialChars(replace_with=" ") -> str:
    ret = lambda tok: specialFinder.sub(replace_with, tok.lower_)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def replaceTwitterMentions(replace_with="TWITTER_MENTION") -> str:
    ret = lambda tok: mentionFinder.sub(replace_with, tok.lower_)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def replaceNumbers(replace_with="NUMBER") -> str:
    ret = lambda tok: textacy.preprocess.replace_numbers(tok.lower_, replace_with=replace_with)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def replacePhonenumbers(replace_with="PHONENUMBER") -> str:
    ret = lambda tok: textacy.preprocess.replace_phone_numbers(tok.lower_, replace_with=replace_with)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def replaceHardS(replace_with="ss") -> str:
    ret = lambda tok: hardSFinder.sub(replace_with, tok.lower_)
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def fixUnicode() -> str:
    ret = lambda tok: textacy.preprocess.fix_bad_unicode(tok.lower_, normalization=u'NFC')
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def resolveAbbreviations():
    pass  # TODO
# TODO: remove words with len < 2 (after resolving abbreviations, esp. "tu" and "fh")
#       and with len > 35 or 50 ("Reiserücktrittskostenversicherung")


############# return docs

def keepUniqeTokens() -> spacy.tokens.Doc:
    ret = lambda doc: set([tok.lower_ for tok in doc])  # note: a set, so token order is not preserved
    ret.__annotations__ = get_calling_function().__annotations__
    return ret


def lower() -> spacy.tokens.Doc:
    ret = lambda doc: [tok.lower_ for tok in doc]
    ret.__annotations__ = get_calling_function().__annotations__
    return ret

################################################################################################################
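
# Combined sketch (illustrative; file and column names are placeholders):
# clean the content stream and the metadata stream with separate rules.
#
# clean_in_content = [
#     fixUnicode(),                   # -> str
#     replaceEmails(),                # -> str
#     replaceHardS(),                 # -> str
#     removePOS(["PUNCT", "SPACE"]),  # -> bool
#     keepUniqeTokens(),              # -> Doc: deduplicate tokens per document
# ]
# clean_in_meta = {"Solution": [removePOS(["SPACE"])]}
#
# contents = processTextstream(csv_to_contentStream("tickets.csv", "Description"), clean_in_content)
# metas = processDictstream(csv_to_metaStream("tickets.csv", ["Subject", "Solution"]), clean_in_meta)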