khaerens committed
Commit 719360a
Parent: 1aac569

removed prints

Files changed (2):
  1. app.py +2 -0
  2. rebel.py +0 -4
app.py CHANGED
@@ -82,6 +82,8 @@ def wiki_show_text(page_title):
         st.session_state['wiki_suggestions'].remove(page_title)
         temp = st.session_state['wiki_suggestions'] + e.options[:3]
         st.session_state['wiki_suggestions'] = list(set(temp))
+    except wikipedia.WikipediaException:
+        st.session_state['nodes'].remove(term)
 
 def wiki_add_text(term):
     if len(st.session_state['wiki_text']) > 4:
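
For context, the two added lines extend the error handling in wiki_show_text. A minimal sketch of how the new branch likely sits next to the existing one, assuming the try block wraps a wikipedia.page lookup (that call and the summary handling are guesses, not taken from app.py):

    import streamlit as st
    import wikipedia

    def wiki_show_text(page_title):
        try:
            # Assumed lookup; the real body of the try block is not shown in the diff.
            summary = wikipedia.page(page_title).summary
            st.session_state['wiki_text'].append(summary)
        except wikipedia.DisambiguationError as e:
            # Existing branch (the hunk's context lines): replace the ambiguous
            # title with up to three of the suggested alternatives.
            st.session_state['wiki_suggestions'].remove(page_title)
            temp = st.session_state['wiki_suggestions'] + e.options[:3]
            st.session_state['wiki_suggestions'] = list(set(temp))
        except wikipedia.WikipediaException:
            # Branch added by this commit: drop the entry from the graph nodes
            # when the lookup fails for any other Wikipedia-related reason.
            # `term` is kept as in the commit; it presumably resolves in an
            # enclosing or module scope in app.py.
            st.session_state['nodes'].remove(term)

Since DisambiguationError derives from WikipediaException in the wikipedia package, the more specific branch has to come first, which is where the commit places the new handler.
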
rebel.py CHANGED
@@ -32,13 +32,10 @@ def generate_knowledge_graph(texts: List[str], filename: str):
     doc = nlp("\n".join(texts).lower())
     NERs = [ent.text for ent in doc.ents]
     NER_types = [ent.label_ for ent in doc.ents]
-    for nr, nrt in zip(NERs, NER_types):
-        print(nr, nrt)
 
     triplets = []
     for triplet in texts:
         triplets.extend(generate_partial_graph(triplet))
-    print(generate_partial_graph.cache_info())
     heads = [ t["head"].lower() for t in triplets]
     tails = [ t["tail"].lower() for t in triplets]
 
@@ -77,7 +74,6 @@ def generate_knowledge_graph(texts: List[str], filename: str):
 
 @lru_cache
 def generate_partial_graph(text: str):
-    print(text[0:20], hash(text))
     triplet_extractor = pipeline('text2text-generation', model='Babelscape/rebel-large', tokenizer='Babelscape/rebel-large')
     a = triplet_extractor(text, return_tensors=True, return_text=False)[0]["generated_token_ids"]["output_ids"]
     extracted_text = triplet_extractor.tokenizer.batch_decode(a)
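
The first rebel.py hunk drops a debug loop that only echoed spaCy's named entities to stdout. For reference, a self-contained sketch of the same inspection, using a stock en_core_web_sm pipeline as a stand-in for whatever model rebel.py loads into nlp:

    import spacy

    # Assumed model; rebel.py constructs its own `nlp` object, which may differ.
    nlp = spacy.load("en_core_web_sm")

    doc = nlp("Napoleon Bonaparte was born in Corsica in 1769.")
    NERs = [ent.text for ent in doc.ents]          # entity surface forms
    NER_types = [ent.label_ for ent in doc.ents]   # labels such as PERSON, GPE, DATE

    # Equivalent of the removed debug loop:
    for nr, nrt in zip(NERs, NER_types):
        print(nr, nrt)
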
 
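The remaining deletions were cache diagnostics: generate_partial_graph is wrapped in functools.lru_cache, so print(generate_partial_graph.cache_info()) reported hit/miss counts and print(text[0:20], hash(text)) showed the string serving as the cache key. A small sketch of that mechanism, with a stand-in for the expensive REBEL pipeline call:

    from functools import lru_cache

    @lru_cache
    def generate_partial_graph(text: str):
        # Stand-in for the Babelscape/rebel-large pipeline call in rebel.py;
        # only the caching behaviour is illustrated here.
        return [{"head": "dummy head", "tail": text[:10]}]

    texts = ["first passage", "second passage", "first passage"]
    triplets = []
    for t in texts:
        # The repeated "first passage" is answered from the cache on its second call.
        triplets.extend(generate_partial_graph(t))

    # What the removed print reported, e.g.:
    # CacheInfo(hits=1, misses=2, maxsize=128, currsize=2)
    print(generate_partial_graph.cache_info())

Because lru_cache keys on the function arguments, text has to stay hashable (a plain str here), which is what the removed hash(text) line was surfacing.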