kinely committed on
Commit
b26ff56
·
verified ·
1 Parent(s): b878812

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -7,8 +7,8 @@ import wikipediaapi
7
 
8
  # Initialize Wikipedia API with a custom user-agent
9
  wiki_wiki = wikipediaapi.Wikipedia(
10
- language='en', # Specify the language as a keyword argument
11
- user_agent='HumanizedTextApp/1.0 (kinelyaydenseo19@gmail.com)' # Proper user agent format
12
  )
13
 
14
  # Function to fetch content from Wikipedia
@@ -27,7 +27,7 @@ embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
27
 
28
  # Fetch and create the corpus
29
  titles = [
30
- "Crypto",
31
  "Finance",
32
  "Technology",
33
  "Healthcare",
@@ -71,8 +71,8 @@ if st.button("Generate Humanized Text"):
71
  # Concatenate user input and context for model input
72
  input_text = f"{user_input} {context}"
73
 
74
- # Tokenize input
75
- inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
76
 
77
  # Generate output
78
  outputs = model.generate(inputs.input_ids, max_length=2000, num_return_sequences=1)
 
7
 
8
  # Initialize Wikipedia API with a custom user-agent
9
  wiki_wiki = wikipediaapi.Wikipedia(
10
+ language='en',
11
+ user_agent='HumanizedTextApp/1.0 (kinelyaydenseo19@gmail.com)'
12
  )
13
 
14
  # Function to fetch content from Wikipedia
 
27
 
28
  # Fetch and create the corpus
29
  titles = [
30
+ "Crypto",
31
  "Finance",
32
  "Technology",
33
  "Healthcare",
 
71
  # Concatenate user input and context for model input
72
  input_text = f"{user_input} {context}"
73
 
74
+ # Tokenize input and handle truncation
75
+ inputs = tokenizer(input_text, return_tensors="pt", max_length=1024, truncation=True)
76
 
77
  # Generate output
78
  outputs = model.generate(inputs.input_ids, max_length=2000, num_return_sequences=1)