Update app.py
app.py CHANGED
@@ -14,22 +14,17 @@ import torch
 import json
 import sys
 import os
-#from datasets import load_metric
 from sklearn.metrics import classification_report
 from pandas import read_csv
 from sklearn.linear_model import LogisticRegression
 import sklearn.model_selection
 from sklearn.feature_extraction.text import TfidfTransformer
 from sklearn.feature_extraction.text import CountVectorizer
-#from sklearn.naive_bayes import MultinomialNB
-#from sklearn.model_selection import GridSearchCV
 from sklearn.pipeline import Pipeline, FeatureUnion
 import math
 from sklearn.metrics import accuracy_score
 from sklearn.metrics import precision_recall_fscore_support
 from sklearn.model_selection import train_test_split
-#from sklearn.metrics import Scorer
-#from sklearn.metrics import SCORERS
 import json
 import re
 import numpy as np
@@ -37,13 +32,8 @@ import pandas as pd
 import re
 import nltk
 nltk.download("punkt")
-#stemmer = nltk.SnowballStemmer("english")
-#from nltk.corpus import stopwords
 import string
 from sklearn.model_selection import train_test_split
-# import seaborn as sns
-# from sklearn.metrics import confusion_matrix
-# from sklearn.metrics import classification_report, ConfusionMatrixDisplay
 from transformers import AutoTokenizer, Trainer, TrainingArguments, AutoModelForSequenceClassification, AutoConfig
 import torch
 from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
@@ -54,13 +44,7 @@ from transformers import TextClassificationPipeline, TFAutoModelForSequenceClass
 from transformers import pipeline
 import pickle
 import urllib.request
-# from sklearn.feature_extraction.text import TfidfTransformer
-# from sklearn.feature_extraction.text import CountVectorizer
-#from PyPDF2 import PdfReader
-#from urllib.request import urlopen
-#from tabulate import tabulate
 import csv
-#import gdown
 import pdfplumber
 import pathlib
 import shutil
@@ -505,7 +489,7 @@ def main():
 
 # Write JSON to file
 #with open(DATA_FILE, 'w') as f: #w+
-with
+with open('https://huggingface.co/datasets/Seetha/visual_files/raw/main/level2.json','w') as f:
     st.write(f)
     f.write(json.dump(json_data))
     #json.dump(json_data, f)