doublesizebed committed on
Commit
fbde303
·
1 Parent(s): 60ebaee

Initial Docker Space

Browse files
Files changed (2) hide show
  1. app.py +0 -18
  2. requirements.txt +1 -5
app.py CHANGED
@@ -2,7 +2,6 @@ import os
2
  import re
3
  import asyncio
4
  import torch
5
- import nltk
6
  import soundfile as sf
7
  from flask import Flask, request, jsonify, send_from_directory
8
  from flask_cors import CORS
@@ -10,10 +9,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
10
  from deep_translator import GoogleTranslator
11
  from textblob import TextBlob
12
  from parler_tts import ParlerTTSForConditionalGeneration
13
- from g2p import make_g2p
14
- import fasttext
15
- import string
16
- from huggingface_hub import hf_hub_download
17
 
18
  # Flask setup
19
  dir_path = os.path.dirname(os.path.realpath(__file__))
@@ -31,19 +26,6 @@ class ChatBot:
31
  self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
32
  self.tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
33
  self.model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0").to(self.device)
34
-
35
- try:
36
- nltk.data.find('corpora/brown')
37
- except LookupError:
38
- nltk.download('brown')
39
-
40
- try:
41
- nltk.data.find('tokenizers/punkt')
42
- nltk.data.find('tokenizers/punkt_tab')
43
- except LookupError:
44
- nltk.download('punkt')
45
- nltk.download('punkt_tab')
46
-
47
  # Parler-TTS Setup
48
  self.tts_model = ParlerTTSForConditionalGeneration.from_pretrained("doublesizebed/parler-tts-mini-malay").to(self.device)
49
  self.tts_tokenizer = AutoTokenizer.from_pretrained("C:/Users/Honor/app/model")
 
2
  import re
3
  import asyncio
4
  import torch
 
5
  import soundfile as sf
6
  from flask import Flask, request, jsonify, send_from_directory
7
  from flask_cors import CORS
 
9
  from deep_translator import GoogleTranslator
10
  from textblob import TextBlob
11
  from parler_tts import ParlerTTSForConditionalGeneration
 
 
 
 
12
 
13
  # Flask setup
14
  dir_path = os.path.dirname(os.path.realpath(__file__))
 
26
  self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
27
  self.tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
28
  self.model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0").to(self.device)
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  # Parler-TTS Setup
30
  self.tts_model = ParlerTTSForConditionalGeneration.from_pretrained("doublesizebed/parler-tts-mini-malay").to(self.device)
31
  self.tts_tokenizer = AutoTokenizer.from_pretrained("C:/Users/Honor/app/model")
requirements.txt CHANGED
@@ -3,11 +3,7 @@ flask-cors
3
  nest_asyncio
4
  transformers>=4.30
5
  torch
6
- fasttext
7
  deep-translator
8
  textblob==0.17.1
9
  parler-tts
10
- soundfile
11
- nltk
12
- g2p-en
13
- huggingface-hub
 
3
  nest_asyncio
4
  transformers>=4.30
5
  torch
 
6
  deep-translator
7
  textblob==0.17.1
8
  parler-tts
9
+ soundfile