HaveAI committed on
Commit
b495279
·
verified ·
1 Parent(s): aa13ff5

Upload 5 files

Browse files
Files changed (5) hide show
  1. config.json +1 -33
  2. flare_model.py +2 -25
  3. generation_config.json +1 -9
  4. requirements.txt +2 -0
  5. run_flare.bat +2 -0
config.json CHANGED
@@ -1,33 +1 @@
1
- {
2
- "_name_or_path": "ai-forever/sage-fredt5-distilled-95m",
3
- "architectures": [
4
- "T5ForConditionalGeneration"
5
- ],
6
- "bos_token_id": 1,
7
- "classifier_dropout": 0.0,
8
- "d_ff": 1024,
9
- "d_kv": 64,
10
- "d_model": 512,
11
- "decoder_start_token_id": 0,
12
- "dense_act_fn": "gelu_new",
13
- "dropout_rate": 0.1,
14
- "eos_token_id": 2,
15
- "feed_forward_proj": "gated-gelu",
16
- "initializer_factor": 1.0,
17
- "is_encoder_decoder": true,
18
- "is_gated_act": true,
19
- "layer_norm_epsilon": 1e-06,
20
- "max_length": 256,
21
- "model_type": "t5",
22
- "num_decoder_layers": 8,
23
- "num_heads": 6,
24
- "num_layers": 8,
25
- "output_past": true,
26
- "pad_token_id": 0,
27
- "relative_attention_max_distance": 128,
28
- "relative_attention_num_buckets": 32,
29
- "tie_word_embeddings": false,
30
- "transformers_version": "4.33.2",
31
- "use_cache": true,
32
- "vocab_size": 50364
33
- }
 
1
+ {"model_name": "encoder_model_quantized", "device": "cpu"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
flare_model.py CHANGED
@@ -1,25 +1,2 @@
1
-
2
- import random
3
- import json
4
-
5
- class FlareModel:
6
- def __init__(self):
7
- self.context = {}
8
-
9
- def analyze_message(self, message):
10
- # Пример анализа сообщения
11
- return f"Сообщение анализируется: {message}"
12
-
13
- def generate_response(self, message):
14
- responses = [
15
- "Привет, как я могу помочь?",
16
- "Что-то еще? Спрашивай!",
17
- "А что ты думаешь о том, что я только что сказал?"
18
- ]
19
- return random.choice(responses)
20
-
21
- def load_context(self, data):
22
- self.context.update(data)
23
-
24
- def get_context(self):
25
- return self.context
 
1
+ # Model code for inference goes here
2
+ print("Model loaded and ready for inference!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
generation_config.json CHANGED
@@ -1,9 +1 @@
1
- {
2
- "_from_model_config": true,
3
- "bos_token_id": 1,
4
- "decoder_start_token_id": 0,
5
- "eos_token_id": 2,
6
- "max_length": 256,
7
- "pad_token_id": 0,
8
- "transformers_version": "4.33.2"
9
- }
 
1
+ {"top_k": 50, "top_p": 0.95, "temperature": 0.7}
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,2 +1,4 @@
1
  torch
2
  transformers
 
 
 
1
  torch
2
  transformers
3
+ onnx
4
+ flask
run_flare.bat ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ @echo off
2
+ python flare_model.py --input "Enter your prompt"