Upload flan_t5_xl_with_gpu.py

#1
by Zaven - opened
Files changed (1) hide show
  1. flan_t5_xl_with_gpu.py +103 -0
flan_t5_xl_with_gpu.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Flan-t5-xl_with_GPU.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/15P4GWaUNFBqJf5_I58DxSiusjPiHUeU6
"""

from IPython.display import HTML, display

def set_css():
    # Injects a CSS rule before every cell runs (see registration below) so
    # that long output lines soft-wrap instead of forcing horizontal
    # scrolling in the Colab output pane.
    display(HTML('''
<style>
pre {
white-space: pre-wrap;
}
</style>
'''))

# Run set_css before each cell executes. NOTE(review): get_ipython() only
# exists inside an IPython/Colab runtime; this line fails in plain Python.
get_ipython().events.register('pre_run_cell', set_css)
import multiprocessing
import torch

# Number of logical CPU cores available to this runtime.
cores = multiprocessing.cpu_count()
cores  # bare expression: echoes the value when executed as a notebook cell
# Install the Hugging Face transformers library (quiet mode).
!pip install -q transformers

# deep-translator is used to translate prompts/answers between Armenian
# ('hy') and English ('en') around the model calls below.
!pip install -U deep-translator

from deep_translator import GoogleTranslator

# Use any translator you like, in this example GoogleTranslator
#translated = GoogleTranslator(source='hy', target='en').translate("Բարև, ո՞նց ես։") # output -> Hello, how are you?

# accelerate enables device_map-based model placement, bitsandbytes enables
# quantized loading, and sentencepiece is required by the T5 tokenizer.
!pip install accelerate bitsandbytes sentencepiece
# Prefer the first CUDA device when one is available; otherwise fall back
# to the CPU.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
device  # bare expression: echoes the value when executed as a notebook cell
!pip install streamlit
import streamlit as st

# Minimal Streamlit demo widget. NOTE(review): Streamlit widgets only render
# under `streamlit run <script>`; executed inside a Colab cell, the slider
# simply returns its default value — confirm this cell is intentional here.
x = st.slider('Select a value')
st.write(x, 'squared is', x * x)
47
+
48
+ !apt-get update -qq
49
+ !apt-get install -y -qq git
50
+
51
+ #!git clone https://huggingface.co/spaces/VGG1555/VGG1
52
+
53
+ !pwd
54
+ !ls -la /
55
+ !cd /content/VGG1
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load Flan-T5-XL (~3B parameters). device_map="auto" lets accelerate place
# the weights across the available GPU(s)/CPU automatically, which is why
# accelerate was installed above.
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map = "auto")

# We are running FP32!
# Candidate prompts for the model.
#
# FIX: in the original, every assignment below executed in sequence, so the
# first three values of `my_text` were dead stores — only the final
# (translated Armenian) prompt ever reached the model. The alternatives are
# preserved as comments so they can still be tried by uncommenting one.

# my_text = "Summarize: \
# Science can ignite new discoveries for society, \
# Society has the tendency to refer to old, familiar ways of doing. \
# Chaos is also a part of our society, although increasingly often so.\
# Innovative ways lead to new growthin businesses and under certain conditions all of society participates."

# my_text = "Write an essay with 100 words about Quantum Physics and it's problems regarding our understanding of laws of Physics."

# my_text = "Q: Can Geoffrey Hinton have a conversation with George Washington? Give the rationale before answering."
# my_text = "A short explanation of machine learning for medical applications."

# Translate the Armenian question ("Q: What is an apple?") into English,
# since Flan-T5 is an English-centric model.
translated = "Q: Ի՞նչ է խնձորը։"
my_text = GoogleTranslator(source='hy', target='en').translate(translated)
# We tune it a little bit: SMILE:
# Specify your downstream task: FP32

# Free any cached GPU memory before running inference.
torch.cuda.empty_cache()

# BUG FIX: the original hard-coded .to("cuda"), which raises on a CPU-only
# runtime even though `device` was computed earlier in this script.
# model.device is the device accelerate assigned to the (first) model shard
# under device_map="auto", so inputs land where the model expects them.
input_ids = tokenizer(my_text, return_tensors = "pt").input_ids.to(model.device)

outputs = model.generate(input_ids,
                         min_length = 20,
                         max_new_tokens = 600,
                         length_penalty = 1.6,  # Set to values < 1.0 in order to encourage the model to generate shorter answers.
                         num_beams = 6,         # beam search (deterministic decoding)
                         no_repeat_ngram_size = 3,
                         # NOTE(review): temperature/top_k/top_p only take
                         # effect when do_sample=True; with pure beam search
                         # transformers ignores them (and warns). Kept as-is
                         # to preserve the original behavior.
                         temperature = 0.9,
                         top_k = 150,           # default 50
                         top_p = 0.92,
                         repetition_penalty = 2.1)

# Decode the generated tokens and translate the answer back to Armenian.
print(GoogleTranslator(source='en', target='hy').translate(tokenizer.decode(outputs[0], skip_special_tokens = True)))
#!git clone https://huggingface.co/spaces/VGG1555/VGG1

# Commit and push app.py to the Hugging Face Space. Each `!` magic runs in
# its own subshell, hence the repeated `cd /content/VGG1 &&` prefix — a
# plain `!cd` would not persist between commands.
!cd /content/VGG1 && git status
!cd /content/VGG1 && git add app.py
!cd /content/VGG1 && git commit -m "Add application file"
!cd /content/VGG1 && git push