File size: 1,616 Bytes
f252528
d688f9a
ef37e4a
d688f9a
f252528
 
 
d688f9a
f252528
 
 
d688f9a
f252528
 
 
 
 
 
 
 
 
 
 
 
 
d688f9a
 
f252528
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d688f9a
f252528
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import gradio as gr

# Thin Gradio wrapper around the Hugging Face hosted inference endpoint for
# Meta-Llama-3-8B; gr.load builds the interface, launch() starts the server.
demo = gr.load("models/meta-llama/Meta-Llama-3-8B")
demo.launch()

# import transformers
# import torch
# import os

# os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets["HF_TOKEN"]
# # os.environ["USE_FLASH_ATTENTION"] = "1"

# print(f"Device name: {torch.cuda.get_device_properties('cuda').name}")
# print(f"FlashAttention available: {torch.backends.cuda.flash_sdp_enabled()}")
# print(f"torch version: {torch.__version__}")


# # model_id = "meta-llama/Meta-Llama-3-8B"

# # pipeline = transformers.pipeline(
# #     "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto"
# # )
# # pipeline("Hey how are you doing today?")

# model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

# pipeline = transformers.pipeline(
#     "text-generation",
#     model=model_id,
#     model_kwargs={"torch_dtype": torch.bfloat16},
#     device_map="auto",
# )

# messages = [
#     {
#         "role": "system",
#         "content": "You are a pirate chatbot who always responds in pirate speak!",
#     },
#     {"role": "user", "content": "Who are you?"},
# ]

# prompt = pipeline.tokenizer.apply_chat_template(
#     messages, tokenize=False, add_generation_prompt=True
# )

# terminators = [
#     pipeline.tokenizer.eos_token_id,
#     pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
# ]

# outputs = pipeline(
#     prompt,
#     max_new_tokens=256,
#     eos_token_id=terminators,
#     do_sample=True,
#     temperature=0.6,
#     top_p=0.9,
# )
# print(outputs[0]["generated_text"][len(prompt) :])

# print("hello")