added current status
- src/__init__.py +0 -0
- src/consistency.py +41 -0
- src/cot.py +46 -0
- src/groq_client.py +22 -0
- src/utils.py +2 -0
- tutorial.ipynb +103 -0
src/__init__.py
ADDED
File without changes
src/consistency.py
ADDED
@@ -0,0 +1,41 @@
from typing import List, Tuple

from .cot import generate_answer


def sample_cot(
    question: str,
    model_id: str,
    temperature: float,
    max_tokens: int,
    exampler: List[Tuple[str, str]]
):
    """
    Draw a single chain-of-thought sample for the given question.
    """
    return generate_answer(
        question,
        model_id,
        temperature,
        max_tokens,
        mode="cot",
        exampler=exampler,
    )


def self_consistent_answer(
    question: str,
    model_id: str,
    temperature: float = 0.5,
    max_tokens: int = 200,
    exampler: List[Tuple[str, str]] = (("", ""),),
    num_samples: int = 3
):
    """
    Sample several reasoning paths and collect their final lines as candidate answers.
    """
    reasoning_paths = []
    results = []

    for _ in range(num_samples):
        reasoning = sample_cot(question, model_id, temperature, max_tokens, exampler)
        reasoning_paths.append(reasoning)

        # The last line of each reasoning path is taken as that sample's answer.
        last_line = reasoning.strip().split("\n")[-1]
        results.append(last_line)

    return reasoning_paths, results
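Not part of the commit, but a sketch of the aggregation step that self-consistency normally adds on top of these sampled paths: take a majority vote over the candidate answers. The question and sample count below are illustrative, and a GROQ_API_KEY must be set for the underlying calls to succeed.

from collections import Counter

from src.consistency import self_consistent_answer

# Sample several chain-of-thought paths, then vote over their final lines.
paths, answers = self_consistent_answer(
    "A bat and a ball cost $1.10 together. The bat costs $1.00 more than the ball. How much does the ball cost?",
    model_id="llama3-8b-8192",
    num_samples=5,
)
majority_answer, votes = Counter(answers).most_common(1)[0]
print(f"{votes}/{len(answers)} samples agree on: {majority_answer}")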
src/cot.py
ADDED
@@ -0,0 +1,46 @@
from typing import Literal, List, Tuple

from .groq_client import groq_chat
from .utils import clean_response


def build_prompt(
    question: str,
    mode: Literal["cot", "base"] = "base",
    exampler: List[Tuple[str, str]] = (("", ""),),
    zero_shot: bool = False
):
    """
    Build either a plain Q/A prompt or a chain-of-thought prompt,
    optionally prefixed with few-shot exemplars.
    """
    if mode == "cot":
        if not zero_shot:
            # Few-shot CoT: prepend the worked exemplars before the question.
            prompt = ""
            for q, a in exampler:
                prompt += f"Q: {q}\nA: {a}\n\n"
            prompt += f"Q: {question}\nA:"
            return prompt
        else:
            # Zero-shot CoT: elicit step-by-step reasoning without exemplars.
            return f"Q: {question}\nA: Let's think step by step:\n"

    return f"Q: {question}\nA:"


def generate_answer(
    question: str,
    model_id: str = "llama3-8b-8192",
    temperature: float = 0.5,
    max_tokens: int = 200,
    mode: Literal["cot", "base"] = "base",
    exampler: List[Tuple[str, str]] = (("", ""),),
    zero_shot: bool = False
):
    """
    Build the prompt, query the Groq chat endpoint, and return the cleaned response.
    """
    prompt = build_prompt(question, mode, exampler, zero_shot)
    response = groq_chat(prompt, model_id, temperature, max_tokens)

    return clean_response(response)
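A possible usage sketch for the module above (not in the commit). The worked exemplar is made up for illustration, and a GROQ_API_KEY is assumed to be available in the environment.

from src.cot import generate_answer

# One worked exemplar for few-shot chain-of-thought prompting (illustrative only).
exemplars = [
    (
        "Roger has 5 tennis balls. He buys 2 cans of 3 tennis balls each. How many tennis balls does he have now?",
        "Roger starts with 5 balls. 2 cans of 3 balls is 6 balls. 5 + 6 = 11. The answer is 11.",
    ),
]

answer = generate_answer(
    "A cafeteria had 23 apples. They used 20 for lunch and bought 6 more. How many apples do they have?",
    mode="cot",
    exampler=exemplars,
)
print(answer)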
src/groq_client.py
ADDED
@@ -0,0 +1,22 @@
import os

from groq import Groq


def init_groq(api_key: str = None):
    # Fall back to the GROQ_API_KEY environment variable when no key is passed.
    api_key = api_key or os.getenv("GROQ_API_KEY")
    return Groq(api_key=api_key)


def groq_chat(prompt: str, model_id: str, temperature: float = 0.7, max_tokens: int = 200):
    client = init_groq()
    response = client.chat.completions.create(
        model=model_id,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        temperature=temperature,
        max_tokens=max_tokens
    )

    return response.choices[0].message.content
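A minimal usage sketch of the client wrapper (not in the commit), assuming the groq package is installed and GROQ_API_KEY is exported; the model id matches the default used elsewhere in the repo.

from src.groq_client import groq_chat

# Single-turn call through the wrapper.
reply = groq_chat(
    "Explain self-consistency decoding in one sentence.",
    model_id="llama3-8b-8192",
    temperature=0.3,
    max_tokens=100,
)
print(reply)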
src/utils.py
ADDED
@@ -0,0 +1,2 @@
def clean_response(response: str) -> str:
    # Placeholder: accept the raw model response and return it stripped of surrounding whitespace.
    return response.strip()
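The committed clean_response is only a placeholder. One plausible extension, an assumption rather than the author's plan, would also drop an echoed "A:" prefix before the answer is parsed downstream:

def clean_response(response: str) -> str:
    # Hypothetical cleanup: trim whitespace and drop a leading "A:" if the model echoes the prompt format.
    text = response.strip()
    if text.startswith("A:"):
        text = text[2:].lstrip()
    return text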
tutorial.ipynb
ADDED
@@ -0,0 +1,103 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "efbec4bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "from groq import Groq\n",
    "from typing import Literal\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e8943c7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "os.environ[\"GROQ_API_KEY\"] = \"<your-groq-api-key>\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "394c1f72",
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_prompt(question, mode: Literal[\"cot\", \"base\"] = \"base\"):\n",
    "\n",
    "    if mode == \"cot\":\n",
    "        return f\"Q: {question}\\nLet's think step by step:\\n\"\n",
    "    else:\n",
    "        return f\"Q: {question}\\nA:\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d6dc71b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Since K is the husband of R, and V is the son of K, then R is the mother of V.\n"
     ]
    }
   ],
   "source": [
    "question = \"If V is the son of K, and K is the father of S and husband of R, then who is R to V\"\n",
    "\n",
    "cot_prompt = build_prompt(question, mode=\"base\")\n",
    "\n",
    "client = Groq()\n",
    "response = client.chat.completions.create(\n",
    "    model=\"llama3-8b-8192\",\n",
    "    messages=[\n",
    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
    "        {\"role\": \"user\", \"content\": cot_prompt}\n",
    "    ],\n",
    "    temperature=0.7,\n",
    "    max_tokens=200,\n",
    ")\n",
    "\n",
    "print(response.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67b9d179",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "LLM_reasoning",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
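Not a cell in the committed notebook, but the natural next step it appears to be building toward: rerunning the same question with the chain-of-thought prompt so the two completions can be compared. This sketch reuses question, build_prompt, and client from the cells above.

# Same request as above, but with the step-by-step prompt from build_prompt.
cot_prompt = build_prompt(question, mode="cot")
response = client.chat.completions.create(
    model="llama3-8b-8192",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": cot_prompt},
    ],
    temperature=0.7,
    max_tokens=200,
)
print(response.choices[0].message.content)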