id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
7,449 | import requests
import re
import streamlit as st
from dataclasses import dataclass
from enum import auto, Enum
from PIL.Image import Image
from PIL import ImageDraw
from streamlit.delta_generator import DeltaGenerator
The provided code snippet includes necessary dependencies for implementing the `postprocess_image` function. Write a Python function `def postprocess_image(text: str, img: Image) -> (str, Image)` to solve the following problem:
Processes the given text to identify and draw bounding boxes on the provided image. This function searches for patterns in the text that represent coordinates for bounding boxes and draws rectangles on the image at these coordinates. Each box is drawn in a different color for distinction. Args: text (str): The text containing bounding box coordinates in a specific pattern. img (Image): The image on which to draw the bounding boxes. Returns: tuple[str, Image]: The processed text with additional annotations for each bounding box, and the image with the drawn bounding boxes.
Here is the function:
def postprocess_image(text: str, img: "Image") -> "tuple[str, Image | None]":
    """
    Draw the bounding boxes / points described in ``text`` onto ``img``.

    The text is scanned for ``[[...]]`` coordinate patterns; each match may
    contain several ``;``-separated groups, where a group of four values
    ``x1,y1,x2,y2`` is a rectangle and a group of two values ``x,y`` is a
    point.  Coordinates are expressed in per-mille (0-1000) of the image size
    and are scaled to pixels.  Each regex match is drawn in its own color.

    Args:
        text (str): The text possibly containing coordinate patterns.
        img (Image): PIL image to draw on (mutated in place when matches exist).

    Returns:
        tuple[str, Image | None]: The (unchanged) text, and the annotated
        image — or ``None`` in place of the image when no pattern is found.
    """
    colors = ["red", "green", "blue", "yellow", "purple", "orange"]
    # One match is "[[g1;g2;...]]", each group a comma-separated number list.
    pattern = r"\[\[([\d,]+(?:;[\d,]+)*)\]\]"
    matches = re.findall(pattern, text)
    if not matches:
        # Nothing to draw: return early *before* touching the image, so we
        # neither allocate a Draw object nor require img to be valid.
        return text, None
    draw = ImageDraw.Draw(img)
    for i, match in enumerate(matches):
        # Splitting the matched string into individual coordinate groups.
        coords_groups = match.split(';')
        # Cycle through the palette so each match gets a distinct color.
        color = colors[i % len(colors)]
        for coords_str in coords_groups:
            coords = coords_str.split(',')
            if len(coords) == 4:  # Rectangle: x1,y1,x2,y2 in 0-1000 space
                scaled_coords = (
                    int(float(coords[0]) * 0.001 * img.width),
                    int(float(coords[1]) * 0.001 * img.height),
                    int(float(coords[2]) * 0.001 * img.width),
                    int(float(coords[3]) * 0.001 * img.height),
                )
                draw.rectangle(scaled_coords, outline=color, width=3)
            elif len(coords) == 2:  # Point: x,y in 0-1000 space
                scaled_coords = (
                    int(float(coords[0]) * 0.001 * img.width),
                    int(float(coords[1]) * 0.001 * img.height),
                )
                radius = 5
                draw.ellipse([scaled_coords[0] - radius, scaled_coords[1] - radius,
                              scaled_coords[0] + radius, scaled_coords[1] + radius],
                             fill=color)
    return text, img
7,450 | import requests
import re
import streamlit as st
from dataclasses import dataclass
from enum import auto, Enum
from PIL.Image import Image
from PIL import ImageDraw
from streamlit.delta_generator import DeltaGenerator
The provided code snippet includes necessary dependencies for implementing the `translate_baidu` function. Write a Python function `def translate_baidu(translate_text, source_lan, target_lan)` to solve the following problem:
Translates text using Baidu's translation service (if you are not using English). This function sends a request to the Baidu translation API to translate the provided text from the source language to the target language. Args: translate_text (str): The text to be translated. source_lan (str): The source language code (e.g., "en" for English). target_lan (str): The target language code (e.g., "zh" for Chinese). Returns: str: The translated text. Returns "error" in case of an exception.
Here is the function:
def translate_baidu(translate_text, source_lan, target_lan):
    """
    Translate text through the Baidu translation HTTP API (for non-English UIs).

    Sends one POST request asking Baidu to translate ``translate_text`` from
    ``source_lan`` to ``target_lan`` and joins the returned segments, one per
    line.

    Args:
        translate_text (str): Text to translate.
        source_lan (str): Source language code (e.g. "en").
        target_lan (str): Target language code (e.g. "zh").

    Returns:
        str: The translated text (newline-terminated segments), or the literal
        string "error" if anything goes wrong (note: an API access token must
        be appended to the URL below).
    """
    url = "https://aip.baidubce.com/rpc/2.0/mt/texttrans/v1?access_token="
    headers = {'Content-Type': 'application/json'}
    payload = {
        'q': translate_text,
        'from': source_lan,
        'to': target_lan
    }
    try:
        response = requests.post(url, json=payload, headers=headers)
        body = response.json()
        # Each returned segment keeps its own trailing newline, matching the
        # line-per-segment output format.
        final_translation = ''.join(item['dst'] + '\n'
                                    for item in body['result']['trans_result'])
    except Exception as e:
        print(e)
        return "error"
    return final_translation
7,451 | import streamlit as st
import base64
import re
from PIL import Image
from io import BytesIO
from streamlit.delta_generator import DeltaGenerator
from client import get_client
from utils import images_are_same
from conversation import Conversation, Role, postprocess_image, postprocess_text
class Conversation:
    """
    Represents a single conversation turn within a dialogue.

    Attributes:
        role (Role): The role of the speaker in the conversation (USER or ASSISTANT).
        content (str): The textual content of the conversation turn (the text
            actually sent to / produced by the model).
        image (Image, optional): An optional image associated with the conversation turn.
        content_show (str, optional): The content to be displayed in the WebUI. This may
            differ from `content` if translation or other processing is applied.
        translate (bool, optional): Whether to translate the content of the conversation turn.
    """
    # NOTE(review): fields are declared in @dataclass style; presumably the
    # class carries a @dataclass decorator in the original module — confirm.
    role: Role = Role.USER
    content: str = ""
    image: Image | None = None
    content_show: str | None = None
    translate: bool = False

    def __str__(self) -> str:
        # Debug trace of the turn; returns "<role>\n<content>" for
        # USER/ASSISTANT (implicitly None for any other role).
        print(self.role, self.content)
        match self.role:
            case Role.USER | Role.ASSISTANT:
                return f'{self.role}\n{self.content}'

    def show(self, placeholder: DeltaGenerator | None = None) -> str:
        """
        Render this turn as Markdown (plus optional image) in the WebUI.

        Args:
            placeholder: Streamlit container to render into; when None, a
                message container appropriate for this turn's role is used.
        """
        if placeholder:
            message = placeholder
        else:
            message = self.role.get_message()
        # For the Chinese WebUI: user input is translated zh->en before being
        # sent to the model; assistant output is translated en->zh for display.
        if self.role == Role.USER:
            if self.translate:
                self.content = translate_baidu(self.content_show, source_lan="zh", target_lan="en")
                if self.content == "error":
                    self.content_show = "Please Enter your Baidu Translation API Key in function translate_baidu()"
            else:
                self.content = self.content_show
        if self.role == Role.ASSISTANT:
            if self.translate:
                self.content_show = translate_baidu(self.content, source_lan="en", target_lan="zh")
            else:
                self.content_show = self.content
        # Prefix newlines with a space (presumably intended as a Markdown hard
        # line break, which normally needs two trailing spaces — confirm).
        self.content_show = self.content_show.replace('\n', ' \n')
        message.markdown(self.content_show)
        if self.image:
            message.image(self.image)
def append_conversation(
    conversation: Conversation,
    history: list[Conversation],
    placeholder: DeltaGenerator | None = None,
) -> None:
    """Record a finished turn in the dialogue history and render it at once.

    Args:
        conversation: The completed Conversation turn to store.
        history: Mutable list of all turns so far; appended to in place.
        placeholder: Optional Streamlit container forwarded to ``show``.
    """
    history.append(conversation)
    conversation.show(placeholder)
7,452 | from io import BytesIO
import base64
import streamlit as st
import re
from streamlit.delta_generator import DeltaGenerator
from client import get_client
from conversation import postprocess_text, Conversation, Role, postprocess_image
from PIL import Image
from utils import images_are_same
class Conversation:
    """
    Represents a single conversation turn within a dialogue.

    Attributes:
        role (Role): The role of the speaker in the conversation (USER or ASSISTANT).
        content (str): The textual content of the conversation turn (the text
            actually sent to / produced by the model).
        image (Image, optional): An optional image associated with the conversation turn.
        content_show (str, optional): The content to be displayed in the WebUI. This may
            differ from `content` if translation or other processing is applied.
        translate (bool, optional): Whether to translate the content of the conversation turn.
    """
    # NOTE(review): fields are declared in @dataclass style; presumably the
    # class carries a @dataclass decorator in the original module — confirm.
    role: Role = Role.USER
    content: str = ""
    image: Image | None = None
    content_show: str | None = None
    translate: bool = False

    def __str__(self) -> str:
        # Debug trace of the turn; returns "<role>\n<content>" for
        # USER/ASSISTANT (implicitly None for any other role).
        print(self.role, self.content)
        match self.role:
            case Role.USER | Role.ASSISTANT:
                return f'{self.role}\n{self.content}'

    def show(self, placeholder: DeltaGenerator | None = None) -> str:
        """
        Render this turn as Markdown (plus optional image) in the WebUI.

        Args:
            placeholder: Streamlit container to render into; when None, a
                message container appropriate for this turn's role is used.
        """
        if placeholder:
            message = placeholder
        else:
            message = self.role.get_message()
        # For the Chinese WebUI: user input is translated zh->en before being
        # sent to the model; assistant output is translated en->zh for display.
        if self.role == Role.USER:
            if self.translate:
                self.content = translate_baidu(self.content_show, source_lan="zh", target_lan="en")
                if self.content == "error":
                    self.content_show = "Please Enter your Baidu Translation API Key in function translate_baidu()"
            else:
                self.content = self.content_show
        if self.role == Role.ASSISTANT:
            if self.translate:
                self.content_show = translate_baidu(self.content, source_lan="en", target_lan="zh")
            else:
                self.content_show = self.content
        # Prefix newlines with a space (presumably intended as a Markdown hard
        # line break, which normally needs two trailing spaces — confirm).
        self.content_show = self.content_show.replace('\n', ' \n')
        message.markdown(self.content_show)
        if self.image:
            message.image(self.image)
def append_conversation(
    conversation: Conversation,
    history: list[Conversation],
    placeholder: DeltaGenerator | None = None,
) -> None:
    """Append a turn to the running history, then immediately display it.

    Args:
        conversation: The Conversation turn to record.
        history: Dialogue history list, mutated in place.
        placeholder: Optional Streamlit container handed to ``show``.
    """
    history.append(conversation)
    conversation.show(placeholder)
7,453 | from __future__ import annotations
from threading import Thread
import streamlit as st
import torch
import warnings
import os
from typing import Any, Protocol
from collections.abc import Iterable
from huggingface_hub.inference._text_generation import TextGenerationStreamResponse, Token
from transformers import AutoTokenizer, TextIteratorStreamer, AutoModelForCausalLM
from conversation import Conversation
# Checkpoint registry for the demo.  Each entry's 'path' can be overridden via
# an environment variable; 'device' lists the CUDA devices the model is
# replicated on (one replica per listed device — see HFClient.__init__).
models_info = {
    'tokenizer': {
        # Single tokenizer shared by all models below.
        'path': os.environ.get('TOKENIZER_PATH', 'lmsys/vicuna-7b-v1.5'),
    },
    'agent_chat': {
        # CogAgent chat model.
        'path': os.environ.get('MODEL_PATH_AGENT_CHAT', 'THUDM/cogagent-chat-hf'),
        'device': ['cuda:0']
    },
    'vlm_chat': {
        # CogVLM chat model.
        'path': os.environ.get('MODEL_PATH_VLM_CHAT', 'THUDM/cogvlm-chat-hf'),
        'device': ['cuda:3']
    },
    'vlm_grounding': {
        # CogVLM grounding (bounding-box) model.
        'path': os.environ.get('MODEL_PATH_VLM_GROUNDING','THUDM/cogvlm-grounding-generalist-hf'),
        'device': ['cuda:6']
    }
}
class Client(Protocol):
    """Structural interface for streaming text-generation backends.

    Any object providing a compatible ``generate_stream`` satisfies this
    Protocol (no inheritance required).
    """

    def generate_stream(self,
                        history: list[Conversation],
                        grounding: bool = False,
                        model_use: str = 'agent_chat',
                        **parameters: Any
                        ) -> Iterable[TextGenerationStreamResponse]:
        """Yield streamed generation responses for the given dialogue history."""
        ...
class HFClient(Client):
    """
    The HFClient class manages the interaction with various large language models
    for text generation tasks. It supports handling multiple models, each designated
    for a specific task like chatting or grounding.

    Args:
        models_info (dict): A dictionary containing the configuration for each model.
            The dictionary format is:
            - 'tokenizer': Path and settings for the tokenizer.
            - 'agent_chat': Path and settings for the CogAgent-chat-18B model.
            - 'vlm_chat': Path and settings for the CogVLM-chat-17B model.
            - 'vlm_grounding': Path and settings for the CogVLM-grounding-17B model.

    Each model is loaded once per CUDA device listed in its 'device' entry, and
    a single tokenizer is shared across all models.
    """

    def __init__(self, models_info):
        self.models = {}
        # One tokenizer instance is shared by every model.
        self.tokenizer = AutoTokenizer.from_pretrained(models_info['tokenizer']['path'], trust_remote_code=True)
        for model_name, model_info in models_info.items():
            if model_name != 'tokenizer':
                # One replica per listed device, for simple load balancing.
                self.models[model_name] = []
                for device in model_info['device']:
                    # NOTE(review): `torch_type` is a module-level global not
                    # visible in this snippet — confirm it is defined
                    # (presumably torch.float16/bfloat16).
                    model = AutoModelForCausalLM.from_pretrained(
                        model_info['path'],
                        torch_dtype=torch_type,
                        low_cpu_mem_usage=True,
                        trust_remote_code=True,
                    ).to(device).eval()
                    self.models[model_name].append(model)

    def select_best_gpu(self, model_name):
        """Return the replica of ``model_name`` whose device currently has the
        least CUDA memory allocated (cheap load balancing across replicas)."""
        min_memory_used = None
        selected_model = None
        for model in self.models[model_name]:
            device = next(model.parameters()).device
            mem_used = torch.cuda.memory_allocated(device=device)
            if min_memory_used is None or mem_used < min_memory_used:
                min_memory_used = mem_used
                selected_model = model
        return selected_model

    def generate_stream(self,
                        history: list,
                        grounding: bool = False,
                        model_use: str = 'agent_chat',
                        **parameters: Any
                        ) -> Iterable[TextGenerationStreamResponse]:
        """
        Generates a stream of text responses based on the input history and selected model.

        Args:
            history (list[Conversation]): A list of Conversation objects representing the
                dialogue history.
            grounding (bool, optional): A flag to indicate whether grounding should be used
                in the generation process. Defaults to False.
            model_use (str, optional): The key name of the model to be used for the generation.
                Defaults to 'agent_chat'.
            **parameters (Any): Additional parameters that may be required for the generation
                process.

        Yields:
            Iterable[TextGenerationStreamResponse]: A stream of text generation responses, each
            encapsulating a generated piece of text.

        The method selects the appropriate model based on `model_use`, processes the input
        history, and feeds it into the model to generate text. It uses threading to handle
        the generation process efficiently.
        """
        query, history, image = process_history(history)
        if grounding:
            # Append the grounding directive to the prompt text.
            query += "(with grounding)"
        model = self.select_best_gpu(model_use)
        device = next(model.parameters()).device
        # Print user input info
        print("\n== Input ==\n", query)
        print("\n==History==\n", history)
        print("\n== Model ==\n\n", model.config.name_or_path)
        print("\n== Device ==\n\n", device)
        input_by_model = model.build_conversation_input_ids(
            self.tokenizer,
            query=query,
            history=history,
            images=[image]
        )
        inputs = {
            'input_ids': input_by_model['input_ids'].unsqueeze(0).to(device),
            'token_type_ids': input_by_model['token_type_ids'].unsqueeze(0).to(device),
            'attention_mask': input_by_model['attention_mask'].unsqueeze(0).to(device),
            'images': [[input_by_model['images'][0].to(device).to(torch_type)]],
        }
        # CogVLM model do not have param 'cross_images', Only CogAgent have.
        if 'cross_images' in input_by_model and input_by_model['cross_images']:
            inputs['cross_images'] = [[input_by_model['cross_images'][0].to(device).to(torch_type)]]
        # Use TextIteratorStreamer for streaming generation like huggingface.
        streamer = TextIteratorStreamer(self.tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
        parameters['streamer'] = streamer
        gen_kwargs = {**parameters, **inputs}
        with torch.no_grad():
            # generate() runs in a worker thread while this generator drains
            # the streamer, yielding one response per decoded text chunk.
            thread = Thread(target=model.generate, kwargs=gen_kwargs)
            thread.start()
            for next_text in streamer:
                yield TextGenerationStreamResponse(
                    token=Token(
                        id=0,
                        logprob=0,
                        text=next_text,
                        special=False,
                    )
                )
def get_client() -> Client:
    """Build and return an HFClient configured from the module-level models_info."""
    return HFClient(models_info)
7,454 | from __future__ import annotations
from threading import Thread
import streamlit as st
import torch
import warnings
import os
from typing import Any, Protocol
from collections.abc import Iterable
from huggingface_hub.inference._text_generation import TextGenerationStreamResponse, Token
from transformers import AutoTokenizer, TextIteratorStreamer, AutoModelForCausalLM
from conversation import Conversation
class Conversation:
    """
    Represents a single conversation turn within a dialogue.

    Attributes:
        role (Role): The role of the speaker in the conversation (USER or ASSISTANT).
        content (str): The textual content of the conversation turn (the text
            actually sent to / produced by the model).
        image (Image, optional): An optional image associated with the conversation turn.
        content_show (str, optional): The content to be displayed in the WebUI. This may
            differ from `content` if translation or other processing is applied.
        translate (bool, optional): Whether to translate the content of the conversation turn.
    """
    # NOTE(review): fields are declared in @dataclass style; presumably the
    # class carries a @dataclass decorator in the original module — confirm.
    role: Role = Role.USER
    content: str = ""
    image: Image | None = None
    content_show: str | None = None
    translate: bool = False

    def __str__(self) -> str:
        # Debug trace of the turn; returns "<role>\n<content>" for
        # USER/ASSISTANT (implicitly None for any other role).
        print(self.role, self.content)
        match self.role:
            case Role.USER | Role.ASSISTANT:
                return f'{self.role}\n{self.content}'

    def show(self, placeholder: DeltaGenerator | None = None) -> str:
        """
        Render this turn as Markdown (plus optional image) in the WebUI.

        Args:
            placeholder: Streamlit container to render into; when None, a
                message container appropriate for this turn's role is used.
        """
        if placeholder:
            message = placeholder
        else:
            message = self.role.get_message()
        # For the Chinese WebUI: user input is translated zh->en before being
        # sent to the model; assistant output is translated en->zh for display.
        if self.role == Role.USER:
            if self.translate:
                self.content = translate_baidu(self.content_show, source_lan="zh", target_lan="en")
                if self.content == "error":
                    self.content_show = "Please Enter your Baidu Translation API Key in function translate_baidu()"
            else:
                self.content = self.content_show
        if self.role == Role.ASSISTANT:
            if self.translate:
                self.content_show = translate_baidu(self.content, source_lan="en", target_lan="zh")
            else:
                self.content_show = self.content
        # Prefix newlines with a space (presumably intended as a Markdown hard
        # line break, which normally needs two trailing spaces — confirm).
        self.content_show = self.content_show.replace('\n', ' \n')
        message.markdown(self.content_show)
        if self.image:
            message.image(self.image)
The provided code snippet includes necessary dependencies for implementing the `process_history` function. Write a Python function `def process_history(history: list[Conversation])` to solve the following problem:
Process the input history to extract the query and the history pairs. Args: History(list[Conversation]): A list of Conversation objects representing all conversations. Returns: query(str): The current user input string. history_pairs(list[(str,str)]): A list of (user, assistant) pairs. last_user_image(Image): The last user image. Only the latest image.
Here is the function:
def process_history(history: list[Conversation]):
    """
    Split a full dialogue history into model-ready pieces.

    Args:
        history (list[Conversation]): All conversation turns so far.

    Returns:
        tuple: ``(query, history_pairs, last_user_image)`` where ``query`` is
        the text of the final turn if it is a user turn (otherwise ""),
        ``history_pairs`` is a list of ``(user, assistant)`` text pairs, and
        ``last_user_image`` is the most recent image attached to any user
        turn (None when no user turn carries an image).
    """
    pairs = []
    query = ""
    latest_image = None
    pending_user = None
    last_index = len(history) - 1
    for idx, turn in enumerate(history):
        if turn.role == turn.role.USER:
            # Remember the user text until the matching assistant reply shows up.
            pending_user = turn.content
            if turn.image:
                latest_image = turn.image
            if idx == last_index:
                # A trailing user turn is the query to answer now.
                query = turn.content
        elif pending_user is not None:
            # Assistant turn closes the pending (user, assistant) pair.
            pairs.append((pending_user, turn.content))
            pending_user = None
    return query, pairs, latest_image
7,456 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def disable_untrainable_params(self):
    """
    Freeze every model parameter except the fine-tunable subsets, and report
    the number of trainable parameters.

    Always kept trainable: parameters whose names contain both 'mlp' and 'vit'
    AND whose parsed layer index lies strictly between 45 and 55.  P-tuning
    parameters are additionally enabled when ``args.use_ptuning`` is set, and
    LoRA matrices ('matrix_A'/'matrix_B') when ``args.use_lora`` or
    ``args.use_qlora`` is set.
    """
    total_trainable = 0
    # Name patterns to keep trainable: a (a, b) tuple means "name contains
    # both substrings, restricted to a layer-index window"; a plain string is
    # a simple substring match.
    enable = [('mlp', 'vit')]
    if self.args.use_ptuning:
        enable.extend(['ptuning'])
    if self.args.use_lora or self.args.use_qlora:
        enable.extend(['matrix_A', 'matrix_B'])
    for n, p in self.named_parameters():
        flag = False
        for e in enable:
            if type(e) is tuple:
                # Fragile: assumes parameter names look like '...<idx>.mlp...'
                # so the layer index is the dotted segment just before '.mlp';
                # raises ValueError on names containing 'mlp' in another shape.
                if e[0].lower() in n.lower() and e[1].lower() in n.lower() and 55 > int(n[:n.find('.mlp')].split('.')[-1]) > 45:
                    flag = True
                    break
            else:
                if e.lower() in n.lower():
                    flag = True
                    break
        if not flag:
            p.requires_grad_(False)
        else:
            total_trainable += p.numel()
            print_rank0(n)  # log each parameter name kept trainable
    print_rank0("***** Total trainable parameters: "+str(total_trainable)+" *****")
7,457 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def data_collator(examples):
    """
    Collate per-sample feature dicts into one batched dict of model kwargs.

    Empty sample dicts are dropped; top-level list / ndarray values are
    converted to tensors; tensor values are concatenated along dim 0; the
    per-sample 'vision' sub-dict is flattened into 'vision_*' keys; any
    'cross' entry is discarded.  Non-tensor values are taken from the first
    sample, which acts as the template.

    Args:
        examples (list[dict]): Samples, each with a 'vision' sub-dict plus
            top-level model fields.  Mutated in place ('vision'/'cross' popped).

    Returns:
        dict: Batched keyword arguments for the model forward pass.
    """
    examples = [ex for ex in examples if len(ex) > 0]  # drop {}
    for example in examples:
        for k in example:
            if isinstance(example[k], list):
                example[k] = torch.tensor(example[k])
            elif isinstance(example[k], np.ndarray):
                example[k] = torch.from_numpy(example[k])
    img_args = {}
    tmp_example = examples[0]
    for k in tmp_example['vision']:
        if type(tmp_example['vision'][k]) is torch.Tensor:
            img_args['vision_'+k] = torch.cat([example['vision'][k] for example in examples])
        else:
            # BUG FIX: take the non-tensor value from the template sample
            # (examples[0]) instead of the stale loop variable `example`,
            # which still pointed at the *last* sample after the loop above —
            # mirroring the model_args loop below.
            img_args['vision_'+k] = tmp_example['vision'][k]
    for example in examples:
        example.pop('vision')
        if 'cross' in example:
            example.pop('cross')
    model_args = {}
    tmp_example = examples[0]
    for k in tmp_example:
        if type(tmp_example[k]) is torch.Tensor:
            model_args[k] = torch.cat([example[k] for example in examples])
        else:
            model_args[k] = tmp_example[k]
    model_args.update(img_args)
    return model_args
7,458 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
def get_batch(data_iterator, args, timers):
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
def chat(model, tokenizer, tokens,
max_length: int = 1800, num_beams=5, top_p=0.95, top_k=0, temperature=0.8, **kwargs):
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def forward_step_eval(data_iterator, model, args, timers):
    """
    Evaluation forward step.

    Pulls one batch, autoregressively decodes the prompt continuation with the
    cached mixin, and returns a zero loss plus exact-match accuracy metrics
    (case-sensitive 'acc' and case-insensitive 'acc_w/o_case').
    """
    def compute_metrics(eval_preds):
        # Decode predictions and labels, then compare them string-for-string.
        # NOTE(review): `tokenizer` is not defined in this function — it is
        # presumably a module/global set up by the training driver; confirm.
        preds, labels, device = eval_preds
        preds = preds.unsqueeze(0)
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if args.ignore_pad_token_for_loss:
            # Replace -100 in the labels as we can't decode them.
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        score_dict = {
            "acc": [],
            "acc_w/o_case": [],
        }
        for pred, label in zip(decoded_preds, decoded_labels):
            if args.rank == 0:
                print('pred', pred, 'label', label, flush=True)
            if pred == label:
                score_dict['acc'].append(1.)
            else:
                score_dict['acc'].append(0.)
            if pred.lower() == label.lower():
                score_dict['acc_w/o_case'].append(1.)
            else:
                score_dict['acc_w/o_case'].append(0.)
        for k, v in score_dict.items():
            score_dict[k] = float(np.mean(v))
        return score_dict

    # Get the batch.
    timers('batch generator').start()
    data_b = get_batch(
        data_iterator, args, timers)
    timers('batch generator').stop()

    # Keep only the prompt (context) part of every stream; generation
    # continues from there.
    context_len = int(data_b['context_length'][0])
    tokens = data_b['input_ids'][:, :context_len]
    data_b['vision_expert_mask'] = data_b['vision_expert_mask'][:, :context_len]
    data_b['image_embed_mask'] = data_b['image_embed_mask'][:, :context_len]
    data_b['image_rope_mask'] = data_b['image_rope_mask'][:, :context_len]
    data_b.pop('input_ids')
    data_b.pop('attention_mask')
    data_b.pop('position_ids')
    labels = data_b.pop('labels')
    qid = data_b.pop('question_id')
    # Temporarily attach the KV-cache mixin for fast autoregressive decoding.
    model.add_mixin('auto-regressive', CachedAutoregressiveMixin())
    outputs = chat(model, tokenizer, tokens, **data_b)[0][context_len:]
    # print(outputs)
    model.del_mixin('auto-regressive')
    # Loss is not meaningful for generation-based eval; return 0 plus metrics
    # computed on the decoded continuation vs. the labels.
    return torch.tensor(0, device=outputs.device), {k: torch.tensor(v, device=outputs.device) for k, v in
                                                    compute_metrics(
                                                        (outputs.cpu(), labels.cpu(), outputs.device)).items()}
7,459 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
def get_batch(data_iterator, args, timers):
    """
    Fetch the next batch, broadcast it via ``broadcast_auto``, and cast
    floating tensors to the configured precision.

    Integer tensors (int32 / long) are left untouched; all other tensors are
    converted to half or bfloat16 according to ``args.fp16`` / ``args.bf16``.
    """
    timers('data loader').start()
    data = next(data_iterator) if data_iterator is not None else None
    timers('data loader').stop()

    batch = broadcast_auto(data)
    for key in batch:
        value = batch[key]
        if type(value) is torch.Tensor and value.dtype is not torch.int32 and value.dtype is not torch.long:
            if args.fp16:
                batch[key] = value.half()
            elif args.bf16:
                batch[key] = value.bfloat16()
    return batch
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
The provided code snippet includes necessary dependencies for implementing the `forward_step` function. Write a Python function `def forward_step(data_iterator, model, args, timers)` to solve the following problem:
Forward step.
Here is the function:
def forward_step(data_iterator, model, args, timers):
    """
    Training forward step: fetch one batch, run the model, and compute the
    shifted next-token cross-entropy loss (labels of -100 are ignored).

    Returns:
        tuple: (loss, {'loss': loss}) — the scalar loss twice, as expected by
        the training loop's logging.
    """
    timers('batch generator').start()
    batch = get_batch(
        data_iterator, args, timers)
    labels = batch.pop('labels')
    timers('batch generator').stop()

    logits = model(**batch)[0]
    lm_logits = logits.to(torch.float32)
    # Align logits with the labels they predict: position t predicts label t+1,
    # so take the last len(labels)-1 logit positions (minus the final one).
    shift_labels = labels[..., 1:].contiguous()
    shift_logits = lm_logits[..., -1 - shift_labels.size(-1):-1, :].contiguous()
    criterion = CrossEntropyLoss(ignore_index=-100)
    loss = criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
    loss = loss.to(torch.float32)
    return loss, {'loss': loss}
7,460 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def create_dataset_function(image_processor, text_processor, path, args):
    """Build an ItemDataset over ``path`` with the given image/text processors.

    Note the constructor argument order: ``args`` comes before ``path``.
    """
    return ItemDataset(image_processor, text_processor, args, path)
7,461 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTestCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
def data_collator(examples):
    """
    Collate per-sample feature dicts into one batched dict of model kwargs.

    Empty sample dicts are dropped; top-level list / ndarray values are
    converted to tensors; tensor values are concatenated along dim 0; the
    per-sample 'vision' sub-dict is flattened into 'vision_*' keys; any
    'cross' entry is discarded.  Non-tensor values are taken from the first
    sample, which acts as the template.

    Args:
        examples (list[dict]): Samples, each with a 'vision' sub-dict plus
            top-level model fields.  Mutated in place ('vision'/'cross' popped).

    Returns:
        dict: Batched keyword arguments for the model forward pass.
    """
    examples = [ex for ex in examples if len(ex) > 0]  # drop {}
    for example in examples:
        for k in example:
            if isinstance(example[k], list):
                example[k] = torch.tensor(example[k])
            elif isinstance(example[k], np.ndarray):
                example[k] = torch.from_numpy(example[k])
    img_args = {}
    tmp_example = examples[0]
    for k in tmp_example['vision']:
        if type(tmp_example['vision'][k]) is torch.Tensor:
            img_args['vision_'+k] = torch.cat([example['vision'][k] for example in examples])
        else:
            # BUG FIX: take the non-tensor value from the template sample
            # (examples[0]) instead of the stale loop variable `example`,
            # which still pointed at the *last* sample after the loop above —
            # mirroring the model_args loop below.
            img_args['vision_'+k] = tmp_example['vision'][k]
    for example in examples:
        example.pop('vision')
        if 'cross' in example:
            example.pop('cross')
    model_args = {}
    tmp_example = examples[0]
    for k in tmp_example:
        if type(tmp_example[k]) is torch.Tensor:
            model_args[k] = torch.cat([example[k] for example in examples])
        else:
            model_args[k] = tmp_example[k]
    model_args.update(img_args)
    return model_args
7,462 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTestCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
def get_batch(data_iterator, args, timers):
# Broadcast data.
timers('data loader').start()
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
timers('data loader').stop()
data_b = broadcast_auto(data)
for k in data_b:
if type(data_b[k]) is torch.Tensor and data_b[k].dtype is not torch.int32 and data_b[k].dtype is not torch.long:
if args.fp16:
data_b[k] = data_b[k].half()
elif args.bf16:
data_b[k] = data_b[k].bfloat16()
return data_b
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
def chat(model, tokenizer, tokens,
max_length: int = 1800, num_beams=5, top_p=0.95, top_k=0, temperature=0.8, **kwargs):
inputs = tokens.to(model.parameters().__next__().device)[0]
seq = torch.cat(
[inputs, torch.tensor([-1] * (max_length - len(inputs)), device=inputs.device)], dim=0
)
strategy = BaseStrategy(temperature=temperature, top_p=0.4, top_k=1, end_tokens=[tokenizer.eos_token_id])
# strategy = BeamSearchStrategy(temperature=temperature, top_p=top_p, top_k=top_k, end_tokens=[tokenizer.eos_token_id],
# num_beams=num_beams, consider_end=True)
get_func = llama2_text_processor_inference.get_func(None, None, image_rope_mask=kwargs['image_rope_mask'])
output = filling_sequence(
model, seq,
batch_size=1,
strategy=strategy,
get_masks_and_position_ids=get_func,
**kwargs
)[0] # drop memory
return output
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
def forward_step_eval(data_iterator, model, args, timers):
def compute_metrics(eval_preds):
preds, labels, device = eval_preds
preds = preds.unsqueeze(0)
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
score_dict = {
"acc": [],
"acc_w/o_case": [],
}
for pred, label in zip(decoded_preds, decoded_labels):
if args.rank == 0:
print('pred', pred, 'label', label, flush=True)
if pred == label:
score_dict['acc'].append(1.)
else:
score_dict['acc'].append(0.)
if pred.lower() == label.lower():
score_dict['acc_w/o_case'].append(1.)
else:
score_dict['acc_w/o_case'].append(0.)
for k, v in score_dict.items():
score_dict[k] = float(np.mean(v))
return score_dict
# Get the batch.
timers('batch generator').start()
data_b = get_batch(
data_iterator, args, timers)
timers('batch generator').stop()
context_len = int(data_b['context_length'][0])
tokens = data_b['input_ids'][:, :context_len]
data_b['vision_expert_mask'] = data_b['vision_expert_mask'][:, :context_len]
data_b['image_embed_mask'] = data_b['image_embed_mask'][:, :context_len]
data_b['image_rope_mask'] = data_b['image_rope_mask'][:, :context_len]
data_b.pop('input_ids')
data_b.pop('attention_mask')
data_b.pop('position_ids')
labels = data_b.pop('labels')
qid = data_b.pop('question_id')
model.add_mixin('auto-regressive', CachedAutoregressiveMixin())
outputs = chat(model, tokenizer, tokens, **data_b)[0][context_len:]
# print(outputs)
model.del_mixin('auto-regressive')
return torch.tensor(0, device=outputs.device), {k: torch.tensor(v, device=outputs.device) for k, v in
compute_metrics(
(outputs.cpu(), labels.cpu(), outputs.device)).items()} | null |
7,463 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTestCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
def get_batch(data_iterator, args, timers):
# Broadcast data.
timers('data loader').start()
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
timers('data loader').stop()
data_b = broadcast_auto(data)
for k in data_b:
if type(data_b[k]) is torch.Tensor and data_b[k].dtype is not torch.int32 and data_b[k].dtype is not torch.long:
if args.fp16:
data_b[k] = data_b[k].half()
elif args.bf16:
data_b[k] = data_b[k].bfloat16()
return data_b
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
The provided code snippet includes necessary dependencies for implementing the `forward_step` function. Write a Python function `def forward_step(data_iterator, model, args, timers)` to solve the following problem:
Forward step.
Here is the function:
def forward_step(data_iterator, model, args, timers):
"""Forward step."""
# Get the batch.
timers('batch generator').start()
data_b = get_batch(
data_iterator, args, timers)
labels = data_b.pop('labels')
timers('batch generator').stop()
logits = model(**data_b)[0]
lm_logits = logits.to(torch.float32)
# Shift so that tokens < n predict n
shift_labels = labels[..., 1:].contiguous()
shift_logits = lm_logits[..., -1-shift_labels.size(-1):-1, :].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
loss = loss.to(torch.float32)
return loss, {'loss': loss} | Forward step. |
7,464 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTestCogVLMModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
def create_dataset_function(image_processor, text_processor, path, args):
dataset = ItemDataset(image_processor, text_processor, args, path)
return dataset | null |
7,465 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def disable_untrainable_params(self):
total_trainable = 0
# enable = ['vit']
enable = ["encoder", "cross_attention", "linear_proj", 'mlp.vision', 'rotary.vision', 'eoi', 'boi', 'vit']
if self.args.use_ptuning:
enable.extend(['ptuning'])
if self.args.use_lora or self.args.use_qlora:
enable.extend(['matrix_A', 'matrix_B'])
for n, p in self.named_parameters():
flag = False
for e in enable:
if type(e) is tuple:
if e[0].lower() in n.lower() and e[1].lower() in n.lower() and 55 > int(n[:n.find('.mlp')].split('.')[-1]) > 45:
flag = True
break
else:
if e.lower() in n.lower():
flag = True
break
if not flag:
p.requires_grad_(False)
else:
total_trainable += p.numel()
if 'encoder' in n or 'vit' in n:
p.lr_scale = 0.1
print_rank0(n)
print_rank0("***** Total trainable parameters: "+str(total_trainable)+" *****") | null |
7,466 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def data_collator(examples, cross_image_processor=None):
def to_tensor(value):
"""Converts lists or numpy arrays to tensors."""
if isinstance(value, list):
return torch.tensor(value)
elif isinstance(value, np.ndarray):
return torch.from_numpy(value)
return value
def concatenate_tensors(attribute, key):
"""Concatenates tensors for a specific attribute and key."""
if attribute is None:
return torch.cat([ex[key] for ex in examples if isinstance(ex[key], torch.Tensor)])
else:
return torch.cat([ex[attribute][key] for ex in examples if isinstance(ex[attribute][key], torch.Tensor)])
# Convert all lists and numpy arrays in examples to tensors
for example in examples:
for key, value in example.items():
example[key] = to_tensor(value)
# Extract and concatenate attributes from examples
img_args = {}
for attribute in ['vision', 'cross']:
if attribute == 'cross' and cross_image_processor is None:
continue
if attribute in examples[-1]: # Using the last example as reference
for key in examples[-1][attribute]:
tensor_key = f"{attribute}_{key}"
tensors_to_concatenate = [ex[attribute][key] for ex in examples if isinstance(ex[attribute][key], torch.Tensor)]
if tensors_to_concatenate:
img_args[tensor_key] = concatenate_tensors(attribute, key)
else:
img_args[tensor_key] = examples[-1][attribute][key]
# Remove 'vision' and 'cross' keys from examples
for example in examples:
example.pop('vision', None)
example.pop('cross', None)
# Create model_args by concatenating tensors and copying other attributes
model_args = {key: concatenate_tensors(None, key)
if isinstance(examples[-1][key], torch.Tensor) else examples[-1][key]
for key in examples[-1]
}
# Merge img_args into model_args
model_args.update(img_args)
return model_args | null |
7,467 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
def get_batch(data_iterator, args, timers):
# Broadcast data.
timers('data loader').start()
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
timers('data loader').stop()
data_b = broadcast_auto(data)
for k in data_b:
if type(data_b[k]) is torch.Tensor and data_b[k].dtype is not torch.int32 and data_b[k].dtype is not torch.long:
if args.fp16:
data_b[k] = data_b[k].half()
elif args.bf16:
data_b[k] = data_b[k].bfloat16()
return data_b
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
def chat(model, tokenizer, tokens,
max_length: int = 1800, num_beams=5, top_p=0.95, top_k=0, temperature=0.8, **kwargs):
inputs = tokens.to(model.parameters().__next__().device)[0]
seq = torch.cat(
[inputs, torch.tensor([-1] * (max_length - len(inputs)), device=inputs.device)], dim=0
)
strategy = BaseStrategy(temperature=temperature, top_p=0.4, top_k=1, end_tokens=[tokenizer.eos_token_id])
# strategy = BeamSearchStrategy(temperature=temperature, top_p=top_p, top_k=top_k, end_tokens=[tokenizer.eos_token_id],
# num_beams=num_beams, consider_end=True)
get_func = llama2_text_processor_inference.get_func(None, None, image_rope_mask=kwargs['image_rope_mask'])
output = filling_sequence(
model, seq,
batch_size=1,
strategy=strategy,
get_masks_and_position_ids=get_func,
**kwargs
)[0] # drop memory
return output
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def forward_step_eval(data_iterator, model, args, timers):
def compute_metrics(eval_preds):
preds, labels, device = eval_preds
preds = preds.unsqueeze(0)
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
score_dict = {
"acc": [],
"acc_w/o_case": [],
}
for pred, label in zip(decoded_preds, decoded_labels):
if args.rank == 0:
print('pred', pred, 'label', label, flush=True)
if pred == label:
score_dict['acc'].append(1.)
else:
score_dict['acc'].append(0.)
if pred.lower() == label.lower():
score_dict['acc_w/o_case'].append(1.)
else:
score_dict['acc_w/o_case'].append(0.)
for k, v in score_dict.items():
score_dict[k] = float(np.mean(v))
return score_dict
# Get the batch.
timers('batch generator').start()
data_b = get_batch(
data_iterator, args, timers)
timers('batch generator').stop()
context_len = int(data_b['context_length'][0])
tokens = data_b['input_ids'][:, :context_len]
data_b['vision_expert_mask'] = data_b['vision_expert_mask'][:, :context_len]
data_b['image_embed_mask'] = data_b['image_embed_mask'][:, :context_len]
data_b['image_rope_mask'] = data_b['image_rope_mask'][:, :context_len]
data_b.pop('input_ids')
data_b.pop('attention_mask')
data_b.pop('position_ids')
labels = data_b.pop('labels')
qid = data_b.pop('question_id')
model.add_mixin('auto-regressive', CachedAutoregressiveMixin())
outputs = chat(model, tokenizer, tokens, **data_b)[0][context_len:]
# print(outputs)
model.del_mixin('auto-regressive')
return torch.tensor(0, device=outputs.device), {k: torch.tensor(v, device=outputs.device) for k, v in
compute_metrics(
(outputs.cpu(), labels.cpu(), outputs.device)).items()} | null |
7,468 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
def get_batch(data_iterator, args, timers):
# Broadcast data.
timers('data loader').start()
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
timers('data loader').stop()
data_b = broadcast_auto(data)
for k in data_b:
if type(data_b[k]) is torch.Tensor and data_b[k].dtype is not torch.int32 and data_b[k].dtype is not torch.long:
if args.fp16:
data_b[k] = data_b[k].half()
elif args.bf16:
data_b[k] = data_b[k].bfloat16()
return data_b
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
The provided code snippet includes necessary dependencies for implementing the `forward_step` function. Write a Python function `def forward_step(data_iterator, model, args, timers)` to solve the following problem:
Forward step.
Here is the function:
def forward_step(data_iterator, model, args, timers):
"""Forward step."""
# Get the batch.
timers('batch generator').start()
data_b = get_batch(
data_iterator, args, timers)
labels = data_b.pop('labels')
timers('batch generator').stop()
logits = model(**data_b)[0]
lm_logits = logits.to(torch.float32)
# Shift so that tokens < n predict n
shift_labels = labels[..., 1:].contiguous()
shift_logits = lm_logits[..., -1-shift_labels.size(-1):-1, :].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
loss = loss.to(torch.float32)
return loss, {'loss': loss} | Forward step. |
7,469 | import os
import torch
import argparse
from functools import partial
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from utils.models import FineTuneTrainCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from collections import defaultdict
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
from sat.model.finetune.lora2 import LoraMixin
from sat.model.finetune.prompt_tuning import PTuningV2Mixin
def create_dataset_function(image_processor, text_processor, cross_image_processor, path, args):
dataset = ItemDataset(image_processor, text_processor, args, path, cross_image_processor=cross_image_processor)
return dataset | null |
7,470 | import os
import torch
import argparse
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from collections import defaultdict
from functools import partial
from utils.models import FineTuneTestCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
def data_collator(examples, cross_image_processor=None):
def to_tensor(value):
"""Converts lists or numpy arrays to tensors."""
if isinstance(value, list):
return torch.tensor(value)
elif isinstance(value, np.ndarray):
return torch.from_numpy(value)
return value
def concatenate_tensors(attribute, key):
"""Concatenates tensors for a specific attribute and key."""
if attribute is None:
return torch.cat([ex[key] for ex in examples if isinstance(ex[key], torch.Tensor)])
else:
return torch.cat([ex[attribute][key] for ex in examples if isinstance(ex[attribute][key], torch.Tensor)])
# Convert all lists and numpy arrays in examples to tensors
for example in examples:
for key, value in example.items():
example[key] = to_tensor(value)
# Extract and concatenate attributes from examples
img_args = {}
for attribute in ['vision', 'cross']:
if attribute == 'cross' and cross_image_processor is None:
continue
if attribute in examples[-1]: # Using the last example as reference
for key in examples[-1][attribute]:
tensor_key = f"{attribute}_{key}"
tensors_to_concatenate = [ex[attribute][key] for ex in examples if isinstance(ex[attribute][key], torch.Tensor)]
if tensors_to_concatenate:
img_args[tensor_key] = concatenate_tensors(attribute, key)
else:
img_args[tensor_key] = examples[-1][attribute][key]
# Remove 'vision' and 'cross' keys from examples
for example in examples:
example.pop('vision', None)
example.pop('cross', None)
# Create model_args by concatenating tensors and copying other attributes
model_args = {key: concatenate_tensors(None, key)
if isinstance(examples[-1][key], torch.Tensor) else examples[-1][key]
for key in examples[-1]
}
# Merge img_args into model_args
model_args.update(img_args)
return model_args | null |
7,471 | import os
import torch
import argparse
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from collections import defaultdict
from functools import partial
from utils.models import FineTuneTestCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
def get_batch(data_iterator, args, timers):
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
def chat(model, tokenizer, tokens,
max_length: int = 1800, num_beams=5, top_p=0.95, top_k=0, temperature=0.8, **kwargs):
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
def forward_step_eval(data_iterator, model, args, timers):
def compute_metrics(eval_preds):
preds, labels, device = eval_preds
preds = preds.unsqueeze(0)
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
score_dict = {
"acc": [],
"acc_w/o_case": [],
}
for pred, label in zip(decoded_preds, decoded_labels):
if args.rank == 0:
print('pred', pred, 'label', label, flush=True)
if pred == label:
score_dict['acc'].append(1.)
else:
score_dict['acc'].append(0.)
if pred.lower() == label.lower():
score_dict['acc_w/o_case'].append(1.)
else:
score_dict['acc_w/o_case'].append(0.)
for k, v in score_dict.items():
score_dict[k] = float(np.mean(v))
return score_dict
# Get the batch.
timers('batch generator').start()
data_b = get_batch(
data_iterator, args, timers)
timers('batch generator').stop()
context_len = int(data_b['context_length'][0])
tokens = data_b['input_ids'][:, :context_len]
data_b['vision_expert_mask'] = data_b['vision_expert_mask'][:, :context_len]
data_b['image_embed_mask'] = data_b['image_embed_mask'][:, :context_len]
data_b['image_rope_mask'] = data_b['image_rope_mask'][:, :context_len]
data_b.pop('input_ids')
data_b.pop('attention_mask')
data_b.pop('position_ids')
labels = data_b.pop('labels')
qid = data_b.pop('question_id')
model.add_mixin('auto-regressive', CachedAutoregressiveMixin())
outputs = chat(model, tokenizer, tokens, **data_b)[0][context_len:]
# print(outputs)
model.del_mixin('auto-regressive')
return torch.tensor(0, device=outputs.device), {k: torch.tensor(v, device=outputs.device) for k, v in
compute_metrics(
(outputs.cpu(), labels.cpu(), outputs.device)).items()} | null |
7,472 | import os
import torch
import argparse
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from collections import defaultdict
from functools import partial
from utils.models import FineTuneTestCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
def get_batch(data_iterator, args, timers):
# Broadcast data.
timers('data loader').start()
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
timers('data loader').stop()
data_b = broadcast_auto(data)
for k in data_b:
if type(data_b[k]) is torch.Tensor and data_b[k].dtype is not torch.int32 and data_b[k].dtype is not torch.long:
if args.fp16:
data_b[k] = data_b[k].half()
elif args.bf16:
data_b[k] = data_b[k].bfloat16()
return data_b
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
The provided code snippet includes necessary dependencies for implementing the `forward_step` function. Write a Python function `def forward_step(data_iterator, model, args, timers)` to solve the following problem:
Forward step.
Here is the function:
def forward_step(data_iterator, model, args, timers):
"""Forward step."""
# Get the batch.
timers('batch generator').start()
data_b = get_batch(
data_iterator, args, timers)
labels = data_b.pop('labels')
timers('batch generator').stop()
logits = model(**data_b)[0]
lm_logits = logits.to(torch.float32)
# Shift so that tokens < n predict n
shift_labels = labels[..., 1:].contiguous()
shift_logits = lm_logits[..., -1-shift_labels.size(-1):-1, :].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
loss = loss.to(torch.float32)
return loss, {'loss': loss} | Forward step. |
7,473 | import os
import torch
import argparse
import sys
from sat import mpu, get_args, get_tokenizer
from sat.training.deepspeed_training import training_main
from sat.helpers import print_rank0
from collections import defaultdict
from functools import partial
from utils.models import FineTuneTestCogAgentModel
from utils.utils import llama2_text_processor, llama2_text_processor_inference, get_image_processor
from torch.nn import CrossEntropyLoss
import numpy as np
from sat.model.mixins import CachedAutoregressiveMixin
from sat.generation.autoregressive_sampling import filling_sequence
from sat.generation.sampling_strategies import BaseStrategy, BeamSearchStrategy
from torch.nn import CrossEntropyLoss
from utils.utils import ItemDataset
def create_dataset_function(image_processor, text_processor, cross_image_processor, path, args):
dataset = ItemDataset(image_processor, text_processor, args, path, cross_image_processor=cross_image_processor)
return dataset | null |
7,474 | import os
import shutil
os.makedirs("archive_split", exist_ok=True)
os.makedirs("archive_split/train", exist_ok=True)
os.makedirs("archive_split/valid", exist_ok=True)
os.makedirs("archive_split/test", exist_ok=True)
import random
print("building train")
print("building valid")
print("building test")
print("done")
def find_all_files(path, suffix=".jpg"):
target_files = []
for cur_dir, _, files in os.walk(path, followlinks=True):
for f in files:
if f.endswith(suffix):
target_files.append(os.path.join(cur_dir, f))
print(f'find {len(target_files)} files...')
return target_files | null |
7,475 | from sat.model.official.llama_model import LLaMAModel
import json
import torch
from functools import partial
from sat.model.base_model import BaseMixin
import torch.nn as nn
import numpy as np
from sat.resources.urls import MODEL_URLS
from .eva_clip_L_hf import Eva2LargeEncoder
from .mixin import LlamaVisionExpertFCMixin, LlamaVisionExpertAttnMixin
from .eva_clip_model import EVA2CLIPModel
import argparse
from copy import deepcopy
from sat.model.finetune import PTuningV2Mixin
from sat.model.finetune.lora2 import LoraMixin
def override_dist_dtype_device_args(args, b={}):
if args.mode == 'inference':
minimal_args = argparse.Namespace(
world_size=args.world_size,
rank=args.rank,
local_rank=args.local_rank,
skip_init=args.skip_init,
use_gpu_initialization=args.use_gpu_initialization,
deepspeed=args.deepspeed,
bf16=args.bf16,
fp16=args.fp16,
mode=args.mode,
device=args.device
)
else:
minimal_args = argparse.Namespace(
world_size=args.world_size,
rank=args.rank,
local_rank=args.local_rank,
skip_init=args.skip_init,
use_gpu_initialization=args.use_gpu_initialization,
deepspeed=args.deepspeed,
bf16=args.bf16,
fp16=args.fp16,
mode=args.mode,
checkpoint_activations=args.checkpoint_activations if not hasattr(args, 'vit_checkpoint_activations') else args.vit_checkpoint_activations,
checkpoint_num_layers=args.checkpoint_num_layers,
device=args.device,
hidden_dropout=0.,
attention_dropout=0.,
)
if hasattr(args, 'model_parallel_size'):
b['model_parallel_size'] = args.model_parallel_size
return argparse.Namespace(**deepcopy(b), **vars(minimal_args)) | null |
7,476 | from math import pi
import torch
from torch import nn
from einops import rearrange, repeat
import logging
import torch.nn as nn
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import math
import os
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import xformers.ops as xops
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim) | null |
7,477 | from math import pi
import torch
from torch import nn
from einops import rearrange, repeat
import logging
import torch.nn as nn
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import math
import os
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import xformers.ops as xops
def rotate_half(x):
x = rearrange(x, '... (d r) -> ... d r', r = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d r -> ... (d r)') | null |
7,478 | from math import pi
import torch
from torch import nn
from einops import rearrange, repeat
import logging
import torch.nn as nn
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import math
import os
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import xformers.ops as xops
class EVAVisionTransformer(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage

    EVA-style ViT encoder: optional absolute position embeddings, (shared)
    relative position bias, 2D rotary embeddings (``rope``), LayerScale via
    ``init_values``, stochastic depth, patch dropout, gradient checkpointing,
    and either mean pooling or CLS-token pooling of the final feature.

    NOTE(review): ``PatchEmbed``, ``RelativePositionBias``,
    ``VisionRotaryEmbeddingFast``, ``Block``, ``trunc_normal_``,
    ``PatchDropout`` and ``checkpoint`` are defined/imported elsewhere in the
    original module — not visible in this excerpt.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
                 use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
                 use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
                 pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):
        super().__init__()
        self.image_size = img_size
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            # +1 for the CLS token position.
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)
        if use_shared_rel_pos_bias:
            # One relative-position-bias table shared by every block.
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
        else:
            self.rel_pos_bias = None
        if rope:
            # Rotary embedding acts on half of each head's channels.
            half_head_dim = embed_dim // num_heads // 2
            hw_seq_len = img_size // patch_size
            self.rope = VisionRotaryEmbeddingFast(
                dim=half_head_dim,
                pt_seq_len=pt_hw_seq_len,
                ft_seq_len=hw_seq_len if intp_freq else None,
                # patch_dropout=patch_dropout
            )
        else:
            self.rope = None
        self.naiveswiglu = naiveswiglu
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
                xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu)
            for i in range(depth)])
        self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        # trunc_normal_(self.mask_token, std=.02)
        self.apply(self._init_weights)
        self.fix_init_weight()
        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=.02)
            self.head.weight.data.mul_(init_scale)
            self.head.bias.data.mul_(init_scale)
        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
        self.grad_checkpointing = grad_checkpointing

    def fix_init_weight(self):
        # Depth-dependent rescale of the residual-branch projection weights,
        # as in BEiT/EVA, so deeper layers start with smaller contributions.
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))
        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            if self.naiveswiglu:
                rescale(layer.mlp.w3.weight.data, layer_id + 1)
            else:
                rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def get_cast_dtype(self) -> torch.dtype:
        # Dtype of a representative parameter; callers cast inputs to this.
        return self.blocks[0].mlp.fc2.weight.dtype

    def _init_weights(self, m):
        # Standard truncated-normal / constant init, applied via self.apply().
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        # Freeze the whole tower (frozen feature extractor); partial
        # unlocking is explicitly unsupported.
        assert unlocked_groups == 0, 'partial locking not currently supported for this model'
        for param in self.parameters():
            param.requires_grad = False

    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    def no_weight_decay(self):
        # Parameter names the optimizer setup excludes from weight decay.
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        # Replace the classification head for a new number of classes.
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x, return_all_features=False):
        # Patchify, prepend the CLS token, add absolute position embeddings.
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
        if os.getenv('RoPE') == '1':
            if self.training and not isinstance(self.patch_dropout, nn.Identity):
                # Patch dropout removes tokens, so RoPE must be told which
                # patch indices survived.
                x, patch_indices_keep = self.patch_dropout(x)
                self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)
            else:
                self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)
                x = self.patch_dropout(x)
        else:
            x = self.patch_dropout(x)
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for i, blk in enumerate(self.blocks):
            # NOTE(review): the last transformer block is skipped, so the
            # returned feature comes from the penultimate layer — confirm this
            # is intentional for the checkpoint being loaded.
            if i == len(self.blocks)-1:
                continue
            if self.grad_checkpointing:
                x = checkpoint(blk, x, (rel_pos_bias,))
            else:
                x = blk(x, rel_pos_bias=rel_pos_bias)
        if not return_all_features:
            x = self.norm(x)
            if self.fc_norm is not None:
                # Mean pooling over all tokens.
                return self.fc_norm(x.mean(1))
            else:
                # CLS-token pooling.
                return x[:, 0]
        return x

    def forward(self, x, return_all_features=False):
        if return_all_features:
            return self.forward_features(x, return_all_features)
        x = self.forward_features(x)
        x = self.head(x)
        return x
class LayerNorm(nn.LayerNorm):
    """torch.nn.LayerNorm variant that restores the input dtype.

    The normalization may run in the parameter dtype; the result is cast
    back so callers always get tensors in the dtype they passed in.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normed = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        return normed.to(input_dtype)
@dataclass
class CLIPVisionCfg:
    """Vision-tower hyper-parameters for CLIP/EVA models.

    ``_build_vision_tower`` instantiates this as ``CLIPVisionCfg(**dict)``,
    which requires the keyword constructor generated by ``@dataclass`` —
    the decorator was missing here even though ``dataclass`` is already
    imported at the top of this file, so the kwargs call would raise
    TypeError on a plain class.
    """
    layers: Union[Tuple[int, int, int, int], int] = 12
    width: int = 768
    head_width: int = 64
    mlp_ratio: float = 4.0
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224
    ls_init_value: Optional[float] = None  # layer scale initial value
    patch_dropout: float = 0.  # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
    global_average_pool: bool = False  # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
    drop_path_rate: Optional[float] = None  # drop path rate
    timm_model_name: str = None  # a valid model name overrides layers, width, patch_size
    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
    timm_pool: str = 'avg'  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
    timm_proj: str = 'linear'  # linear projection for timm model output ('linear', 'mlp', '')
    timm_proj_bias: bool = False  # enable bias final projection
    eva_model_name: str = None  # a valid eva model name overrides layers, width, patch_size
    qkv_bias: bool = True
    fusedLN: bool = False
    xattn: bool = False
    postnorm: bool = False
    rope: bool = False
    pt_hw_seq_len: int = 16  # 224/14
    intp_freq: bool = False
    naiveswiglu: bool = False
    subln: bool = False
def _build_vision_tower(
        embed_dim: int,
        vision_cfg: CLIPVisionCfg
):
    """Build the EVA vision encoder described by ``vision_cfg``.

    Args:
        embed_dim: output feature dimension (used as the classifier width).
        vision_cfg: a CLIPVisionCfg instance or a dict of its fields.

    Returns:
        The constructed EVAVisionTransformer.

    NOTE(review): only the ``eva_model_name`` branch exists in this excerpt;
    if ``vision_cfg.eva_model_name`` is falsy, ``visual`` is never bound and
    the final ``return`` raises NameError — confirm against the original
    module. ``FusedLayerNorm`` is also not imported here (presumably apex's).
    """
    if isinstance(vision_cfg, dict):
        vision_cfg = CLIPVisionCfg(**vision_cfg)
    if vision_cfg.eva_model_name:
        vision_heads = vision_cfg.width // vision_cfg.head_width
        norm_layer = LayerNorm
        visual = EVAVisionTransformer(
            img_size=vision_cfg.image_size,
            patch_size=vision_cfg.patch_size,
            num_classes=embed_dim,
            use_mean_pooling=vision_cfg.global_average_pool, #False
            init_values=vision_cfg.ls_init_value,
            patch_dropout=vision_cfg.patch_dropout,
            embed_dim=vision_cfg.width,
            depth=vision_cfg.layers,
            num_heads=vision_heads,
            mlp_ratio=vision_cfg.mlp_ratio,
            qkv_bias=vision_cfg.qkv_bias,
            drop_path_rate=vision_cfg.drop_path_rate,
            norm_layer= partial(FusedLayerNorm, eps=1e-6) if vision_cfg.fusedLN else partial(norm_layer, eps=1e-6),
            xattn=vision_cfg.xattn,
            rope=vision_cfg.rope,
            postnorm=vision_cfg.postnorm,
            pt_hw_seq_len= vision_cfg.pt_hw_seq_len,  # 224/14
            intp_freq= vision_cfg.intp_freq,
            naiveswiglu= vision_cfg.naiveswiglu,
            subln= vision_cfg.subln
        )
    return visual
7,479 | from sat.model.official.llama_model import LLaMAModel
import json
import torch
from sat.model.base_model import BaseMixin
import torch.nn as nn
from .mixin import LlamaVisionExpertFCMixin, LlamaVisionExpertAttnMixin
from sat.resources.urls import MODEL_URLS
from .eva_clip_model import EVA2CLIPModel
import argparse
from copy import deepcopy
from sat.model.finetune import PTuningV2Mixin
from sat.model.finetune.lora2 import LoraMixin
def override_dist_dtype_device_args(args, b=None):
    """Build a minimal argparse.Namespace for (re)initializing a sub-model.

    Extracts only the distributed/dtype/device settings from ``args`` (plus
    checkpointing/dropout fields when not in inference mode), merges in the
    base kwargs ``b``, and forwards ``model_parallel_size`` if present.

    Args:
        args: full argument namespace of the parent model.
        b: optional dict of base keyword arguments; it is copied, never
           mutated.

    Returns:
        argparse.Namespace combining a deep copy of ``b`` with the selected
        fields of ``args``.
    """
    # Bug fix: the old signature used the mutable default ``b={}`` and then
    # wrote ``model_parallel_size`` into it, leaking state between calls and
    # mutating caller-supplied dicts.
    b = {} if b is None else dict(b)
    if args.mode == 'inference':
        minimal_args = argparse.Namespace(
            world_size=args.world_size,
            rank=args.rank,
            local_rank=args.local_rank,
            skip_init=args.skip_init,
            use_gpu_initialization=args.use_gpu_initialization,
            deepspeed=args.deepspeed,
            bf16=args.bf16,
            fp16=args.fp16,
            mode=args.mode,
            device=args.device
        )
    else:
        minimal_args = argparse.Namespace(
            world_size=args.world_size,
            rank=args.rank,
            local_rank=args.local_rank,
            skip_init=args.skip_init,
            use_gpu_initialization=args.use_gpu_initialization,
            deepspeed=args.deepspeed,
            bf16=args.bf16,
            fp16=args.fp16,
            mode=args.mode,
            # The ViT can use its own checkpointing flag when provided.
            checkpoint_activations=args.checkpoint_activations if not hasattr(args, 'vit_checkpoint_activations') else args.vit_checkpoint_activations,
            checkpoint_num_layers=args.checkpoint_num_layers,
            device=args.device,
            hidden_dropout=0.,
            attention_dropout=0.,
        )
    if hasattr(args, 'model_parallel_size'):
        b['model_parallel_size'] = args.model_parallel_size
    return argparse.Namespace(**deepcopy(b), **vars(minimal_args))
7,480 | from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import torch
class BlipImageEvalProcessor:
    """Deterministic BLIP evaluation preprocessing.

    Bicubic-resizes to a square of ``image_size``, converts to a tensor, and
    applies CLIP-style channel normalization (defaults are the CLIP means
    and stds).
    """

    def __init__(self, image_size=384, mean=None, std=None):
        super().__init__()
        mean = (0.48145466, 0.4578275, 0.40821073) if mean is None else mean
        std = (0.26862954, 0.26130258, 0.27577711) if std is None else std
        self.normalize = transforms.Normalize(mean, std)
        self.transform = transforms.Compose(
            [
                transforms.Resize(
                    (image_size, image_size), interpolation=InterpolationMode.BICUBIC
                ),
                transforms.ToTensor(),
                self.normalize,
            ]
        )

    def __call__(self, item):
        return self.transform(item)
from functools import partial
def blip2_image_processor_func_with_inputs(image_processor, image):
    """Run ``image_processor`` on ``image`` and wrap the result in the
    batched input dict the model forward expects (dummy single-token text)."""
    pixel_values = image_processor(image).unsqueeze(0)
    return {
        'image': pixel_values,
        'input_ids': torch.zeros(1, 1, dtype=torch.long),
        'position_ids': None,
        'attention_mask': torch.ones(1, 1, dtype=torch.long),
    }
def get_image_processor(image_size):
    """Return a callable that preprocesses an image at the given square size
    and packages it as model inputs."""
    processor = BlipImageEvalProcessor(image_size)
    return partial(blip2_image_processor_func_with_inputs, processor)
7,481 | from transformers import LlamaTokenizer
import re
import numpy as np
import torch
from functools import partial
def base_history_to_prompt(self, query, history):
    """Base-mode prompt: just the query after the image token.

    ``history`` is intentionally ignored in this mode.
    """
    return '<EOI>' + query
7,482 | from transformers import LlamaTokenizer
import re
import numpy as np
import torch
from functools import partial
def chat_history_to_prompt(self, query, history):
    """Render history in Llama-2 ``[INST]`` chat format, ending with the new
    query awaiting its answer."""
    parts = ["<EOI> [INST] "]
    for old_query, response in history:
        parts.append(old_query + " [/INST] " + response + " [INST] ")
    parts.append(query + " [/INST] ")
    return "".join(parts)
7,483 | from transformers import LlamaTokenizer
import re
import numpy as np
import torch
from functools import partial
def vqa_history_to_prompt(self, query, history):
    """VQA prompt: single round only, so prior history is deliberately not
    rendered (matching the disabled loop in the original)."""
    # Only support single round chat in vqa mode
    return "<EOI>Question: " + query + " Short answer:"
7,484 | from transformers import LlamaTokenizer
import re
import numpy as np
import torch
from functools import partial
def chat_old_history_to_prompt(self, query, history):
    """Legacy chat format: 'Question: ... Answer: ...' rounds separated by
    newlines, ending with the new query awaiting its answer."""
    pieces = ["<EOI>Question: "]
    for old_query, response in history:
        pieces.append(old_query + " Answer: " + response + "\nQuestion: ")
    pieces.append(query + " Answer:")
    return "".join(pieces)
7,485 | from transformers import LlamaTokenizer
import re
import numpy as np
import torch
from functools import partial
def llama2_tokenizer(tokenizer_path, signal_type="base"):
    """Load a LlamaTokenizer and attach the multimodal prompt settings.

    Args:
        tokenizer_path: path or hub id passed to
            ``LlamaTokenizer.from_pretrained``.
        signal_type: prompt style — one of "base", "chat", "vqa", "chat_old".

    Returns:
        The tokenizer with ``pad_token_id`` defaulted to 32000 and extra
        attributes ``boi``/``eoi`` (image delimiters) and ``signal_type``.
    """
    tokenizer = LlamaTokenizer.from_pretrained(tokenizer_path)
    if tokenizer.pad_token_id is None:
        # NOTE(review): 32000 is presumably the added pad token id for this
        # vocabulary — confirm against the model's config.
        tokenizer.pad_token_id = 32000
    # Image boundary markers used when splicing vision features into text.
    tokenizer.boi = "[IMG]"
    tokenizer.eoi = "[/IMG]"
    assert signal_type in ["base", "chat", "vqa", "chat_old"]
    tokenizer.signal_type = signal_type
    return tokenizer
7,486 | from transformers import LlamaTokenizer
import re
import numpy as np
import torch
from functools import partial
def get_masks_and_position_ids(seq, image_logits_mask):
    """Build batched tokens, a causal mask, and position ids where each
    contiguous run of image logits shares a single position.

    Args:
        seq: 1-D token tensor of length L.
        image_logits_mask: (1, M) mask; consecutive equal nonzero entries
            form one image block and share one position id.

    Returns:
        (tokens, attention_mask, position_ids) of shapes (1, L),
        (1, 1, L, L) and (1, L).
    """
    tokens = seq.unsqueeze(0)
    seq_len = len(seq)
    # Lower-triangular causal mask with a broadcastable head dimension.
    attention_mask = torch.ones((1, seq_len, seq_len), device=tokens.device).tril_().unsqueeze_(1)
    mask_row = image_logits_mask[0]
    position_ids = []
    pid = -1
    for i, value in enumerate(mask_row):
        # A new position starts on text tokens (mask == 0) and whenever the
        # mask value changes from the previous entry.
        if value == 0 or (i > 0 and value != mask_row[i - 1]):
            pid += 1
        position_ids.append(pid)
    # Remaining (text) tokens each get their own fresh position.
    for _ in range(tokens.shape[1] - image_logits_mask.shape[1]):
        pid += 1
        position_ids.append(pid)
    position_ids = torch.tensor(position_ids, dtype=torch.long, device=tokens.device)
    return tokens, attention_mask, position_ids.unsqueeze(0)
7,487 | import os
import logging
import random
import logging
import jsonlines
from io import BytesIO
from PIL import Image
from torch.utils.data import Dataset
from sat.helpers import print_rank0
def find_all_files(path, suffix=".jpg"):
    """Recursively collect files under ``path`` (following symlinks) whose
    names end with ``suffix``; logs the count on rank 0."""
    target_files = [
        os.path.join(cur_dir, name)
        for cur_dir, _, files in os.walk(path, followlinks=True)
        for name in files
        if name.endswith(suffix)
    ]
    print_rank0(f'find {len(target_files)} files...')
    return target_files
7,488 | import asyncio
import logging
import time
from signal import SIGINT, SIGTERM, signal
from typing import Optional
import aiohttp
from . import api_helpers, ytlounge
async def finish(devices):
    """Cancel every device's pending work, one device at a time."""
    for device in devices:
        await device.cancel()
7,489 | import os
import plistlib
from . import config_setup
# Template launchd job definition; per-install paths are filled in by
# create_plist(). Treat this as read-only.
default_plist = {
    "Label": "com.dmunozv04iSponsorBlockTV",
    "RunAtLoad": True,
    "StartInterval": 20,
    "EnvironmentVariables": {"PYTHONUNBUFFERED": "YES"},
    "StandardErrorPath": "",  # Fill later
    "StandardOutPath": "",
    "ProgramArguments": "",
    "WorkingDirectory": "",
}


def create_plist(path):
    """Write the iSponsorBlockTV launchd plist into ~/Library/LaunchAgents.

    Args:
        path: installation directory holding the iSponsorBlockTV binary;
            also used for the log files and as the working directory.
    """
    # Bug fix: the previous code assigned the module-level template directly
    # (``plist = default_plist``) and then mutated it, so every call — and any
    # other reader of ``default_plist`` — saw the paths from the last
    # invocation. Work on a copy instead.
    plist = {**default_plist}
    plist["ProgramArguments"] = [path + "/iSponsorBlockTV-macos"]
    plist["StandardErrorPath"] = path + "/iSponsorBlockTV.error.log"
    plist["StandardOutPath"] = path + "/iSponsorBlockTV.out.log"
    plist["WorkingDirectory"] = path
    launchd_path = os.path.expanduser("~/Library/LaunchAgents/")
    path_to_save = launchd_path + "com.dmunozv04.iSponsorBlockTV.plist"
    with open(path_to_save, "wb") as fp:
        plistlib.dump(plist, fp)
7,490 | import os
import plistlib
from . import config_setup
def main():
    """Install the launch agent when the binary sits in ~/iSponsorBlockTV;
    otherwise create that directory and reveal it in Finder."""
    correct_path = os.path.expanduser("~/iSponsorBlockTV")
    # Guard clause: binary not in place yet — prepare the directory and bail.
    if not os.path.isfile(correct_path + "/iSponsorBlockTV-macos"):
        if not os.path.exists(correct_path):
            os.makedirs(correct_path)
        print(
            "Please move the program to the correct path: "
            + correct_path
            + "opening now on finder..."
        )
        os.system("open -R " + correct_path)
        return
    print("Program is on the right path")
    print("The launch daemon will now be installed")
    create_plist(correct_path)
    run_setup(correct_path + "/config.json")
    print(
        "Launch daemon installed. Please restart the computer to enable it or"
        " use:\n launchctl load"
        " ~/Library/LaunchAgents/com.dmunozv04.iSponsorBlockTV.plist"
    )
def run_setup(file):
    """Run the interactive CLI configuration against ``file`` with an empty
    starting config (non-debug)."""
    config_setup.main({}, file, debug=False)
7,491 | import html
from hashlib import sha256
from aiohttp import ClientSession
from cache import AsyncLRU
from . import constants, dial_client
from .conditional_ttl_cache import AsyncConditionalTTL
def list_to_tuple(function):
    """Decorator making a list-taking/-returning function hash-cache friendly.

    List positional and keyword arguments are converted to tuples before the
    call (so the wrapped function can sit behind hash-based caches such as
    ``AsyncLRU``), and a list result is converted back to a tuple.

    Improvements over the previous version: keyword arguments are now
    forwarded (and converted) instead of raising TypeError, and
    ``functools.wraps`` preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(function)
    def wrapper(*args, **kwargs):
        args = [tuple(x) if isinstance(x, list) else x for x in args]
        kwargs = {k: tuple(v) if isinstance(v, list) else v
                  for k, v in kwargs.items()}
        result = function(*args, **kwargs)
        return tuple(result) if isinstance(result, list) else result
    return wrapper
7,492 | import asyncio
import socket
import ssdp
import xmltodict
from ssdp import network
def get_ip():
    """Best-effort local IP discovery; falls back to 127.0.0.1.

    Uses a UDP "connect" to pick the outgoing interface — no packet is
    actually sent, so the target address does not need to be reachable.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.settimeout(0)
    try:
        probe.connect(("10.254.254.254", 1))
        return probe.getsockname()[0]
    except Exception:
        return "127.0.0.1"
    finally:
        probe.close()
class Handler(ssdp.aio.SSDP):
    """SSDP protocol handler that records the LOCATION header of every
    response received."""

    def __init__(self):
        super().__init__()
        self.devices = []

    def clear(self):
        """Forget previously discovered locations."""
        self.devices = []

    def __call__(self):
        # asyncio's create_datagram_endpoint wants a protocol *factory*;
        # returning self lets a single Handler instance serve as one.
        return self

    def response_received(self, response: ssdp.messages.SSDPResponse, addr):
        lowered = {key.lower(): value for key, value in response.headers}
        if "location" in lowered:
            self.devices.append(lowered["location"])
async def find_youtube_app(web_session, url_location):
    """Fetch a DIAL device description and probe its YouTube app.

    Args:
        web_session: open aiohttp client session.
        url_location: device-description URL taken from an SSDP response.

    Returns:
        ``{"screen_id", "name", "offset"}`` when the YouTube app answers
        with HTTP 200; implicitly returns ``None`` otherwise (callers must
        handle that).
    """
    async with web_session.get(url_location) as response:
        headers = response.headers
        response = await response.text()
    # print(headers)
    data = xmltodict.parse(response)
    name = data["root"]["device"]["friendlyName"]
    # NOTE(review): this local Handler appears unused in this function.
    handler = Handler()
    handler.clear()
    # The DIAL Application-URL header plus app name yields the app endpoint.
    app_url = headers["application-url"]
    youtube_url = app_url + "YouTube"
    # print(youtube_url)
    async with web_session.get(youtube_url) as response:
        status_code = response.status
        response = await response.text()
    # print(status_code)
    if status_code == 200:
        data = xmltodict.parse(response)
        data = data["service"]
        screen_id = data["additionalData"]["screenId"]
        return {"screen_id": screen_id, "name": name, "offset": 0}
The provided code snippet includes necessary dependencies for implementing the `discover` function. Write a Python function `async def discover(web_session)` to solve the following problem:
Send out an M-SEARCH request and listen for responses.
Here is the function:
async def discover(web_session):
    """Send out an M-SEARCH request and listen for responses.

    Broadcasts an SSDP M-SEARCH for DIAL-capable devices, collects response
    locations for roughly four seconds, then queries each location for its
    YouTube app information.

    Args:
        web_session: open aiohttp session used for the follow-up HTTP calls.

    Returns:
        A list of ``find_youtube_app`` results (entries may be ``None`` for
        devices whose YouTube probe does not return HTTP 200).
    """
    bind = None
    search_target = "urn:dial-multiscreen-org:service:dial:1"
    max_wait = 10
    handler = Handler()
    family, addr = network.get_best_family(bind, network.PORT)
    loop = asyncio.get_event_loop()
    ip_address = get_ip()
    connect = loop.create_datagram_endpoint(
        handler, family=family, local_addr=(ip_address, None)
    )
    transport, protocol = await connect
    target = network.MULTICAST_ADDRESS_IPV4, network.PORT
    search_request = ssdp.messages.SSDPRequest(
        "M-SEARCH",
        headers={
            "HOST": "%s:%d" % target,
            "MAN": '"ssdp:discover"',
            "MX": str(max_wait),  # seconds to delay response [1..5]
            "ST": search_target,
        },
    )
    target = network.MULTICAST_ADDRESS_IPV4, network.PORT
    search_request.sendto(transport, target)
    # print(search_request, addr[:2])
    try:
        # Give devices a few seconds to answer before closing the socket.
        await asyncio.sleep(4)
    finally:
        transport.close()
    devices = []
    for i in handler.devices:
        devices.append(await find_youtube_app(web_session, i))
    return devices
7,493 | import asyncio
import aiohttp
from . import api_helpers, ytlounge
async def pair_device():
    """Interactively pair with a TV via its 'Link with TV code'.

    Prompts on stdin for the 12-digit code, pairs through the YouTube lounge
    API, and returns ``{"screen_id", "name"}`` on success or ``None`` on any
    failure (errors are printed rather than raised).
    """
    try:
        lounge_controller = ytlounge.YtLoungeApi("iSponsorBlockTV")
        pairing_code = input(
            "Enter pairing code (found in Settings - Link with TV code): "
        )
        pairing_code = int(
            pairing_code.replace("-", "").replace(" ", "")
        )  # remove dashes and spaces
        print("Pairing...")
        paired = await lounge_controller.pair(pairing_code)
        if not paired:
            print("Failed to pair device")
            return
        device = {
            "screen_id": lounge_controller.auth.screen_id,
            "name": lounge_controller.screen_name,
        }
        print(f"Paired device: {device['name']}")
        return device
    except Exception as e:
        # Broad catch is deliberate: pairing is best-effort and the CLI
        # reports the error instead of crashing.
        print(f"Failed to pair device: {e}")
        return
7,494 | import argparse
import json
import logging
import os
import sys
import time
from appdirs import user_data_dir
from . import config_setup, main, setup_wizard
from .constants import config_file_blacklist_keys
class Config:
def __init__(self, data_dir):
def validate(self):
def __load(self):
def save(self):
def __eq__(self, other):
def main(config, debug):
def app_start():
    """CLI entry point: parse arguments, then run setup or the main loop.

    NOTE(review): ``main`` here is the imported sibling module (``from . import
    main``), hence the ``main.main(...)`` call at the end.
    """
    # If env has a data dir use that, otherwise use the default
    default_data_dir = os.getenv("iSPBTV_data_dir") or user_data_dir(
        "iSponsorBlockTV", "dmunozv04"
    )
    parser = argparse.ArgumentParser(description="iSponsorblockTV")
    parser.add_argument(
        "--data-dir", "-d", default=default_data_dir, help="data directory"
    )
    parser.add_argument(
        "--setup", "-s", action="store_true", help="setup the program graphically"
    )
    parser.add_argument(
        "--setup-cli",
        "-sc",
        action="store_true",
        help="setup the program in the command line",
    )
    parser.add_argument("--debug", action="store_true", help="debug mode")
    args = parser.parse_args()
    config = Config(args.data_dir)
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    if args.setup:  # Set up the config file graphically
        setup_wizard.main(config)
        sys.exit()
    if args.setup_cli:  # Set up the config file
        config_setup.main(config, args.debug)
    else:
        # Normal run: validate the stored config and start the service.
        config.validate()
        main.main(config, args.debug)
7,495 | import asyncio
import copy
import aiohttp
from textual import on
from textual.app import App, ComposeResult
from textual.containers import (
Container,
Grid,
Horizontal,
ScrollableContainer,
Vertical,
)
from textual.events import Click
from textual.screen import Screen
from textual.validation import Function
from textual.widgets import (
Button,
Checkbox,
ContentSwitcher,
Footer,
Header,
Input,
Label,
RadioButton,
RadioSet,
SelectionList,
Static,
)
from textual.widgets.selection_list import Selection
from textual_slider import Slider
from . import api_helpers, ytlounge
from .constants import skip_categories
def _validate_pairing_code(pairing_code: str) -> bool:
try:
pairing_code = pairing_code.replace("-", "").replace(" ", "")
int(pairing_code)
return len(pairing_code) == 12
except ValueError:
return False # not a number | null |
7,496 | import argparse
import pickle
import numpy as np
def get_parser():
    """Command-line options for the random evaluation-data generator."""
    parser = argparse.ArgumentParser(description='DouZero: random data generator')
    parser.add_argument('--output', type=str, default='eval_data')
    parser.add_argument('--num_games', type=int, default=10000)
    return parser
7,497 | import argparse
import pickle
import numpy as np
deck = []
deck.extend([17 for _ in range(4)])
deck.extend([20, 30])
def generate():
    """Deal one random Dou Dizhu game.

    Shuffles a copy of the module-level ``deck`` and splits it into the
    landlord's 20 cards, the two farmers' 17 cards each, and the 3 public
    landlord cards (which are part of the landlord's hand). Every hand is
    returned sorted.
    """
    shuffled = deck.copy()
    np.random.shuffle(shuffled)
    card_play_data = {
        'landlord': shuffled[:20],
        'landlord_up': shuffled[20:37],
        'landlord_down': shuffled[37:54],
        'three_landlord_cards': shuffled[17:20],
    }
    for hand in card_play_data.values():
        hand.sort()
    return card_play_data
7,498 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `_format_observation` function. Write a Python function `def _format_observation(obs, device)` to solve the following problem:
A utility function to process observations and move them to CUDA.
Here is the function:
def _format_observation(obs, device):
"""
A utility function to process observations and
move them to CUDA.
"""
position = obs['position']
if not device == "cpu":
device = 'cuda:' + str(device)
device = torch.device(device)
x_batch = torch.from_numpy(obs['x_batch']).to(device)
z_batch = torch.from_numpy(obs['z_batch']).to(device)
x_no_action = torch.from_numpy(obs['x_no_action'])
z = torch.from_numpy(obs['z'])
obs = {'x_batch': x_batch,
'z_batch': z_batch,
'legal_actions': obs['legal_actions'],
}
return position, obs, x_no_action, z | A utility function to process observations and move them to CUDA. |
7,500 | import os
import threading
import time
import timeit
import pprint
from collections import deque
import numpy as np
import torch
from torch import multiprocessing as mp
from torch import nn
from .file_writer import FileWriter
from .models import Model
from .utils import get_batch, log, create_env, create_buffers, create_optimizers, act
def learn(position,
          actor_models,
          model,
          batch,
          optimizer,
          flags,
          lock):
    """Performs a learning (optimization) step.

    Moves the sampled batch to the training device, computes the value loss
    against the Monte-Carlo return targets, applies one clipped gradient
    step, and pushes the updated weights to every actor model.

    NOTE(review): ``mean_episode_return_buf`` and ``compute_loss`` are
    module-level names defined elsewhere in the original file — not visible
    in this excerpt.
    """
    if flags.training_device != "cpu":
        device = torch.device('cuda:'+str(flags.training_device))
    else:
        device = torch.device('cpu')
    obs_x_no_action = batch['obs_x_no_action'].to(device)
    obs_action = batch['obs_action'].to(device)
    # Recombine state features with the chosen action encoding, then flatten
    # the (time, batch) dimensions into one.
    obs_x = torch.cat((obs_x_no_action, obs_action), dim=2).float()
    obs_x = torch.flatten(obs_x, 0, 1)
    obs_z = torch.flatten(batch['obs_z'].to(device), 0, 1).float()
    target = torch.flatten(batch['target'].to(device), 0, 1)
    # Track episode returns only at terminal steps for reporting.
    episode_returns = batch['episode_return'][batch['done']]
    mean_episode_return_buf[position].append(torch.mean(episode_returns).to(device))
    with lock:
        learner_outputs = model(obs_z, obs_x, return_value=True)
        loss = compute_loss(learner_outputs['values'], target)
        stats = {
            'mean_episode_return_'+position: torch.mean(torch.stack([_r for _r in mean_episode_return_buf[position]])).item(),
            'loss_'+position: loss.item(),
        }
        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), flags.max_grad_norm)
        optimizer.step()
        # Broadcast the fresh weights to every actor's copy of this position.
        for actor_model in actor_models.values():
            actor_model.get_model(position).load_state_dict(model.state_dict())
        return stats
class FileWriter:
    """Experiment logger writing console/file messages, CSV metric rows, and
    JSON metadata under ``rootdir/<xpid>``.

    NOTE(review): ``gather_metadata`` and the ``copy``, ``csv``, ``json``,
    ``datetime`` and ``Dict`` names used below are imported/defined elsewhere
    in the original module — not visible in this excerpt.
    """
    def __init__(self,
                 xpid: str = None,
                 xp_args: dict = None,
                 rootdir: str = '~/palaas'):
        if not xpid:
            # make unique id
            xpid = '{proc}_{unixtime}'.format(
                proc=os.getpid(), unixtime=int(time.time()))
        self.xpid = xpid
        self._tick = 0
        # metadata gathering
        if xp_args is None:
            xp_args = {}
        self.metadata = gather_metadata()
        # we need to copy the args, otherwise when we close the file writer
        # (and rewrite the args) we might have non-serializable objects (or
        # other nasty stuff).
        self.metadata['args'] = copy.deepcopy(xp_args)
        self.metadata['xpid'] = self.xpid
        formatter = logging.Formatter('%(message)s')
        self._logger = logging.getLogger('palaas/out')
        # to stdout handler
        shandle = logging.StreamHandler()
        shandle.setFormatter(formatter)
        self._logger.addHandler(shandle)
        self._logger.setLevel(logging.INFO)
        rootdir = os.path.expandvars(os.path.expanduser(rootdir))
        # to file handler
        self.basepath = os.path.join(rootdir, self.xpid)
        if not os.path.exists(self.basepath):
            self._logger.info('Creating log directory: %s', self.basepath)
            os.makedirs(self.basepath, exist_ok=True)
        else:
            self._logger.info('Found log directory: %s', self.basepath)
        # NOTE: remove latest because it creates errors when running on slurm
        # multiple jobs trying to write to latest but cannot find it
        # Add 'latest' as symlink unless it exists and is no symlink.
        # symlink = os.path.join(rootdir, 'latest')
        # if os.path.islink(symlink):
        #     os.remove(symlink)
        # if not os.path.exists(symlink):
        #     os.symlink(self.basepath, symlink)
        #     self._logger.info('Symlinked log directory: %s', symlink)
        self.paths = dict(
            msg='{base}/out.log'.format(base=self.basepath),
            logs='{base}/logs.csv'.format(base=self.basepath),
            fields='{base}/fields.csv'.format(base=self.basepath),
            meta='{base}/meta.json'.format(base=self.basepath),
        )
        self._logger.info('Saving arguments to %s', self.paths['meta'])
        if os.path.exists(self.paths['meta']):
            self._logger.warning('Path to meta file already exists. '
                                 'Not overriding meta.')
        else:
            self._save_metadata()
        self._logger.info('Saving messages to %s', self.paths['msg'])
        if os.path.exists(self.paths['msg']):
            self._logger.warning('Path to message file already exists. '
                                 'New data will be appended.')
        fhandle = logging.FileHandler(self.paths['msg'])
        fhandle.setFormatter(formatter)
        self._logger.addHandler(fhandle)
        self._logger.info('Saving logs data to %s', self.paths['logs'])
        self._logger.info('Saving logs\' fields to %s', self.paths['fields'])
        if os.path.exists(self.paths['logs']):
            self._logger.warning('Path to log file already exists. '
                                 'New data will be appended.')
            # Resuming a run: reuse the previously written CSV field order.
            with open(self.paths['fields'], 'r') as csvfile:
                reader = csv.reader(csvfile)
                self.fieldnames = list(reader)[0]
        else:
            self.fieldnames = ['_tick', '_time']

    def log(self, to_log: Dict, tick: int = None,
            verbose: bool = False) -> None:
        # Append one metrics row; unseen keys extend the CSV schema on the
        # fly (the fields file is rewritten when that happens).
        if tick is not None:
            raise NotImplementedError
        else:
            to_log['_tick'] = self._tick
            self._tick += 1
        to_log['_time'] = time.time()
        old_len = len(self.fieldnames)
        for k in to_log:
            if k not in self.fieldnames:
                self.fieldnames.append(k)
        if old_len != len(self.fieldnames):
            with open(self.paths['fields'], 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(self.fieldnames)
            self._logger.info('Updated log fields: %s', self.fieldnames)
        if to_log['_tick'] == 0:
            # print("\ncreating logs file ")
            with open(self.paths['logs'], 'a') as f:
                f.write('# %s\n' % ','.join(self.fieldnames))
        if verbose:
            self._logger.info('LOG | %s', ', '.join(
                ['{}: {}'.format(k, to_log[k]) for k in sorted(to_log)]))
        with open(self.paths['logs'], 'a') as f:
            writer = csv.DictWriter(f, fieldnames=self.fieldnames)
            writer.writerow(to_log)
            # print("\nadded to log file")

    def close(self, successful: bool = True) -> None:
        # Record the end timestamp and outcome in the metadata file.
        self.metadata['date_end'] = datetime.datetime.now().strftime(
            '%Y-%m-%d %H:%M:%S.%f')
        self.metadata['successful'] = successful
        self._save_metadata()

    def _save_metadata(self) -> None:
        with open(self.paths['meta'], 'w') as jsonfile:
            json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
class Model:
    """
    The wrapper for the three models. We also wrap several
    interfaces such as share_memory, eval, etc.

    Holds one network per position: the landlord and the two farmers
    (landlord_up / landlord_down).
    """
    def __init__(self, device=0):
        target = device if device == "cpu" else 'cuda:' + str(device)
        self.models = {
            'landlord': LandlordLstmModel().to(torch.device(target)),
            'landlord_up': FarmerLstmModel().to(torch.device(target)),
            'landlord_down': FarmerLstmModel().to(torch.device(target)),
        }

    def forward(self, position, z, x, training=False, flags=None):
        """Dispatch a forward pass to the network for ``position``."""
        return self.models[position].forward(z, x, training, flags)

    def share_memory(self):
        for net in self.models.values():
            net.share_memory()

    def eval(self):
        for net in self.models.values():
            net.eval()

    def parameters(self, position):
        return self.models[position].parameters()

    def get_model(self, position):
        return self.models[position]

    def get_models(self):
        return self.models
# Module-level trainer logger. NOTE(review): ``shandle`` (a logging handler)
# is created earlier in the original module — not visible in this excerpt.
log = logging.getLogger('doudzero')
log.propagate = False
log.addHandler(shandle)
log.setLevel(logging.INFO)
def get_batch(free_queue,
              full_queue,
              buffers,
              flags,
              lock):
    """
    Sample one training batch: pull ``batch_size`` buffer indices from the
    full queue, stack the corresponding buffer entries along a new batch
    dimension, and recycle the indices through the free queue.
    """
    with lock:
        indices = [full_queue.get() for _ in range(flags.batch_size)]
        batch = {
            key: torch.stack([buffers[key][idx] for idx in indices], dim=1)
            for key in buffers
        }
        for idx in indices:
            free_queue.put(idx)
        return batch
def create_optimizers(flags, learner_model):
    """
    Create three optimizers for the three positions, all sharing the same
    RMSprop hyper-parameters taken from ``flags``.
    """
    positions = ['landlord', 'landlord_up', 'landlord_down']
    return {
        position: torch.optim.RMSprop(
            learner_model.parameters(position),
            lr=flags.learning_rate,
            momentum=flags.momentum,
            eps=flags.epsilon,
            alpha=flags.alpha)
        for position in positions
    }
def create_buffers(flags, device_iterator):
    """
    Allocate shared-memory rollout buffers: one set of tensors per device
    and per position. Landlord observations are 319-dim; the two farmers'
    are 430-dim. Each spec gets ``flags.num_buffers`` tensors of length
    ``flags.unroll_length``.
    """
    T = flags.unroll_length
    positions = ['landlord', 'landlord_up', 'landlord_down']
    buffers = {}
    for device in device_iterator:
        torch_device = torch.device('cpu' if device == "cpu" else 'cuda:' + str(device))
        buffers[device] = {}
        for position in positions:
            x_dim = 319 if position == 'landlord' else 430
            specs = dict(
                done=dict(size=(T,), dtype=torch.bool),
                episode_return=dict(size=(T,), dtype=torch.float32),
                target=dict(size=(T,), dtype=torch.float32),
                obs_x_no_action=dict(size=(T, x_dim), dtype=torch.int8),
                obs_action=dict(size=(T, 54), dtype=torch.int8),
                obs_z=dict(size=(T, 5, 162), dtype=torch.int8),
            )
            buffers[device][position] = {
                key: [
                    torch.empty(**spec).to(torch_device).share_memory_()
                    for _ in range(flags.num_buffers)
                ]
                for key, spec in specs.items()
            }
    return buffers
def act(i, device, free_queue, full_queue, model, buffers, flags):
    """
    Actor-process entry point: runs forever, generating self-play data.

    Plays episodes with the shared-memory actor ``model``, accumulates
    per-position trajectories locally, and flushes them in chunks of
    ``flags.unroll_length`` steps into the shared ``buffers``.
    ``free_queue``/``full_queue`` synchronise buffer-slot ownership with
    the learner in the main process.

    :param i: actor index (used only for logging).
    :param device: 'cpu' or a GPU index this actor runs on.
    """
    positions = ['landlord', 'landlord_up', 'landlord_down']
    try:
        T = flags.unroll_length
        log.info('Device %s Actor %i started.', str(device), i)
        env = create_env(flags)
        env = Environment(env, device)
        # Local per-position trajectory storage, flushed in chunks of T steps.
        done_buf = {p: [] for p in positions}
        episode_return_buf = {p: [] for p in positions}
        target_buf = {p: [] for p in positions}
        obs_x_no_action_buf = {p: [] for p in positions}
        obs_action_buf = {p: [] for p in positions}
        obs_z_buf = {p: [] for p in positions}
        size = {p: 0 for p in positions}
        position, obs, env_output = env.initial()
        while True:
            while True:  # play one full game
                obs_x_no_action_buf[position].append(env_output['obs_x_no_action'])
                obs_z_buf[position].append(env_output['obs_z'])
                with torch.no_grad():
                    agent_output = model.forward(position, obs['z_batch'], obs['x_batch'], flags=flags)
                _action_idx = int(agent_output['action'].cpu().detach().numpy())
                action = obs['legal_actions'][_action_idx]
                obs_action_buf[position].append(_cards2tensor(action))
                size[position] += 1
                position, obs, env_output = env.step(action)
                if env_output['done']:
                    # Game over: back-fill outcome labels for every step of
                    # this game that has not been labelled yet.
                    for p in positions:
                        diff = size[p] - len(target_buf[p])
                        if diff > 0:
                            done_buf[p].extend([False for _ in range(diff-1)])
                            done_buf[p].append(True)
                            # Farmers receive the negated landlord return.
                            episode_return = env_output['episode_return'] if p == 'landlord' else -env_output['episode_return']
                            episode_return_buf[p].extend([0.0 for _ in range(diff-1)])
                            episode_return_buf[p].append(episode_return)
                            target_buf[p].extend([episode_return for _ in range(diff)])
                    break
            for p in positions:
                # Flush full unroll chunks into the shared buffers while more
                # than T labelled steps are stored for this position.
                while size[p] > T:
                    index = free_queue[p].get()
                    if index is None:
                        break
                    for t in range(T):
                        buffers[p]['done'][index][t, ...] = done_buf[p][t]
                        buffers[p]['episode_return'][index][t, ...] = episode_return_buf[p][t]
                        buffers[p]['target'][index][t, ...] = target_buf[p][t]
                        buffers[p]['obs_x_no_action'][index][t, ...] = obs_x_no_action_buf[p][t]
                        buffers[p]['obs_action'][index][t, ...] = obs_action_buf[p][t]
                        buffers[p]['obs_z'][index][t, ...] = obs_z_buf[p][t]
                    full_queue[p].put(index)
                    # Drop the flushed prefix; keep the remainder for later.
                    done_buf[p] = done_buf[p][T:]
                    episode_return_buf[p] = episode_return_buf[p][T:]
                    target_buf[p] = target_buf[p][T:]
                    obs_x_no_action_buf[p] = obs_x_no_action_buf[p][T:]
                    obs_action_buf[p] = obs_action_buf[p][T:]
                    obs_z_buf[p] = obs_z_buf[p][T:]
                    size[p] -= T
    except KeyboardInterrupt:
        pass
    except Exception as e:
        log.error('Exception in worker process %i', i)
        traceback.print_exc()
        print()
        raise e
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(flags)` to solve the following problem:
This is the main function for training. It will first initialize everything, such as buffers, optimizers, etc. Then it will start subprocesses as actors. Then, it will call the learning function with multiple threads.
Here is the function:
def train(flags):
    """
    This is the main function for training. It will first
    initialize everything, such as buffers, optimizers, etc.
    Then it will start subprocesses as actors. Then, it will call
    the learning function with multiple threads.
    """
    # A GPU is required unless both acting and training run on CPU.
    if not flags.actor_device_cpu or flags.training_device != 'cpu':
        if not torch.cuda.is_available():
            raise AssertionError("CUDA not available. If you have GPUs, please specify the ID after `--gpu_devices`. Otherwise, please train with CPU with `python3 train.py --actor_device_cpu --training_device cpu`")
    plogger = FileWriter(
        xpid=flags.xpid,
        xp_args=flags.__dict__,
        rootdir=flags.savedir,
    )
    checkpointpath = os.path.expandvars(
        os.path.expanduser('%s/%s/%s' % (flags.savedir, flags.xpid, 'model.tar')))
    T = flags.unroll_length
    B = flags.batch_size
    if flags.actor_device_cpu:
        device_iterator = ['cpu']
    else:
        device_iterator = range(flags.num_actor_devices)
        assert flags.num_actor_devices <= len(flags.gpu_devices.split(',')), 'The number of actor devices can not exceed the number of available devices'
    # Initialize actor models: one shared-memory, eval-mode copy per device.
    models = {}
    for device in device_iterator:
        model = Model(device=device)
        model.share_memory()
        model.eval()
        models[device] = model
    # Initialize buffers
    buffers = create_buffers(flags, device_iterator)
    # Initialize queues that pass buffer-slot ownership between actor
    # processes (producers) and learner threads (consumers).
    actor_processes = []
    ctx = mp.get_context('spawn')
    free_queue = {}
    full_queue = {}
    for device in device_iterator:
        _free_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()}
        _full_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()}
        free_queue[device] = _free_queue
        full_queue[device] = _full_queue
    # Learner model for training
    learner_model = Model(device=flags.training_device)
    # Create optimizers (one RMSprop per position)
    optimizers = create_optimizers(flags, learner_model)
    # Stat Keys
    stat_keys = [
        'mean_episode_return_landlord',
        'loss_landlord',
        'mean_episode_return_landlord_up',
        'loss_landlord_up',
        'mean_episode_return_landlord_down',
        'loss_landlord_down',
    ]
    frames, stats = 0, {k: 0 for k in stat_keys}
    position_frames = {'landlord':0, 'landlord_up':0, 'landlord_down':0}
    # Load models if any: resume learner, optimizers, actor copies and counters.
    if flags.load_model and os.path.exists(checkpointpath):
        checkpoint_states = torch.load(
            checkpointpath, map_location=("cuda:"+str(flags.training_device) if flags.training_device != "cpu" else "cpu")
        )
        for k in ['landlord', 'landlord_up', 'landlord_down']:
            learner_model.get_model(k).load_state_dict(checkpoint_states["model_state_dict"][k])
            optimizers[k].load_state_dict(checkpoint_states["optimizer_state_dict"][k])
            for device in device_iterator:
                models[device].get_model(k).load_state_dict(learner_model.get_model(k).state_dict())
        stats = checkpoint_states["stats"]
        frames = checkpoint_states["frames"]
        position_frames = checkpoint_states["position_frames"]
        log.info(f"Resuming preempted job, current stats:\n{stats}")
    # Starting actor processes
    for device in device_iterator:
        num_actors = flags.num_actors  # NOTE(review): unused local
        for i in range(flags.num_actors):
            actor = ctx.Process(
                target=act,
                args=(i, device, free_queue[device], full_queue[device], models[device], buffers[device], flags))
            actor.start()
            actor_processes.append(actor)
    def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()):
        """Thread target for the learning process."""
        # NOTE: the default `lock` is deliberately created once at function
        # definition time and therefore shared by ALL learner threads; it
        # guards the global stats/frame counters below.
        nonlocal frames, position_frames, stats
        while frames < flags.total_frames:
            batch = get_batch(free_queue[device][position], full_queue[device][position], buffers[device][position], flags, local_lock)
            _stats = learn(position, models, learner_model.get_model(position), batch,
                           optimizers[position], flags, position_lock)
            with lock:
                for k in _stats:
                    stats[k] = _stats[k]
                to_log = dict(frames=frames)
                to_log.update({k: stats[k] for k in stat_keys})
                plogger.log(to_log)
                frames += T * B
                position_frames[position] += T * B
    # Prime the free queues: initially every buffer slot is writable.
    for device in device_iterator:
        for m in range(flags.num_buffers):
            free_queue[device]['landlord'].put(m)
            free_queue[device]['landlord_up'].put(m)
            free_queue[device]['landlord_down'].put(m)
    threads = []
    locks = {}
    for device in device_iterator:
        locks[device] = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()}
    position_locks = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()}
    for device in device_iterator:
        for i in range(flags.num_threads):
            for position in ['landlord', 'landlord_up', 'landlord_down']:
                thread = threading.Thread(
                    target=batch_and_learn, name='batch-and-learn-%d' % i, args=(i,device,position,locks[device][position],position_locks[position]))
                thread.start()
                threads.append(thread)
    def checkpoint(frames):
        """Persist models, optimizers and counters, plus per-position
        weight snapshots for later evaluation."""
        if flags.disable_checkpoint:
            return
        log.info('Saving checkpoint to %s', checkpointpath)
        _models = learner_model.get_models()
        torch.save({
            'model_state_dict': {k: _models[k].state_dict() for k in _models},
            'optimizer_state_dict': {k: optimizers[k].state_dict() for k in optimizers},
            "stats": stats,
            'flags': vars(flags),
            'frames': frames,
            'position_frames': position_frames
        }, checkpointpath)
        # Save the weights for evaluation purpose
        for position in ['landlord', 'landlord_up', 'landlord_down']:
            model_weights_dir = os.path.expandvars(os.path.expanduser(
                '%s/%s/%s' % (flags.savedir, flags.xpid, position+'_weights_'+str(frames)+'.ckpt')))
            torch.save(learner_model.get_model(position).state_dict(), model_weights_dir)
    fps_log = []
    timer = timeit.default_timer
    try:
        last_checkpoint_time = timer() - flags.save_interval * 60
        # Main monitoring loop: wake every 5 s, checkpoint periodically and
        # report throughput until the frame budget is exhausted.
        while frames < flags.total_frames:
            start_frames = frames
            position_start_frames = {k: position_frames[k] for k in position_frames}
            start_time = timer()
            time.sleep(5)
            if timer() - last_checkpoint_time > flags.save_interval * 60:
                checkpoint(frames)
                last_checkpoint_time = timer()
            end_time = timer()
            # Throughput over the last interval plus a moving average over
            # (at most) the last 24 intervals.
            fps = (frames - start_frames) / (end_time - start_time)
            fps_log.append(fps)
            if len(fps_log) > 24:
                fps_log = fps_log[1:]
            fps_avg = np.mean(fps_log)
            position_fps = {k:(position_frames[k]-position_start_frames[k])/(end_time-start_time) for k in position_frames}
            log.info('After %i (L:%i U:%i D:%i) frames: @ %.1f fps (avg@ %.1f fps) (L:%.1f U:%.1f D:%.1f) Stats:\n%s',
                     frames,
                     position_frames['landlord'],
                     position_frames['landlord_up'],
                     position_frames['landlord_down'],
                     fps,
                     fps_avg,
                     position_fps['landlord'],
                     position_fps['landlord_up'],
                     position_fps['landlord_down'],
                     pprint.pformat(stats))
    except KeyboardInterrupt:
        return
    else:
        for thread in threads:
            thread.join()
        log.info('Learning finished after %d frames.', frames)
    checkpoint(frames)
plogger.close() | This is the main funtion for training. It will first initilize everything, such as buffers, optimizers, etc. Then it will start subprocesses as actors. Then, it will call learning function with multiple threads. |
7,502 | import multiprocessing as mp
import pickle
from douzero.env.game import GameEnv
def mp_simulate(card_play_data_list, card_play_model_path_dict, q):
    """
    Worker-process entry: play every deal in ``card_play_data_list`` to
    completion and report the aggregate counters through queue ``q``.
    """
    players = load_card_play_models(card_play_model_path_dict)
    env = GameEnv(players)
    for card_play_data in card_play_data_list:
        env.card_play_init(card_play_data)
        while not env.game_over:
            env.step()
        env.reset()
    # (landlord wins, farmer wins, landlord score, farmer score)
    q.put((env.num_wins['landlord'],
           env.num_wins['farmer'],
           env.num_scores['landlord'],
           env.num_scores['farmer']))
def data_allocation_per_worker(card_play_data_list, num_workers):
    """
    Deal the evaluation games out to workers round-robin.

    Returns a list of ``num_workers`` lists; game ``i`` is assigned to
    worker ``i % num_workers``.
    """
    shards = [[] for _ in range(num_workers)]
    for i, game in enumerate(card_play_data_list):
        shards[i % num_workers].append(game)
    return shards
def evaluate(landlord, landlord_up, landlord_down, eval_data, num_workers):
    """
    Evaluate a triple of card-play agents over a pre-generated deal set.

    :param landlord: model identifier/path for the landlord seat
        (consumed by ``load_card_play_models`` in the workers).
    :param landlord_up: model identifier/path for the up-peasant seat.
    :param landlord_down: model identifier/path for the down-peasant seat.
    :param eval_data: path to a pickle file holding the list of deals.
    :param num_workers: number of simulation processes to spawn.
    """
    # NOTE(review): pickle.load executes arbitrary code if the file is
    # untrusted -- only use evaluation data you generated yourself.
    with open(eval_data, 'rb') as f:
        card_play_data_list = pickle.load(f)
    card_play_data_list_each_worker = data_allocation_per_worker(
        card_play_data_list, num_workers)
    del card_play_data_list
    card_play_model_path_dict = {
        'landlord': landlord,
        'landlord_up': landlord_up,
        'landlord_down': landlord_down}
    num_landlord_wins = 0
    num_farmer_wins = 0
    num_landlord_scores = 0
    num_farmer_scores = 0
    ctx = mp.get_context('spawn')
    q = ctx.SimpleQueue()
    processes = []
    # One worker process per data shard.
    for card_paly_data in card_play_data_list_each_worker:
        p = ctx.Process(
            target=mp_simulate,
            args=(card_paly_data, card_play_model_path_dict, q))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    # Each worker reports (landlord wins, farmer wins, landlord score,
    # farmer score); aggregate across all workers.
    for i in range(num_workers):
        result = q.get()
        num_landlord_wins += result[0]
        num_farmer_wins += result[1]
        num_landlord_scores += result[2]
        num_farmer_scores += result[3]
    num_total_wins = num_landlord_wins + num_farmer_wins
    # WP = winning percentage of each side.
    print('WP results:')
    print('landlord : Farmers - {} : {}'.format(num_landlord_wins / num_total_wins, num_farmer_wins / num_total_wins))
    print('ADP results:')
print('landlord : Farmers - {} : {}'.format(num_landlord_scores / num_total_wins, 2 * num_farmer_scores / num_total_wins)) | null |
7,513 | import collections
def common_handle(moves, rival_move):
    """
    Filter ``moves`` down to the candidates that beat ``rival_move``,
    i.e. whose first element is strictly greater than the rival's.
    """
    return [move for move in moves if move[0] > rival_move[0]]
def filter_type_9_serial_pair(moves, rival_move):
return common_handle(moves, rival_move) | null |
7,520 | from matplotlib import pyplot as plt
import numpy as np
import os
def plot_log(csv_file, x_label=None, y_label=None, plot_max=False, label=None, fontsize=20):
    """
    Plot one training-log curve onto the current matplotlib axes.

    Columns 1 and 2 of ``csv_file`` (one header row skipped) are used as
    the x and y series.  With ``plot_max`` the maximum y point is
    annotated as well.
    """
    log_data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))
    x = log_data[:, 0]
    y = log_data[:, 1]
    plt.plot(x, y, label=label)
    if x_label is not None:
        plt.xlabel(x_label, fontsize=fontsize)
    if y_label is not None:
        plt.ylabel(y_label, fontsize=fontsize)
    plt.grid(linestyle='-.')
    if plot_max:
        # Annotate the maximum point of the curve.
        index = y.argmax()
        plt.text(x[index], y[index], '({}, {})'.format(int(x[index]), round(y[index], 3)), fontsize=fontsize // 2)
plt.scatter(x[index], y[index], marker='1', alpha=0.8, linewidths=1., c='r') | null |
7,521 | from matplotlib import pyplot as plt
import numpy as np
from spikingjelly.activation_based.examples.conv_fashion_mnist import Net
from spikingjelly import visualizing
import torch
import torch.nn as nn
import torchvision
def plot_log(csv_file, title, x_label, y_label, figsize=(12, 8), plot_max=False):
    """
    Draw one training-log curve in a fresh figure of size ``figsize``.

    Columns 1 and 2 of ``csv_file`` (one header row skipped) are the x
    and y series.  With ``plot_max`` the maximum y point is labelled and
    marked in red.
    """
    data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))
    xs, ys = data[:, 0], data[:, 1]
    fig = plt.figure(figsize=figsize)
    plt.plot(xs, ys)
    plt.xlabel(x_label, fontsize=20)
    plt.ylabel(y_label, fontsize=20)
    plt.title(title, fontsize=20)
    plt.grid(linestyle='-.')
    if plot_max:
        # Mark the maximum of the curve.
        peak = ys.argmax()
        plt.text(xs[peak], ys[peak], '({}, {})'.format(int(xs[peak]), round(ys[peak], 3)), fontsize=14)
        plt.scatter(xs[peak], ys[peak], marker='1', alpha=0.8, linewidths=0.1, c='r')
    # plt.show()
7,523 | from matplotlib import pyplot as plt
import numpy as np
def plot_log(csv_file, title, x_label, y_label, plot_max=False, label=None):
    """
    Add one training-log curve to the current matplotlib axes.

    Reads columns 1 and 2 from ``csv_file`` (skipping the header row) and
    plots y against x under ``label``.  With ``plot_max`` the maximum y
    point is marked with a red tick (its text annotation is disabled).
    """
    data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))
    xs, ys = data[:, 0], data[:, 1]
    plt.plot(xs, ys, label=label)
    plt.xlabel(x_label, fontsize=20)
    plt.ylabel(y_label, fontsize=20)
    plt.title(title, fontsize=20)
    plt.grid(linestyle='-.')
    if plot_max:
        # Mark the maximum of the curve.
        peak = ys.argmax()
        # plt.text(xs[peak], ys[peak], '({}, {})'.format(int(xs[peak]), round(ys[peak], 3)), fontsize=14)
        plt.scatter(xs[peak], ys[peak], marker='1', alpha=0.8, linewidths=0.1, c='r')
    # plt.show()
7,524 | import numpy as np
from matplotlib import pyplot as plt
plt.style.use(['science'])
for i in range(T.shape[0]):
T_str.append(str(int(T[i])))
plt.title('Execution time of Running Forward with $2^{20}$ Neurons')
plt.xlabel('simulation duration T (step)')
plt.ylabel('execution time (ms)')
plt.xticks(x, T_str)
plt.legend(frameon=True)
plt.savefig('./docs/source/_static/tutorials/activation_based/11_cext_neuron_with_lbl/exe_time_f.svg')
plt.savefig('./docs/source/_static/tutorials/activation_based/11_cext_neuron_with_lbl/exe_time_f.pdf')
plt.savefig('./docs/source/_static/tutorials/activation_based/11_cext_neuron_with_lbl/exe_time_f.png')
plt.clf()
plt.title('Execution time of Running Forward and Backward with $2^{20}$ LIF Neurons')
plt.xlabel('simulation duration T (step)')
plt.ylabel('execution time (ms)')
plt.xticks(x, T_str)
plt.legend(frameon=True)
plt.savefig('./docs/source/_static/tutorials/activation_based/11_cext_neuron_with_lbl/exe_time_fb.svg')
plt.savefig('./docs/source/_static/tutorials/activation_based/11_cext_neuron_with_lbl/exe_time_fb.pdf')
plt.savefig('./docs/source/_static/tutorials/activation_based/11_cext_neuron_with_lbl/exe_time_fb.png')
def plot_bar_and_text(x, y, width, label):
plt.bar(x, y, width=width, label=label)
for i in range(x.shape[0]):
plt.text(x[i], y[i] + 0.2, str(round(y[i], 2)), ha='center') | null |
7,525 | from matplotlib import pyplot as plt
import numpy as np
from spikingjelly import visualizing
import torch
import torch.nn as nn
import torchvision
def plot_log(csv_file, title, x_label, y_label, plot_max=False, label=None):
    """
    Add one training-log curve (columns 1 and 2 of ``csv_file``, header
    row skipped) to the current matplotlib axes under ``label``.
    """
    log_data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))
    x = log_data[:, 0]
    y = log_data[:, 1]
    plt.plot(x, y, label=label)
    plt.xlabel(x_label, fontsize=20)
    plt.ylabel(y_label, fontsize=20)
    plt.title(title, fontsize=20)
    plt.grid(linestyle='-.')
    if plot_max:
        # Annotate the maximum point (text label deliberately disabled).
        index = y.argmax()
        # plt.text(x[index], y[index], '({}, {})'.format(int(x[index]), round(y[index], 3)), fontsize=14)
plt.scatter(x[index], y[index], marker='1', alpha=0.8, linewidths=0.1, c='r') | null |
7,526 | from matplotlib import pyplot as plt
import numpy as np
from spikingjelly import visualizing
import torch
import torch.nn as nn
import torchvision
def plot_log(csv_file, title, x_label, y_label, figsize=(12, 8), plot_max=False):
    """
    Draw one training-log curve (columns 1 and 2 of ``csv_file``, header
    row skipped) in a fresh figure of size ``figsize``; optionally
    annotate the maximum y point.
    """
    log_data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))
    x = log_data[:, 0]
    y = log_data[:, 1]
    fig = plt.figure(figsize=figsize)
    plt.plot(x, y)
    plt.xlabel(x_label, fontsize=20)
    plt.ylabel(y_label, fontsize=20)
    plt.title(title, fontsize=20)
    plt.grid(linestyle='-.')
    if plot_max:
        # Annotate the maximum point of the curve.
        index = y.argmax()
        plt.text(x[index], y[index], '({}, {})'.format(int(x[index]), round(y[index], 3)), fontsize=14)
        plt.scatter(x[index], y[index], marker='1', alpha=0.8, linewidths=0.1, c='r')
    # plt.show()
7,527 | import torch
from matplotlib import pyplot as plt
def reset_v(h, s):
return h * (1 - s) | null |
7,529 | from matplotlib import pyplot as plt
import numpy as np
def plot_log(csv_file, title, x_label, y_label, plot_max=False):
    """
    Plot one training-log curve (columns 1 and 2 of ``csv_file``, header
    row skipped) on the current axes; optionally annotate the maximum.
    """
    log_data = np.loadtxt(csv_file, delimiter=',', skiprows=1, usecols=(1, 2))
    x = log_data[:, 0]
    y = log_data[:, 1]
    plt.plot(x, y)
    plt.xlabel(x_label, fontsize=20)
    plt.ylabel(y_label, fontsize=20)
    plt.title(title, fontsize=20)
    plt.grid(linestyle='-.')
    if plot_max:
        # Annotate the maximum point of the curve.
        index = y.argmax()
        plt.text(x[index], y[index], '({}, {})'.format(int(x[index]), round(y[index], 3)), fontsize=14)
plt.scatter(x[index], y[index], marker='1', alpha=0.8, linewidths=2, c='r') | null |
7,530 | import requests
import os
from tqdm import tqdm
def download_url(url, dst):
    """
    Download ``url`` to the local path ``dst``, resuming partial files.

    If ``dst`` already exists, downloading restarts from its current size
    via an HTTP Range request.  Progress is shown with tqdm.

    :return: the total file size in bytes as reported by the server
        (returned early if the file is already complete).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
    }
    response = requests.get(url, headers=headers, stream=True)  # (1) probe the server
    file_size = int(response.headers['content-length'])  # (2) total size in bytes
    if os.path.exists(dst):
        first_byte = os.path.getsize(dst)  # (3) resume point
    else:
        first_byte = 0
    if first_byte >= file_size:  # (4) already fully downloaded
        return file_size
    # Ask the server for the missing byte range only.
    header = {"Range": f"bytes={first_byte}-{file_size}"}
    pbar = tqdm(total=file_size, initial=first_byte, unit='B', unit_scale=True, desc=dst)
    req = requests.get(url, headers=header, stream=True)  # (5) ranged request
    with open(dst, 'ab') as f:
        for chunk in req.iter_content(chunk_size=1024):  # (6) stream to disk
            if chunk:
                f.write(chunk)
                pbar.update(1024)
    pbar.close()
return file_size | null |
7,531 | import torch
import torchvision
import torch.nn as nn
import spikingjelly
from spikingjelly.activation_based import ann2snn
from tqdm import tqdm
from spikingjelly.activation_based.ann2snn.sample_models import mnist_cnn
import numpy as np
import matplotlib.pyplot as plt
def val(net, device, data_loader, T=None):
    """
    Evaluate ``net`` on ``data_loader``.

    With ``T is None`` the network is run once per batch (plain ANN) and a
    single accuracy is accumulated.  Otherwise the network is run for
    ``T`` time steps per batch (stateful modules are reset first) and the
    accuracy of the accumulated output after each step is recorded.
    """
    net.eval().to(device)
    correct = 0.0
    total = 0.0
    if T is not None:
        corrects = np.zeros(T)
    with torch.no_grad():
        for batch, (img, label) in enumerate(tqdm(data_loader)):
            img = img.to(device)
            if T is None:
                out = net(img)
                correct += (out.argmax(dim=1) == label.to(device)).float().sum().item()
            else:
                # Reset stateful (spiking) modules before each batch.
                for m in net.modules():
                    if hasattr(m, 'reset'):
                        m.reset()
                for t in range(T):
                    # Accumulate the network output over time steps.
                    if t == 0:
                        out = net(img)
                    else:
                        out += net(img)
                    corrects[t] += (out.argmax(dim=1) == label.to(device)).float().sum().item()
            total += out.shape[0]
return correct / total if T is None else corrects / total | null |
7,532 | import torch
import torchvision
from tqdm import tqdm
import spikingjelly.activation_based.ann2snn as ann2snn
from spikingjelly.activation_based.ann2snn.sample_models import cifar10_resnet
def val(net, device, data_loader, T=None):
    """
    Evaluate ``net`` on ``data_loader`` and print the accuracy.

    With ``T`` set, the network is run for ``T`` time steps per batch
    (stateful modules reset first) and its accumulated output is used for
    prediction; otherwise a single forward pass is used.
    """
    net.eval().to(device)
    correct = 0.0
    total = 0.0
    with torch.no_grad():
        for batch, (img, label) in enumerate(tqdm(data_loader)):
            img = img.to(device)
            if T is None:
                out = net(img)
            else:
                # Reset stateful (spiking) modules before each batch.
                for m in net.modules():
                    if hasattr(m, 'reset'):
                        m.reset()
                for t in range(T):
                    # Accumulate the network output over time steps.
                    if t == 0:
                        out = net(img)
                    else:
                        out += net(img)
            correct += (out.argmax(dim=1) == label.to(device)).float().sum().item()
            total += out.shape[0]
    acc = correct / total
    print('Validating Accuracy: %.3f' % (acc))
return acc | null |
7,533 | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions with an identity
    or 1x1-projection shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channels change.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()

    def forward(self, x):
        branch = self.relu1(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return self.relu2(branch)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four residual stages,
    4x4 average pooling and a linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Sized for 32x32 inputs (e.g. CIFAR), where the final feature
        # map is 4x4 -- presumably; confirm against the training pipeline.
        out = F.avg_pool2d(out, 4)
        out = self.flatten(out)
        out = self.linear(out)
        return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2]) | null |
7,534 | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions with an identity
    or 1x1-projection shortcut."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut by default; replaced by a 1x1 projection when
        # the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # residual connection
        out = self.relu2(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four residual stages,
    4x4 average pooling and a linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Sized for 32x32 inputs (e.g. CIFAR), where the final feature
        # map is 4x4 -- presumably; confirm against the training pipeline.
        out = F.avg_pool2d(out, 4)
        out = self.flatten(out)
        out = self.linear(out)
        return out
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3]) | null |
7,535 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, then 1x1 expand by
    ``expansion``; shortcut is identity or a 1x1 projection."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Projection shortcut only when spatial size or channels change.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()

    def forward(self, x):
        branch = self.relu1(self.bn1(self.conv1(x)))
        branch = self.relu2(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        branch = branch + self.shortcut(x)
        return self.relu3(branch)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four residual stages,
    4x4 average pooling and a linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Sized for 32x32 inputs (e.g. CIFAR), where the final feature
        # map is 4x4 -- presumably; confirm against the training pipeline.
        out = F.avg_pool2d(out, 4)
        out = self.flatten(out)
        out = self.linear(out)
        return out
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3]) | null |
7,536 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce, 3x3, then 1x1 expand
    by ``expansion``; shortcut is identity or a 1x1 projection."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # Identity shortcut by default; replaced by a 1x1 projection when
        # the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)  # residual connection
        out = self.relu3(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four residual stages,
    4x4 average pooling and a linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Sized for 32x32 inputs (e.g. CIFAR), where the final feature
        # map is 4x4 -- presumably; confirm against the training pipeline.
        out = F.avg_pool2d(out, 4)
        out = self.flatten(out)
        out = self.linear(out)
        return out
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3]) | null |
7,537 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce, 3x3, then 1x1 expand
    by ``expansion``; shortcut is identity or a 1x1 projection."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # Identity shortcut by default; replaced by a 1x1 projection when
        # the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)  # residual connection
        out = self.relu3(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four residual stages,
    4x4 average pooling and a linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Sized for 32x32 inputs (e.g. CIFAR), where the final feature
        # map is 4x4 -- presumably; confirm against the training pipeline.
        out = F.avg_pool2d(out, 4)
        out = self.flatten(out)
        out = self.linear(out)
        return out
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3]) | null |
7,538 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def step_quantize_forward(x: torch.Tensor, step: float):
x = x / step
torch.round_(x)
return x * step | null |
7,539 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def step_quantize(x: torch.Tensor, step: float = 1.):
    """
    Quantize ``x`` to integer multiples of ``step``.

    :param x: the input float tensor.
    :type x: torch.Tensor
    :param step: the quantization step
    :type step: float
    :return: ``y = round(x / step) * step``
    :rtype: torch.Tensor

    The step quantizer defined in `Lava`.  Denoting ``k`` as an ``int``,
    ``x[i]`` is quantized to the nearest ``k * step``.  The backward pass
    is delegated to ``step_quantize_atgf`` (presumably a straight-through
    estimator -- see its definition elsewhere in this module).
    """
    return step_quantize_atgf.apply(x, step)
# `lava-dl` is an optional dependency: when it can be imported, the
# Lava-specific helpers below are defined; otherwise the failure is
# logged and `slayer` is set to None so callers can feature-test it.
try:
    import lava.lib.dl.slayer as slayer
    # ----------------------------------------
    # data reshape function
    # ----------------------------------------
    # quantize function
    # NOTE(review): this re-definition shadows the module-level
    # `step_quantize` above whenever Lava is available; it delegates to
    # `_step_quantize` instead of `step_quantize_atgf`.
    def step_quantize(x: torch.Tensor, step: float = 1.):
        """
        :param x: the input tensor
        :type x: torch.Tensor
        :param step: the quantize step
        :type step: float
        :return: quantized tensor
        :rtype: torch.Tensor
        The step quantize function. Here is an example:
        .. code-block:: python
            # plt.style.use(['science', 'muted', 'grid'])
            fig = plt.figure(dpi=200, figsize=(6, 4))
            x = torch.arange(-4, 4, 0.001)
            plt.plot(x, lava_exchange.step_quantize(x, 2.), label='quantize(x, step=2)')
            plt.plot(x, x, label='y=x', ls='-.')
            plt.legend()
            plt.grid(ls='--')
            plt.title('step quantize')
            plt.xlabel('Input')
            plt.ylabel('Output')
            plt.savefig('./docs/source/_static/API/activation_based/lava_exchange/step_quantize.svg')
            plt.savefig('./docs/source/_static/API/activation_based/lava_exchange/step_quantize.pdf')
        .. image:: ../_static/API/activation_based/lava_exchange/step_quantize.*
            :width: 100%
        """
        return _step_quantize.apply(x, step)
    # ----------------------------------------
    # convert function
except BaseException as e:
    logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
    slayer = None
The provided code snippet includes necessary dependencies for implementing the `quantize_8b` function. Write a Python function `def quantize_8b(x, scale, descale=False)` to solve the following problem:
Denote ``k`` as an ``int``, ``x[i]`` will be quantized to the nearest ``2 * k / scale``, \ and ``k = {-128, -127, ..., 126, 127}``.
Here is the function:
def quantize_8b(x, scale, descale=False):
"""
Denote ``k`` as an ``int``, ``x[i]`` will be quantized to the nearest ``2 * k / scale``, \
and ``k = {-128, -127, ..., 126, 127}``.
"""
if not descale:
return step_quantize(x, step=2 / scale).clamp(-256 / scale, 255 / scale)
else:
return step_quantize(x, step=2 / scale).clamp(-256 / scale, 255 / scale) * scale | Denote ``k`` as an ``int``, ``x[i]`` will be quantized to the nearest ``2 * k / scale``, \ and ``k = {-128, -127, ..., 126, 127}``. |
7,540 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def right_shift_to_zero(x: torch.Tensor, bits: int):
def _listep_forward(x: torch.Tensor, decay: torch.Tensor, state: torch.Tensor, w_scale: int,
dtype: torch.dtype = torch.int32, hw_bits: int = 12):
# y = (state * w_scale * ((1 << hw_bits) - decay) / (1 << hw_bits) + w_scale * x) / w_scale
# y = state * (1 - decay / (1 << hw_bits)) + x
scaled_state = (state * w_scale).to(dtype=dtype)
decay_int = (1 << hw_bits) - decay.to(dtype=dtype)
output = right_shift_to_zero(scaled_state * decay_int, hw_bits) + (w_scale * x).to(dtype=dtype)
return output / w_scale | null |
7,541 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def _listep_backward(grad_output: torch.Tensor, decay: torch.Tensor, state: torch.Tensor, hw_bits: int = 12):
grad_state = (1 - decay / (1 << hw_bits)) * grad_output
grad_decay = - state / (1 << hw_bits) * grad_output
grad_decay = grad_decay.sum()
return grad_output, grad_decay, grad_state
# x, decay, state | null |
7,542 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def TNX_to_NXT(x_seq: torch.Tensor):
# x_seq.shape = [T, N, *]
permute_args = list(range(1, x_seq.dim()))
permute_args.append(0)
return x_seq.permute(permute_args) | null |
7,543 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def NXT_to_TNX(x_seq: torch.Tensor):
# x_seq.shape = [N, *, T]
permute_args = list(range(x_seq.dim() - 1))
permute_args.insert(0, x_seq.dim() - 1)
return x_seq.permute(permute_args) | null |
7,544 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def lava_neuron_forward(lava_neuron: nn.Module, x_seq: torch.Tensor, v: torch.Tensor or float):
# x_seq.shape = [T, N, *]
# lave uses shape = [*, T], while SJ uses shape = [T, *]
unsqueeze_flag = False
if x_seq.dim() == 2:
x_seq = x_seq.unsqueeze(1)
# lave needs input with shape [N, ... ,T]
unsqueeze_flag = True
if isinstance(v, float):
v_init = v
v = torch.zeros_like(x_seq[0])
if v_init != 0.:
torch.fill_(v, v_init)
x_seq_shape = x_seq.shape
x_seq = x_seq.flatten(2).permute(1, 2, 0)
# [T, N, *] -> [N, *, T]
lava_neuron.voltage_state = v
spike = lava_neuron(x_seq).permute(2, 0, 1)
v = lava_neuron.voltage_state.reshape(x_seq_shape[1:])
spike = spike.reshape(x_seq_shape)
if unsqueeze_flag:
v = v.squeeze(1)
spike = spike.squeeze(1)
return spike, v | null |
7,545 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
def step_quantize(x: torch.Tensor, step: float = 1.):
"""
:param x: a float tensor whose range is ``0 <= x <= 1``.
:type x: torch.Tensor
:param step: the quantization step
:type step: float
:return: ``y = round(x / step) * step``
:rtype: torch.Tensor
The step quantizer defined in `Lava`.
Denote ``k`` as an ``int``, ``x[i]`` will be quantized to the nearest ``k * step``.
"""
return step_quantize_atgf.apply(x, step)
try:
import lava.lib.dl.slayer as slayer
# ----------------------------------------
# data reshape function
# ----------------------------------------
# quantize function
def step_quantize(x: torch.Tensor, step: float = 1.):
"""
:param x: the input tensor
:type x: torch.Tensor
:param step: the quantize step
:type step: float
:return: quantized tensor
:rtype: torch.Tensor
The step quantize function. Here is an example:
.. code-block:: python
# plt.style.use(['science', 'muted', 'grid'])
fig = plt.figure(dpi=200, figsize=(6, 4))
x = torch.arange(-4, 4, 0.001)
plt.plot(x, lava_exchange.step_quantize(x, 2.), label='quantize(x, step=2)')
plt.plot(x, x, label='y=x', ls='-.')
plt.legend()
plt.grid(ls='--')
plt.title('step quantize')
plt.xlabel('Input')
plt.ylabel('Output')
plt.savefig('./docs/source/_static/API/activation_based/lava_exchange/step_quantize.svg')
plt.savefig('./docs/source/_static/API/activation_based/lava_exchange/step_quantize.pdf')
.. image:: ../_static/API/activation_based/lava_exchange/step_quantize.*
:width: 100%
"""
return _step_quantize.apply(x, step)
# ----------------------------------------
# convert function
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
slayer = None
def quantize_8bit(x: torch.Tensor, scale, descale=False):
if descale:
return step_quantize(x, 2. / scale).clamp(-256. / scale, 255. / scale) * scale
else:
return step_quantize(x, 2. / scale).clamp(-256. / scale, 255. / scale) | null |
7,546 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
try:
import lava.lib.dl.slayer as slayer
# ----------------------------------------
# data reshape function
# ----------------------------------------
# quantize function
# ----------------------------------------
# convert function
def to_lava_neuron_param_dict(sj_ms_neuron: nn.Module):
if isinstance(sj_ms_neuron, neuron.IFNode):
if sj_ms_neuron.v_reset != 0.:
raise ValueError('lava only supports for v_reset == 0!')
return {
'threshold': sj_ms_neuron.v_threshold,
'current_decay': 1.,
'voltage_decay': 0.,
'tau_grad': 1, 'scale_grad': 1, 'scale': sj_ms_neuron.lava_s_cale,
'norm': None, 'dropout': None,
'shared_param': True, 'persistent_state': True, 'requires_grad': False,
'graded_spike': False
}
elif isinstance(sj_ms_neuron, neuron.LIFNode):
if sj_ms_neuron.v_reset != 0.:
raise ValueError('lava only supports for v_reset == 0!')
if sj_ms_neuron.decay_input:
raise ValueError('lava only supports for decay_input == False!')
return {
'threshold': sj_ms_neuron.v_threshold,
'current_decay': 1.,
'voltage_decay': 1. / sj_ms_neuron.tau,
'tau_grad': 1, 'scale_grad': 1, 'scale': sj_ms_neuron.lava_s_cale,
'norm': None, 'dropout': None,
'shared_param': True, 'persistent_state': True, 'requires_grad': False,
'graded_spike': False
}
else:
raise NotImplementedError(sj_ms_neuron)
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
slayer = None
def to_lava_neuron(sj_ms_neuron: nn.Module):
if isinstance(sj_ms_neuron, (neuron.IFNode, neuron.LIFNode)):
return slayer.neuron.cuba.Neuron(
**to_lava_neuron_param_dict(sj_ms_neuron)
)
else:
raise NotImplementedError(sj_ms_neuron) | null |
7,547 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
try:
import lava.lib.dl.slayer as slayer
# ----------------------------------------
# data reshape function
# ----------------------------------------
# quantize function
# ----------------------------------------
# convert function
def check_instance(m, instance):
if not isinstance(m, instance):
raise ValueError(
f'expected {m} with type {instance}, but got {m} with type {type(m)}!')
def check_no_bias(m):
if m.bias is not None:
raise ValueError(f'lava does not support for {type(m)} with bias!')
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
slayer = None
The provided code snippet includes necessary dependencies for implementing the `linear_to_lava_synapse_dense` function. Write a Python function `def linear_to_lava_synapse_dense(fc: nn.Linear)` to solve the following problem:
:param fc: a pytorch linear layer without bias :type fc: nn.Linear :return: a lava slayer dense synapse :rtype: slayer.synapse.Dense Codes example: .. code-block:: python T = 4 N = 2 layer_nn = nn.Linear(8, 4, bias=False) layer_sl = lava_exchange.linear_to_lava_synapse_dense(layer_nn) x_seq = torch.rand([T, N, 8]) with torch.no_grad(): y_nn = functional.seq_to_ann_forward(x_seq, layer_nn) y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) print('max error:', (y_nn - y_sl).abs().max())
Here is the function:
def linear_to_lava_synapse_dense(fc: nn.Linear):
"""
:param fc: a pytorch linear layer without bias
:type fc: nn.Linear
:return: a lava slayer dense synapse
:rtype: slayer.synapse.Dense
Codes example:
.. code-block:: python
T = 4
N = 2
layer_nn = nn.Linear(8, 4, bias=False)
layer_sl = lava_exchange.linear_to_lava_synapse_dense(layer_nn)
x_seq = torch.rand([T, N, 8])
with torch.no_grad():
y_nn = functional.seq_to_ann_forward(x_seq, layer_nn)
y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq)))
print('max error:', (y_nn - y_sl).abs().max())
"""
check_instance(fc, nn.Linear)
check_no_bias(fc)
slayer_dense = slayer.synapse.Dense(fc.in_features, fc.out_features)
# `slayer_dense` is a `torch.torch.nn.Conv3d`. Its weight has shape [out_features, in_features, 1, 1, 1]
slayer_dense.weight.data[:, :, 0, 0, 0] = fc.weight.data.clone()
return slayer_dense | :param fc: a pytorch linear layer without bias :type fc: nn.Linear :return: a lava slayer dense synapse :rtype: slayer.synapse.Dense Codes example: .. code-block:: python T = 4 N = 2 layer_nn = nn.Linear(8, 4, bias=False) layer_sl = lava_exchange.linear_to_lava_synapse_dense(layer_nn) x_seq = torch.rand([T, N, 8]) with torch.no_grad(): y_nn = functional.seq_to_ann_forward(x_seq, layer_nn) y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) print('max error:', (y_nn - y_sl).abs().max()) |
7,548 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
try:
import lava.lib.dl.slayer as slayer
# ----------------------------------------
# data reshape function
# ----------------------------------------
# quantize function
# ----------------------------------------
# convert function
def check_instance(m, instance):
if not isinstance(m, instance):
raise ValueError(
f'expected {m} with type {instance}, but got {m} with type {type(m)}!')
def check_no_bias(m):
if m.bias is not None:
raise ValueError(f'lava does not support for {type(m)} with bias!')
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
slayer = None
The provided code snippet includes necessary dependencies for implementing the `conv2d_to_lava_synapse_conv` function. Write a Python function `def conv2d_to_lava_synapse_conv(conv2d_nn: nn.Conv2d)` to solve the following problem:
:param conv2d_nn: a pytorch conv2d layer without bias :type conv2d_nn: nn.Conv2d :return: a lava slayer conv synapse :rtype: slayer.synapse.Conv Codes example: .. code-block:: python T = 4 N = 2 layer_nn = nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1, bias=False) layer_sl = lava_exchange.conv2d_to_lava_synapse_conv(layer_nn) x_seq = torch.rand([T, N, 3, 28, 28]) with torch.no_grad(): y_nn = functional.seq_to_ann_forward(x_seq, layer_nn) y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) print('max error:', (y_nn - y_sl).abs().max())
Here is the function:
def conv2d_to_lava_synapse_conv(conv2d_nn: nn.Conv2d):
"""
:param conv2d_nn: a pytorch conv2d layer without bias
:type conv2d_nn: nn.Conv2d
:return: a lava slayer conv synapse
:rtype: slayer.synapse.Conv
Codes example:
.. code-block:: python
T = 4
N = 2
layer_nn = nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1, bias=False)
layer_sl = lava_exchange.conv2d_to_lava_synapse_conv(layer_nn)
x_seq = torch.rand([T, N, 3, 28, 28])
with torch.no_grad():
y_nn = functional.seq_to_ann_forward(x_seq, layer_nn)
y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq)))
print('max error:', (y_nn - y_sl).abs().max())
"""
check_instance(conv2d_nn, nn.Conv2d)
check_no_bias(conv2d_nn)
slayer_conv = slayer.synapse.Conv(in_features=conv2d_nn.in_channels, out_features=conv2d_nn.out_channels,
kernel_size=conv2d_nn.kernel_size, stride=conv2d_nn.stride,
padding=conv2d_nn.padding, dilation=conv2d_nn.dilation,
groups=conv2d_nn.groups)
# `slayer_conv` is a `torch.torch.nn.Conv3d`.
slayer_conv.weight.data[:, :, :, :, 0] = conv2d_nn.weight.data.clone()
return slayer_conv | :param conv2d_nn: a pytorch conv2d layer without bias :type conv2d_nn: nn.Conv2d :return: a lava slayer conv synapse :rtype: slayer.synapse.Conv Codes example: .. code-block:: python T = 4 N = 2 layer_nn = nn.Conv2d(3, 8, kernel_size=3, stride=1, padding=1, bias=False) layer_sl = lava_exchange.conv2d_to_lava_synapse_conv(layer_nn) x_seq = torch.rand([T, N, 3, 28, 28]) with torch.no_grad(): y_nn = functional.seq_to_ann_forward(x_seq, layer_nn) y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) print('max error:', (y_nn - y_sl).abs().max()) |
7,549 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
try:
import lava.lib.dl.slayer as slayer
# ----------------------------------------
# data reshape function
# ----------------------------------------
# quantize function
# ----------------------------------------
# convert function
def check_instance(m, instance):
if not isinstance(m, instance):
raise ValueError(
f'expected {m} with type {instance}, but got {m} with type {type(m)}!')
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
slayer = None
The provided code snippet includes necessary dependencies for implementing the `avgpool2d_to_lava_synapse_pool` function. Write a Python function `def avgpool2d_to_lava_synapse_pool(pool2d_nn: nn.AvgPool2d)` to solve the following problem:
:param pool2d_nn: a pytorch AvgPool2d layer :type pool2d_nn: nn.AvgPool2d :return: a lava slayer pool layer :rtype: slayer.synapse.Pool .. admonition:: Warning :class: warning The lava slayer pool layer applies sum pooling, rather than average pooling. .. code-block:: python T = 4 N = 2 layer_nn = nn.AvgPool2d(kernel_size=2, stride=2) layer_sl = lava_exchange.avgpool2d_to_lava_synapse_pool(layer_nn) x_seq = torch.rand([T, N, 3, 28, 28]) with torch.no_grad(): y_nn = functional.seq_to_ann_forward(x_seq, layer_nn) y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) / 4. print('max error:', (y_nn - y_sl).abs().max())
Here is the function:
def avgpool2d_to_lava_synapse_pool(pool2d_nn: nn.AvgPool2d):
"""
:param pool2d_nn: a pytorch AvgPool2d layer
:type pool2d_nn: nn.AvgPool2d
:return: a lava slayer pool layer
:rtype: slayer.synapse.Pool
.. admonition:: Warning
:class: warning
The lava slayer pool layer applies sum pooling, rather than average pooling.
.. code-block:: python
T = 4
N = 2
layer_nn = nn.AvgPool2d(kernel_size=2, stride=2)
layer_sl = lava_exchange.avgpool2d_to_lava_synapse_pool(layer_nn)
x_seq = torch.rand([T, N, 3, 28, 28])
with torch.no_grad():
y_nn = functional.seq_to_ann_forward(x_seq, layer_nn)
y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) / 4.
print('max error:', (y_nn - y_sl).abs().max())
"""
check_instance(pool2d_nn, nn.AvgPool2d)
logging.warning(
'The lava slayer pool layer applies sum pooling, rather than average pooling. `avgpool2d_to_lava_synapse_pool` will return a sum pooling layer.')
return slayer.synapse.Pool(pool2d_nn.kernel_size, pool2d_nn.stride, pool2d_nn.padding) | :param pool2d_nn: a pytorch AvgPool2d layer :type pool2d_nn: nn.AvgPool2d :return: a lava slayer pool layer :rtype: slayer.synapse.Pool .. admonition:: Warning :class: warning The lava slayer pool layer applies sum pooling, rather than average pooling. .. code-block:: python T = 4 N = 2 layer_nn = nn.AvgPool2d(kernel_size=2, stride=2) layer_sl = lava_exchange.avgpool2d_to_lava_synapse_pool(layer_nn) x_seq = torch.rand([T, N, 3, 28, 28]) with torch.no_grad(): y_nn = functional.seq_to_ann_forward(x_seq, layer_nn) y_sl = lava_exchange.NXT_to_TNX(layer_sl(lava_exchange.TNX_to_NXT(x_seq))) / 4. print('max error:', (y_nn - y_sl).abs().max()) |
7,550 | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Union, Callable
from . import neuron, base, surrogate
try:
import lava.lib.dl.slayer as slayer
# ----------------------------------------
# data reshape function
# ----------------------------------------
# quantize function
# ----------------------------------------
# convert function
def to_lava_block_dense(fc: nn.Linear, sj_ms_neuron: nn.Module, quantize_to_8bit: bool = True):
check_instance(fc, nn.Linear)
check_no_bias(fc)
neuron_params = to_lava_neuron_param_dict(sj_ms_neuron)
if isinstance(sj_ms_neuron, (neuron.IFNode, neuron.LIFNode)):
block_init = slayer.block.cuba.Dense
else:
raise NotImplementedError(sj_ms_neuron)
if quantize_to_8bit:
# if 'pre_hook_fx' not in kwargs.keys(), then `pre_hook_fx` will be set to `quantize_8bit` by default
lava_block = block_init(neuron_params, fc.in_features, fc.out_features, delay_shift=False)
else:
lava_block = block_init(neuron_params, fc.in_features, fc.out_features, delay_shift=False, pre_hook_fx=None)
lava_block.synapse.weight.data[:, :, 0, 0, 0] = fc.weight.data.clone()
return lava_block
def to_lava_block_conv(conv2d_nn: nn.Conv2d, sj_ms_neuron: nn.Module, quantize_to_8bit: bool = True):
check_instance(conv2d_nn, nn.Conv2d)
check_no_bias(conv2d_nn)
neuron_params = to_lava_neuron_param_dict(sj_ms_neuron)
if isinstance(sj_ms_neuron, (neuron.IFNode, neuron.LIFNode)):
block_init = slayer.block.cuba.Conv
else:
raise NotImplementedError(sj_ms_neuron)
if quantize_to_8bit:
# if 'pre_hook_fx' not in kwargs.keys(), then `pre_hook_fx` will be set to `quantize_8bit` by default
lava_block = block_init(neuron_params, in_features=conv2d_nn.in_channels,
out_features=conv2d_nn.out_channels, kernel_size=conv2d_nn.kernel_size,
stride=conv2d_nn.stride, padding=conv2d_nn.padding, dilation=conv2d_nn.dilation,
groups=conv2d_nn.groups, delay_shift=False)
else:
lava_block = block_init(neuron_params, in_features=conv2d_nn.in_channels,
out_features=conv2d_nn.out_channels, kernel_size=conv2d_nn.kernel_size,
stride=conv2d_nn.stride, padding=conv2d_nn.padding, dilation=conv2d_nn.dilation,
groups=conv2d_nn.groups, delay_shift=False, pre_hook_fx=None)
lava_block.synapse.weight.data[:, :, :, :, 0] = conv2d_nn.weight.data.clone()
return lava_block
def to_lava_block_pool(pool2d_nn: nn.AvgPool2d, sj_ms_neuron: nn.Module, quantize_to_8bit: bool = True):
check_instance(pool2d_nn, nn.AvgPool2d)
neuron_params = to_lava_neuron_param_dict(sj_ms_neuron)
if isinstance(sj_ms_neuron, (neuron.IFNode, neuron.LIFNode)):
block_init = slayer.block.cuba.Pool
else:
raise NotImplementedError(sj_ms_neuron)
if quantize_to_8bit:
# if 'pre_hook_fx' not in kwargs.keys(), then `pre_hook_fx` will be set to `quantize_8bit` by default
lava_block = block_init(neuron_params, pool2d_nn.kernel_size, pool2d_nn.stride, pool2d_nn.padding,
delay_shift=False)
else:
lava_block = block_init(neuron_params, pool2d_nn.kernel_size, pool2d_nn.stride, pool2d_nn.padding,
delay_shift=False, pre_hook_fx=None)
logging.warning(
'The lava slayer pool layer applies sum pooling, rather than average pooling. `avgpool2d_to_lava_synapse_pool` will return a sum pooling layer.')
return lava_block
def to_lava_block_flatten(flatten_nn: nn.Flatten):
check_instance(flatten_nn, nn.Flatten)
if flatten_nn.start_dim != 1:
raise ValueError('lava only supports for flatten_nn.start_dim == 1!')
return slayer.block.cuba.Flatten()
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lava_exchange: {e}')
slayer = None
The provided code snippet includes necessary dependencies for implementing the `to_lava_blocks` function. Write a Python function `def to_lava_blocks(net: list or tuple or nn.Sequential)` to solve the following problem:
Supported layer types input : {shape, type} flatten: {shape, type} average: {shape, type} concat : {shape, type, layers} dense : {shape, type, neuron, inFeatures, outFeatures, weight, delay(if available)} pool : {shape, type, neuron, kernelSize, stride, padding, dilation, weight} conv : {shape, type, neuron, inChannels, outChannels, kernelSize, stride, | padding, dilation, groups, weight, delay(if available)} | |-> this is the description of the compartment parameters |-> {iDecay, vDecay, vThMant, refDelay, ... (other additional params)}
Here is the function:
def to_lava_blocks(net: list or tuple or nn.Sequential):
# https://lava-nc.org/lava-lib-dl/netx/netx.html
'''
Supported layer types
input : {shape, type}
flatten: {shape, type}
average: {shape, type}
concat : {shape, type, layers}
dense : {shape, type, neuron, inFeatures, outFeatures, weight, delay(if available)}
pool : {shape, type, neuron, kernelSize, stride, padding, dilation, weight}
conv : {shape, type, neuron, inChannels, outChannels, kernelSize, stride,
| padding, dilation, groups, weight, delay(if available)}
|
|-> this is the description of the compartment parameters
|-> {iDecay, vDecay, vThMant, refDelay, ... (other additional params)}
'''
blocks = []
length = net.__len__()
i = 0
k = None
while True:
if isinstance(net[i], nn.Linear):
if k is not None:
if isinstance(net[i], (nn.Conv2d, nn.Linear)):
net[i].weight.data /= k
else:
raise NotImplementedError(type(net[i]))
k = None
if i + 1 < length and isinstance(net[i + 1], (neuron.IFNode, neuron.LIFNode)):
blocks.append(to_lava_block_dense(net[i], net[i + 1]))
i += 2
else:
raise ValueError(type(net[i]))
elif isinstance(net[i], nn.Conv2d):
if i + 1 < length and isinstance(net[i + 1], (neuron.IFNode, neuron.LIFNode)):
blocks.append(to_lava_block_conv(net[i], net[i + 1]))
i += 2
else:
raise ValueError(type(net[i]))
elif isinstance(net[i], nn.AvgPool2d):
if i + 1 < length and isinstance(net[i + 1], (neuron.IFNode, neuron.LIFNode)):
blocks.append(to_lava_block_pool(net[i], net[i + 1]))
i += 2
if isinstance(net[i].kernel_size, int):
k = float(net[i].kernel_size * net[i].kernel_size)
else:
k = float(net[i].kernel_size[0] * net[i].kernel_size[1])
else:
raise ValueError(type(net[i]))
elif isinstance(net[i], nn.Flatten):
blocks.append(to_lava_block_flatten(net[i]))
i += 1
else:
raise ValueError(type(net[i]))
if i == length:
break
return blocks | Supported layer types input : {shape, type} flatten: {shape, type} average: {shape, type} concat : {shape, type, layers} dense : {shape, type, neuron, inFeatures, outFeatures, weight, delay(if available)} pool : {shape, type, neuron, kernelSize, stride, padding, dilation, weight} conv : {shape, type, neuron, inChannels, outChannels, kernelSize, stride, | padding, dilation, groups, weight, delay(if available)} | |-> this is the description of the compartment parameters |-> {iDecay, vDecay, vThMant, refDelay, ... (other additional params)} |
7,551 | import copy
import os
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from . import neuron, functional, layer
def to_lynxi_supported_module(m_in: nn.Module, T: int):
def to_lynxi_supported_modules(net: list or tuple or nn.Sequential, T: int):
output_net = []
for i in range(net.__len__()):
m_in = net[i]
m_out = to_lynxi_supported_module(m_in, T)
output_net.append(m_out)
return output_net | null |
7,552 | import copy
import os
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from . import neuron, functional, layer
def torch_tensor_to_lynxi(x: torch.Tensor, device_id: int = 0, to_apu: bool = True):
x_size_in_byte = x.element_size() * x.numel()
x = x.cpu().detach().numpy()
x = lynpy.Tensor(dev_id=device_id, size=x_size_in_byte).from_numpy(x)
if to_apu:
x = x.apu()
return x | null |
7,553 | import copy
import os
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from . import neuron, functional, layer
def lynxi_tensor_to_torch(x: lynpy.Tensor, shape: tuple or list = None, dtype: str = None):
if shape is not None and dtype is not None:
x = x.view_as(shape, dtype)
if x.devptr is not None:
x = x.cpu()
x = torch.from_numpy(x.numpy())
return x | null |
7,554 | import copy
import os
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from . import neuron, functional, layer
try:
'''
适配灵汐科技的芯片
'''
import lyngor
import lynpy
logging.info(f'lynpy.version={lynpy.version}')
logging.info(f'lyngor.version={lyngor.version}')
except BaseException as e:
logging.info(f'spikingjelly.activation_based.lynxi_exchange: {e}')
def compile_lynxi_model(output_dir: str, net: nn.Module, in_data_type: str = 'float32', out_data_type: str = 'float32', input_shape_dict : Dict = {}):
model = lyngor.DLModel()
model.load(net, model_type='Pytorch', in_type=in_data_type, out_type=out_data_type,
inputs_dict=input_shape_dict)
offline_builder = lyngor.Builder(target='apu', is_map=True)
out_path = offline_builder.build(model.graph, model.params,
out_path=output_dir, apu_only=True)
print(os.listdir(out_path))
return os.path.join(out_path, 'Net_0') | null |
7,555 | import copy
import os
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from . import neuron, functional, layer
def load_lynxi_model(device_id: int, model_path: str):
return lynpy.Model(dev_id=device_id, path=model_path) | null |
7,556 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
seed = 1
def make_env():
def _thunk():
env = gym.make(env_name)
env.seed(seed)
return env
return _thunk | null |
7,557 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
device = torch.device('cuda' if use_cuda else 'cpu')
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def test_env(vis=False):
state = env.reset()
if vis: env.render()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
functional.reset_net(model)
next_state, reward, done, _ = env.step(torch.max(dist.sample(), 1)[1].cpu().numpy()[0])
state = next_state
if vis: env.render()
total_reward += reward
return total_reward | null |
7,558 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
def compute_gae(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
values = values + [next_value]
gae = 0
returns = []
for step in reversed(range(len(rewards))):
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
gae = delta + gamma * tau * masks[step] * gae
returns.insert(0, gae + values[step])
return returns | null |
7,559 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
# Create Environments
num_envs = 4
env_name = 'CartPole-v0'
envs = [make_env() for i in range(num_envs)]
envs = SubprocVecEnv(envs)
env = gym.make(env_name)
env.seed(seed)
# Neural Network
# GAE
# Proximal Policy Optimization Algorithm
# Arxiv: "https://arxiv.org/abs/1707.06347"
def ppo_iter(mini_batch_size, states, actions, log_probs, returns, advantage):
batch_size = states.size(0)
ids = np.random.permutation(batch_size)
ids = np.split(ids[:batch_size // mini_batch_size * mini_batch_size], batch_size // mini_batch_size)
for i in range(len(ids)):
yield states[ids[i], :], actions[ids[i], :], log_probs[ids[i], :], returns[ids[i], :], advantage[ids[i], :]
num_inputs = envs.observation_space.shape[0]
num_outputs = env.action_space.n
print('State Num: %d, Action Num: %d' % (num_inputs, num_outputs))
# Hyper params:
hidden_size = 32
lr = 1e-3
num_steps = 128
mini_batch_size = 256
ppo_epochs = 30
T = 16
model = ActorCritic(num_inputs, num_outputs, hidden_size, T).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
max_steps = 10000
step_idx = 0
state = envs.reset()
writer = SummaryWriter(logdir='./log')
while step_idx < max_steps:
log_probs = []
values = []
states = []
actions = []
rewards = []
masks = []
entropy = 0
for _ in range(num_steps):
state = torch.FloatTensor(state).to(device)
dist, value = model(state)
functional.reset_net(model)
action = dist.sample()
next_state, reward, done, _ = envs.step(torch.max(action, 1)[1].cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
states.append(state)
actions.append(action)
state = next_state
step_idx += 1
if step_idx % 100 == 0:
test_reward = test_env()
print('Step: %d, Reward: %.2f' % (step_idx, test_reward))
writer.add_scalar('Spiking-PPO-' + env_name + '/Reward', test_reward, step_idx)
next_state = torch.FloatTensor(next_state).to(device)
_, next_value = model(next_state)
functional.reset_net(model)
returns = compute_gae(next_value, rewards, masks, values)
returns = torch.cat(returns).detach()
log_probs = torch.cat(log_probs).detach()
values = torch.cat(values).detach()
states = torch.cat(states)
actions = torch.cat(actions)
advantage = returns - values
ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantage)
print('----------------------------')
print('Complete')
writer.close()
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantages, clip_param=0.2):
    """Run PPO epochs of clipped-surrogate updates over minibatches.

    Uses the module-level ``model``, ``optimizer`` and ``ppo_iter``.  The
    spiking network keeps membrane state across calls, so it is reset after
    every forward pass.  Loss = 0.5 * value MSE + clipped policy surrogate
    - 0.001 * entropy bonus.
    """
    for _ in range(ppo_epochs):
        batches = ppo_iter(mini_batch_size, states, actions, log_probs, returns, advantages)
        for state, action, old_log_probs, return_, advantage in batches:
            dist, value = model(state)
            functional.reset_net(model)  # clear membrane potentials before the next forward
            new_log_probs = dist.log_prob(action)
            entropy = dist.entropy().mean()
            # Clipped surrogate objective (PPO, arXiv:1707.06347).
            ratio = (new_log_probs - old_log_probs).exp()
            unclipped = ratio * advantage
            clipped = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantage
            actor_loss = -torch.min(unclipped, clipped).mean()
            critic_loss = (return_ - value).pow(2).mean()
            loss = 0.5 * critic_loss + actor_loss - 0.001 * entropy
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
7,560 | import torch
import torch.nn as nn
from spikingjelly.activation_based import neuron, layer, learning
from matplotlib import pyplot as plt
def f_pre(x, w_min, alpha=0.):
    """Pre-synaptic weight-dependence factor ``(x - w_min) ** alpha``.

    With the default ``alpha=0.`` this is the constant 1 (no weight
    dependence); larger ``alpha`` scales the update by the headroom
    above the lower bound ``w_min``.
    """
    headroom = x - w_min
    return headroom ** alpha
7,561 | import torch
import torch.nn as nn
from spikingjelly.activation_based import neuron, layer, learning
from matplotlib import pyplot as plt
def f_post(x, w_max, alpha=0.):
    """Post-synaptic weight-dependence factor ``(w_max - x) ** alpha``.

    With the default ``alpha=0.`` this is the constant 1 (no weight
    dependence); larger ``alpha`` scales the update by the margin
    remaining below the upper bound ``w_max``.
    """
    margin = w_max - x
    return margin ** alpha
7,562 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
def make_env():
    """Return a zero-argument thunk that builds and seeds a fresh gym env.

    SubprocVecEnv expects factory callables; the module-level ``env_name``
    and ``seed`` are read only when the thunk is invoked.
    """
    def _thunk():
        environment = gym.make(env_name)
        environment.seed(seed)
        return environment

    return _thunk
7,563 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
def test_env(vis=False):
    """Roll out one evaluation episode and return its total reward.

    Uses the module-level ``env``, ``model`` and ``device``; actions are
    sampled from the policy distribution.  Set ``vis=True`` to render.
    """
    obs = env.reset()
    if vis:
        env.render()
    episode_reward = 0
    done = False
    while not done:
        obs_tensor = torch.FloatTensor(obs).unsqueeze(0).to(device)
        dist, _ = model(obs_tensor)
        obs, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])
        if vis:
            env.render()
        episode_reward += reward
    return episode_reward
7,564 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
def compute_returns(next_value, rewards, masks, gamma=0.99):
    """Discounted returns for a rollout, bootstrapped from ``next_value``.

    ``masks[t]`` is 0 at episode boundaries, which cuts the bootstrap so
    returns do not leak across episodes.  Output is aligned with ``rewards``.
    """
    running = next_value
    out = []
    for reward, mask in zip(reversed(rewards), reversed(masks)):
        running = reward + gamma * running * mask
        out.append(running)
    out.reverse()
    return out
7,565 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
seed = 1
env_name = 'CartPole-v0'
def make_env():
    """Return a zero-argument thunk that builds and seeds a fresh gym env.

    SubprocVecEnv expects factory callables; the module-level ``env_name``
    and ``seed`` are read only when the thunk is invoked.
    """
    def _thunk():
        environment = gym.make(env_name)
        environment.seed(seed)
        return environment

    return _thunk
7,566 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
device = torch.device('cuda' if use_cuda else 'cpu')
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def test_env(vis=False):
    """Roll out one evaluation episode and return its total reward.

    Uses the module-level ``env``, ``model`` and ``device``.  The action is
    the argmax index of the sampled distribution output (one-hot style
    sample).  Set ``vis=True`` to render.
    """
    obs = env.reset()
    if vis:
        env.render()
    episode_reward = 0
    done = False
    while not done:
        obs_tensor = torch.FloatTensor(obs).unsqueeze(0).to(device)
        dist, _ = model(obs_tensor)
        chosen = torch.max(dist.sample(), 1)[1].cpu().numpy()[0]
        obs, reward, done, _ = env.step(chosen)
        if vis:
            env.render()
        episode_reward += reward
    return episode_reward
7,567 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
def compute_gae(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
    """Value targets via Generalized Advantage Estimation (arXiv:1506.02438).

    For each step t: returns[t] = GAE advantage(t) + V(s_t), where the
    advantage accumulates discounted TD errors; ``masks[t]`` is 0 at
    episode ends and stops the accumulation there.
    """
    all_values = values + [next_value]
    advantage = 0
    targets = []
    for t in range(len(rewards) - 1, -1, -1):
        td_error = rewards[t] + gamma * all_values[t + 1] * masks[t] - all_values[t]
        advantage = td_error + gamma * tau * masks[t] * advantage
        targets.append(advantage + all_values[t])
    targets.reverse()
    return targets
7,568 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
envs = [make_env() for i in range(num_envs)]
envs = SubprocVecEnv(envs)
env = gym.make(env_name)
env.seed(seed)
# GAE
# Proximal Policy Optimization Algorithm
# Arxiv: "https://arxiv.org/abs/1707.06347"
def ppo_iter(mini_batch_size, states, actions, log_probs, returns, advantage):
num_inputs = envs.observation_space.shape[0]
num_outputs = env.action_space.n
print('State Num: %d, Action Num: %d' % (num_inputs, num_outputs))
# Hyper params:
hidden_size = 32
lr = 1e-3
num_steps = 128
mini_batch_size = 256
ppo_epochs = 30
model = ActorCritic(num_inputs, num_outputs, hidden_size).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
max_steps = 10000
step_idx = 0
state = envs.reset()
writer = SummaryWriter(logdir='./log')
while step_idx < max_steps:
log_probs = []
values = []
states = []
actions = []
rewards = []
masks = []
entropy = 0
for _ in range(num_steps):
state = torch.FloatTensor(state).to(device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(torch.max(action, 1)[1].cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
states.append(state)
actions.append(action)
state = next_state
step_idx += 1
if step_idx % 100 == 0:
test_reward = test_env()
print('Step: %d, Reward: %.2f' % (step_idx, test_reward))
writer.add_scalar('PPO-' + env_name + '/Reward', test_reward, step_idx)
next_state = torch.FloatTensor(next_state).to(device)
_, next_value = model(next_state)
returns = compute_gae(next_value, rewards, masks, values)
returns = torch.cat(returns).detach()
log_probs = torch.cat(log_probs).detach()
values = torch.cat(values).detach()
states = torch.cat(states)
actions = torch.cat(actions)
advantage = returns - values
ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantage)
print('----------------------------')
print('Complete')
writer.close()
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantages, clip_param=0.2):
    """Run PPO epochs of clipped-surrogate updates over minibatches.

    Uses the module-level ``model``, ``optimizer`` and ``ppo_iter``.
    Loss = 0.5 * value MSE + clipped policy surrogate - 0.001 * entropy
    bonus (PPO, arXiv:1707.06347).
    """
    for _ in range(ppo_epochs):
        batches = ppo_iter(mini_batch_size, states, actions, log_probs, returns, advantages)
        for state, action, old_log_probs, return_, advantage in batches:
            dist, value = model(state)
            new_log_probs = dist.log_prob(action)
            entropy = dist.entropy().mean()
            # Probability ratio between new and old policy, clipped to
            # keep the update inside the trust region.
            ratio = (new_log_probs - old_log_probs).exp()
            unclipped = ratio * advantage
            clipped = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantage
            actor_loss = -torch.min(unclipped, clipped).mean()
            critic_loss = (return_ - value).pow(2).mean()
            loss = 0.5 * critic_loss + actor_loss - 0.001 * entropy
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
7,569 | import logging
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms
from spikingjelly.activation_based import functional, lava_exchange, surrogate, encoding, neuron
import torch.nn.functional as F
from torch.utils.data import DataLoader
import os
import argparse
import h5py
def export_hdf5(net, filename):
    """Export a sequential spiking network to an hdf5 network description.

    Each child module ``b`` of ``net`` must implement ``export_hdf5(handle)``;
    modules are written in order under groups ``/layer/0``, ``/layer/1``, ...

    Fix: the original left the h5py.File handle open, so buffered writes
    could be lost; a context manager now guarantees flush and close.  The
    local previously named ``layer`` is renamed so it no longer shadows the
    imported ``layer`` module.
    """
    with h5py.File(filename, 'w') as h:
        layer_group = h.create_group('layer')
        for i, b in enumerate(net):
            handle = layer_group.create_group(f'{i}')
            b.export_hdf5(handle)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.