id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
14,100 | import os
import traceback
from Exceptions import PipelineCreateException
from const import EnumInferenceTypes, PitchExtractorType
from data.ModelSlot import EasyVCModelSlot
from voice_changer.EasyVC.pipeline.Pipeline import Pipeline
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.embedder.EmbedderManager import EmbedderManager
from voice_changer.RVC.inferencer.InferencerManager import InferencerManager
from voice_changer.RVC.pitchExtractor.PitchExtractorManager import PitchExtractorManager
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
class PipelineCreateException(Exception):
    """Raised when a voice-changer pipeline cannot be constructed."""

    def __str__(self):
        # repr()-wrapped on purpose: keeps parity with the historical output format.
        message = "Failed to create Pipeline."
        return repr(message)
class EnumInferenceTypes(Enum):
    """Supported inference backend/model variants.

    Each member's value is its serialized string form; the manager code
    compares against both the member and its ``.value``, so either may be
    passed around. NOTE(review): the "Nono" suffix presumably marks the
    no-f0 (pitchless) model variants — confirm against the model classes.
    """

    pyTorchRVC = "pyTorchRVC"
    pyTorchRVCNono = "pyTorchRVCNono"
    pyTorchRVCv2 = "pyTorchRVCv2"
    pyTorchRVCv2Nono = "pyTorchRVCv2Nono"
    pyTorchWebUI = "pyTorchWebUI"
    pyTorchWebUINono = "pyTorchWebUINono"
    pyTorchVoRASbeta = "pyTorchVoRASbeta"
    onnxRVC = "onnxRVC"
    onnxRVCNono = "onnxRVCNono"
    easyVC = "easyVC"
# Names of the selectable pitch (f0) extraction algorithms; see
# PitchExtractorManager.loadPitchExtractor for the mapping to implementations.
PitchExtractorType: TypeAlias = Literal[
    "harvest",
    "dio",
    "crepe",
    "crepe_full",
    "crepe_tiny",
    "rmvpe",
    "rmvpe_onnx",
    "fcpe",
]
class EasyVCModelSlot(ModelSlot):
    """Model-slot settings for the EasyVC voice changer."""

    voiceChangerType: VoiceChangerType = "EasyVC"
    modelFile: str = ""  # model filename; basename is joined with the slot dir at load time
    version: str = ""  # passed through to the inferencer loader
    samplingRate: int = -1  # model output sampling rate; -1 appears to mean "unset"
class Pipeline(object):
    """EasyVC conversion pipeline: embedder -> pitch extractor -> inferencer.

    Internally everything runs at 16 kHz with a 160-sample hop; the inferencer
    emits audio at the model's own sampling rate (``targetSR``).
    """

    embedder: Embedder
    inferencer: Inferencer
    pitchExtractor: PitchExtractor

    index: Any | None
    big_npy: Any | None
    # feature: Any | None

    targetSR: int
    device: torch.device
    isHalf: bool

    def __init__(
        self,
        embedder: Embedder,
        inferencer: Inferencer,
        pitchExtractor: PitchExtractor,
        targetSR,
        device,
        isHalf,
    ):
        self.embedder = embedder
        self.inferencer = inferencer
        self.pitchExtractor = pitchExtractor
        logger.info("GENERATE INFERENCER" + str(self.inferencer))
        logger.info("GENERATE EMBEDDER" + str(self.embedder))
        logger.info("GENERATE PITCH EXTRACTOR" + str(self.pitchExtractor))

        self.targetSR = targetSR
        self.device = device
        self.isHalf = isHalf

        # Internal processing rate and frame hop (16 kHz / 160 samples).
        self.sr = 16000
        self.window = 160

    def getPipelineInfo(self):
        """Summarize the current pipeline components for status reporting."""
        inferencerInfo = self.inferencer.getInferencerInfo() if self.inferencer else {}
        embedderInfo = self.embedder.getEmbedderInfo()
        pitchExtractorInfo = self.pitchExtractor.getPitchExtractorInfo()
        return {"inferencer": inferencerInfo, "embedder": embedderInfo, "pitchExtractor": pitchExtractorInfo, "isHalf": self.isHalf}

    def setPitchExtractor(self, pitchExtractor: PitchExtractor):
        # Hot-swap the f0 extractor (e.g. when the user changes the detector).
        self.pitchExtractor = pitchExtractor

    def extractPitch(self, audio_pad, if_f0, pitchf, f0_up_key, silence_front):
        """Run the pitch extractor when the model uses f0 (``if_f0 == 1``).

        Returns ``(pitch, pitchf)`` as (1, n) tensors on ``self.device``, or
        ``(None, None)`` for pitchless models. Raises NotEnoughDataExtimateF0
        when the extractor cannot produce enough frames (surfaces as IndexError).
        """
        try:
            if if_f0 == 1:
                pitch, pitchf = self.pitchExtractor.extract(
                    audio_pad,
                    pitchf,
                    f0_up_key,
                    self.sr,
                    self.window,
                    silence_front=silence_front,
                )
                # pitch = pitch[:p_len]
                # pitchf = pitchf[:p_len]
                pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
                pitchf = torch.tensor(pitchf, device=self.device, dtype=torch.float).unsqueeze(0)
            else:
                pitch = None
                pitchf = None
        except IndexError as e:  # NOQA
            print(e)
            import traceback
            traceback.print_exc()
            raise NotEnoughDataExtimateF0()
        return pitch, pitchf

    def extractFeatures(self, feats):
        """Embed ``feats`` under autocast; translate half/device errors into domain exceptions."""
        with autocast(enabled=self.isHalf):
            try:
                feats = self.embedder.extractFeatures(feats)
                # An all-NaN embedding is treated as the symptom of fp16 being
                # unsupported on this device.
                if torch.isnan(feats).all():
                    raise DeviceCannotSupportHalfPrecisionException()
                return feats
            except RuntimeError as e:
                if "HALF" in e.__str__().upper():
                    raise HalfPrecisionChangingException()
                elif "same device" in e.__str__():
                    raise DeviceChangingException()
                else:
                    raise e

    def infer(self, feats, p_len, pitch, pitchf, sid, out_size):
        """Run the inferencer and scale its float output into int16 PCM range."""
        try:
            with torch.no_grad():
                with autocast(enabled=self.isHalf):
                    audio1 = self.inferencer.infer(feats, p_len, pitch, pitchf, sid, out_size)
                    audio1 = (audio1 * 32767.5).data.to(dtype=torch.int16)
                    return audio1
        except RuntimeError as e:
            if "HALF" in e.__str__().upper():
                print("HalfPresicion Error:", e)
                raise HalfPrecisionChangingException()
            else:
                raise e

    def exec(
        self,
        sid,
        audio,  # torch.tensor [n]
        pitchf,  # np.array [m]
        feature,  # np.array [m, feat]
        f0_up_key,
        index_rate,
        if_f0,
        silence_front,
        repeat,
        out_size=None,
    ):
        """Full conversion pass: pad, extract pitch, embed, infer, trim padding.

        Returns ``(audio1, pitchf_buffer, feats_buffer)`` where ``audio1`` is an
        int16 tensor at ``targetSR`` and the buffers are CPU copies kept for reuse.
        """
        # print(f"pipeline exec input, audio:{audio.shape}, pitchf:{pitchf.shape}, feature:{feature.shape}")
        # print(f"pipeline exec input, silence_front:{silence_front}, out_size:{out_size}")
        enablePipelineTimer = False
        with Timer2("Pipeline-Exec", enablePipelineTimer) as t:  # NOQA
            # Input arrives at a 16 kHz sampling rate; everything below works at 16 kHz.
            # self.t_pad = self.sr * repeat  # 1 second
            # self.t_pad_tgt = self.targetSR * repeat  # 1 second; trimmed at output (output is at the model's rate)
            audio = audio.unsqueeze(0)
            # The reflect padding must be strictly smaller than the input length.
            quality_padding_sec = (repeat * (audio.shape[1] - 1)) / self.sr
            self.t_pad = round(self.sr * quality_padding_sec)  # samples added before/after at 16 kHz
            self.t_pad_tgt = round(self.targetSR * quality_padding_sec)  # trim amount on the model-rate output
            audio_pad = F.pad(audio, (self.t_pad, self.t_pad), mode="reflect").squeeze(0)
            p_len = audio_pad.shape[0] // self.window
            sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()

            # # Turn silence_front off while RVC Quality is on.
            # silence_front = silence_front if repeat == 0 else 0
            # pitchf = pitchf if repeat == 0 else np.zeros(p_len)
            # out_size = out_size if repeat == 0 else None

            # Tensor shape adjustment: downmix stereo to mono, then add a batch dim.
            feats = audio_pad
            if feats.dim() == 2:  # double channels
                feats = feats.mean(-1)
            assert feats.dim() == 1, feats.dim()
            feats = feats.view(1, -1)
            t.record("pre-process")

            # Pitch detection
            pitch, pitchf = self.extractPitch(audio_pad, if_f0, pitchf, f0_up_key, silence_front)
            t.record("extract-pitch")

            # Embedding
            feats = self.extractFeatures(feats)
            t.record("extract-feats")
            # Double the temporal resolution of the embeddings.
            feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
            # if protect < 0.5 and search_index:
            #     feats0 = feats.clone()

            # Align pitch length with the (possibly shorter) feature length.
            p_len = audio_pad.shape[0] // self.window
            if feats.shape[1] < p_len:
                p_len = feats.shape[1]
                if pitch is not None and pitchf is not None:
                    pitch = pitch[:, :p_len]
                    pitchf = pitchf[:, :p_len]

            feats_len = feats.shape[1]
            if pitch is not None and pitchf is not None:
                pitch = pitch[:, -feats_len:]
                pitchf = pitchf[:, -feats_len:]
            p_len = torch.tensor([feats_len], device=self.device).long()

            # Apply silent front for inference (ONNX inferencers only).
            if type(self.inferencer) in [OnnxRVCInferencer, OnnxRVCInferencerNono]:
                npyOffset = math.floor(silence_front * 16000) // 360
                feats = feats[:, npyOffset * 2 :, :]  # NOQA
                feats_len = feats.shape[1]
                if pitch is not None and pitchf is not None:
                    pitch = pitch[:, -feats_len:]
                    pitchf = pitchf[:, -feats_len:]
                p_len = torch.tensor([feats_len], device=self.device).long()
            t.record("mid-precess")

            # Run inference
            audio1 = self.infer(feats, p_len, pitch, pitchf, sid, out_size)
            t.record("infer")

            # Keep CPU copies for callers before freeing the device tensors.
            feats_buffer = feats.squeeze(0).detach().cpu()
            if pitchf is not None:
                pitchf_buffer = pitchf.squeeze(0).detach().cpu()
            else:
                pitchf_buffer = None

            del p_len, pitch, pitchf, feats
            # torch.cuda.empty_cache()

            # infer() outputs at the model's sampling rate
            # (pipeline input was 16 k for the hubert-style embedder).
            if self.t_pad_tgt != 0:
                offset = self.t_pad_tgt
                end = -1 * self.t_pad_tgt
                audio1 = audio1[offset:end]

            del sid
            t.record("post-process")
            # torch.cuda.empty_cache()
        # print("EXEC AVERAGE:", t.avrSecs)
        return audio1, pitchf_buffer, feats_buffer

    def __del__(self):
        del self.embedder
        del self.inferencer
        del self.pitchExtractor
        print("Pipeline has been deleted")
class DeviceManager(object):
    """Singleton helper that selects torch devices and ONNX execution providers."""

    _instance = None
    # When True, half precision is disabled regardless of GPU capability.
    forceTensor: bool = False

    # BUG FIX: get_instance takes `cls` and is called as DeviceManager.get_instance()
    # elsewhere in this file, so it must be a classmethod.
    @classmethod
    def get_instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        self.gpu_num = torch.cuda.device_count()
        self.mps_enabled: bool = (
            getattr(torch.backends, "mps", None) is not None
            and torch.backends.mps.is_available()
        )

    def getDevice(self, id: int):
        """Map a GPU index to a torch.device; negative id / no CUDA falls back to MPS or CPU."""
        if id < 0 or self.gpu_num == 0:
            if self.mps_enabled is False:
                dev = torch.device("cpu")
            else:
                dev = torch.device("mps")
        else:
            if id < self.gpu_num:
                dev = torch.device("cuda", index=id)
            else:
                print("[Voice Changer] device detection error, fallback to cpu")
                dev = torch.device("cpu")
        return dev

    def getOnnxExecutionProvider(self, gpu: int):
        """Pick ONNX Runtime providers/options for `gpu` (CUDA > DirectML > CPU)."""
        availableProviders = onnxruntime.get_available_providers()
        devNum = torch.cuda.device_count()
        if gpu >= 0 and "CUDAExecutionProvider" in availableProviders and devNum > 0:
            # Checked one level down (not in the outer condition) to give a clearer error.
            if gpu < devNum:
                return ["CUDAExecutionProvider"], [{"device_id": gpu}]
            else:
                print("[Voice Changer] device detection error, fallback to cpu")
                return ["CPUExecutionProvider"], [
                    {
                        "intra_op_num_threads": 8,
                        "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                        "inter_op_num_threads": 8,
                    }
                ]
        elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{"device_id": gpu}]
        else:
            return ["CPUExecutionProvider"], [
                {
                    "intra_op_num_threads": 8,
                    "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                    "inter_op_num_threads": 8,
                }
            ]

    def setForceTensor(self, forceTensor: bool):
        self.forceTensor = forceTensor

    def halfPrecisionAvailable(self, id: int):
        """Heuristic: can device `id` run fp16? False for CPU, known-weak GPUs, or compute < 7."""
        if self.gpu_num == 0:
            return False
        if id < 0:
            return False
        if self.forceTensor:
            return False
        try:
            gpuName = torch.cuda.get_device_name(id).upper()
            # GTX 16xx / 10xx and P40 are excluded: they handle fp16 poorly.
            if (
                ("16" in gpuName and "V100" not in gpuName)
                or "P40" in gpuName.upper()
                or "1070" in gpuName
                or "1080" in gpuName
            ):
                return False
        except Exception as e:
            print(e)
            return False

        cap = torch.cuda.get_device_capability(id)
        # Compute capability >= 7 is said to support half precision
        # (exceptions exist, e.g. the T500).
        if cap[0] < 7:
            return False

        return True

    def getDeviceMemory(self, id: int):
        """Total memory of CUDA device `id` in bytes; 0 when it cannot be queried."""
        try:
            return torch.cuda.get_device_properties(id).total_memory
        except Exception as e:
            print(e)
            return 0
class EmbedderManager:
    """Class-level cache/factory for feature-extraction embedders."""

    currentEmbedder: Embedder | None = None
    params: VoiceChangerParams

    # BUG FIX: these take `cls` and are called on the class elsewhere in this
    # file (EmbedderManager.getEmbedder(...)), so they must be classmethods.
    @classmethod
    def initialize(cls, params: VoiceChangerParams):
        cls.params = params

    @classmethod
    def getEmbedder(cls, embederType: EmbedderType, isHalf: bool, dev: device) -> Embedder:
        """Return an embedder for the requested type.

        NOTE: all three branches currently reload — the cache is effectively
        always refreshed (the "(anyway)" branch), matching the original behavior.
        """
        if cls.currentEmbedder is None:
            print("[Voice Changer] generate new embedder. (no embedder)")
            cls.currentEmbedder = cls.loadEmbedder(embederType, isHalf, dev)
        elif cls.currentEmbedder.matchCondition(embederType) is False:
            print("[Voice Changer] generate new embedder. (not match)")
            cls.currentEmbedder = cls.loadEmbedder(embederType, isHalf, dev)
        else:
            print("[Voice Changer] generate new embedder. (anyway)")
            cls.currentEmbedder = cls.loadEmbedder(embederType, isHalf, dev)
        # cls.currentEmbedder.setDevice(dev)
        # cls.currentEmbedder.setHalf(isHalf)
        return cls.currentEmbedder

    @classmethod
    def loadEmbedder(cls, embederType: EmbedderType, isHalf: bool, dev: device) -> Embedder:
        """Instantiate and load the embedder model for `embederType`.

        hubert_base / contentvec try the ONNX contentvec first and fall back to
        the torch implementation; unknown types fall back to FairseqHubert on
        the hubert_base weights.
        """
        if embederType == "hubert_base":
            try:
                if cls.params.content_vec_500_onnx_on is False:
                    raise Exception("[Voice Changer][Embedder] onnx is off")
                file = cls.params.content_vec_500_onnx
                return OnnxContentvec().loadModel(file, dev)
            except Exception as e:  # noqa
                print("[Voice Changer] use torch contentvec", e)
                file = cls.params.hubert_base
                return FairseqHubert().loadModel(file, dev, isHalf)
        elif embederType == "hubert-base-japanese":
            file = cls.params.hubert_base_jp
            return FairseqHubertJp().loadModel(file, dev, isHalf)
        elif embederType == "contentvec":
            try:
                if cls.params.content_vec_500_onnx_on is False:
                    raise Exception("[Voice Changer][Embedder] onnx is off")
                file = cls.params.content_vec_500_onnx
                return OnnxContentvec().loadModel(file, dev)
            except Exception as e:
                print(e)
                file = cls.params.hubert_base
                return FairseqContentvec().loadModel(file, dev, isHalf)
        elif embederType == "whisper":
            file = cls.params.whisper_tiny
            return Whisper().loadModel(file, dev, isHalf)
        else:
            # BUG FIX: `file` was unbound on this fallback path (NameError);
            # default to the hubert_base weights like the hubert_base branch.
            file = cls.params.hubert_base
            return FairseqHubert().loadModel(file, dev, isHalf)
class InferencerManager:
    """Factory for inference backends keyed by EnumInferenceTypes."""

    currentInferencer: Inferencer | None = None

    # BUG FIX: called as InferencerManager.getInferencer(...) elsewhere in this
    # file with `cls` as first parameter -> must be classmethods.
    @classmethod
    def getInferencer(
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
        inferencerTypeVersion: str | None = None,
    ) -> Inferencer:
        """Load and cache the inferencer for `inferencerType` from `file`."""
        cls.currentInferencer = cls.loadInferencer(inferencerType, file, gpu, inferencerTypeVersion)
        return cls.currentInferencer

    @classmethod
    def loadInferencer(
        cls,
        inferencerType: EnumInferenceTypes,
        file: str,
        gpu: int,
        inferencerTypeVersion: str | None = None,
    ) -> Inferencer:
        """Instantiate the matching inferencer; accepts an enum member or its string value.

        Raises RuntimeError for unknown types and for VoRAS on macOS.
        """
        # Normalize once instead of comparing against both the member and its .value.
        key = inferencerType.value if isinstance(inferencerType, EnumInferenceTypes) else inferencerType
        if key == EnumInferenceTypes.pyTorchRVC.value:
            return RVCInferencer().loadModel(file, gpu)
        elif key == EnumInferenceTypes.pyTorchRVCNono.value:
            return RVCInferencerNono().loadModel(file, gpu)
        elif key == EnumInferenceTypes.pyTorchRVCv2.value:
            return RVCInferencerv2().loadModel(file, gpu)
        elif key == EnumInferenceTypes.pyTorchVoRASbeta.value:
            if sys.platform.startswith("darwin") is False:
                # Imported lazily: VoRAS pulls in dependencies unavailable on macOS.
                from voice_changer.RVC.inferencer.VorasInferencebeta import VoRASInferencer
                return VoRASInferencer().loadModel(file, gpu)
            else:
                raise RuntimeError("[Voice Changer] VoRAS is not supported on macOS")
        elif key == EnumInferenceTypes.pyTorchRVCv2Nono.value:
            return RVCInferencerv2Nono().loadModel(file, gpu)
        elif key == EnumInferenceTypes.pyTorchWebUI.value:
            return WebUIInferencer().loadModel(file, gpu)
        elif key == EnumInferenceTypes.pyTorchWebUINono.value:
            return WebUIInferencerNono().loadModel(file, gpu)
        elif key == EnumInferenceTypes.onnxRVC.value:
            return OnnxRVCInferencer().loadModel(file, gpu, inferencerTypeVersion)
        elif key == EnumInferenceTypes.onnxRVCNono.value:
            return OnnxRVCInferencerNono().loadModel(file, gpu, inferencerTypeVersion)
        elif key == EnumInferenceTypes.easyVC.value:
            return EasyVCInferencerONNX().loadModel(file, gpu)
        else:
            raise RuntimeError("[Voice Changer] Inferencer not found", inferencerType)
class PitchExtractorManager(Protocol):
    """Factory for pitch (f0) extractors; unknown types fall back to dio."""

    currentPitchExtractor: PitchExtractor | None = None
    params: VoiceChangerParams

    # BUG FIX: called as PitchExtractorManager.getPitchExtractor(...) elsewhere
    # in this file with `cls` as first parameter -> must be classmethods.
    @classmethod
    def initialize(cls, params: VoiceChangerParams):
        cls.params = params

    @classmethod
    def getPitchExtractor(
        cls, pitchExtractorType: PitchExtractorType, gpu: int
    ) -> PitchExtractor:
        """Load and cache the extractor for `pitchExtractorType`."""
        cls.currentPitchExtractor = cls.loadPitchExtractor(pitchExtractorType, gpu)
        return cls.currentPitchExtractor

    @classmethod
    def loadPitchExtractor(
        cls, pitchExtractorType: PitchExtractorType, gpu: int
    ) -> PitchExtractor:
        """Instantiate the extractor; unknown types warn and fall back to dio."""
        if pitchExtractorType == "harvest":
            return HarvestPitchExtractor()
        elif pitchExtractorType == "dio":
            return DioPitchExtractor()
        elif pitchExtractorType == "crepe":
            return CrepePitchExtractor(gpu)
        elif pitchExtractorType == "crepe_tiny":
            return CrepeOnnxPitchExtractor(pitchExtractorType, cls.params.crepe_onnx_tiny, gpu)
        elif pitchExtractorType == "crepe_full":
            return CrepeOnnxPitchExtractor(pitchExtractorType, cls.params.crepe_onnx_full, gpu)
        elif pitchExtractorType == "rmvpe":
            return RMVPEPitchExtractor(cls.params.rmvpe, gpu)
        elif pitchExtractorType == "rmvpe_onnx":
            return RMVPEOnnxPitchExtractor(cls.params.rmvpe_onnx, gpu)
        elif pitchExtractorType == "fcpe":
            return FcpePitchExtractor(gpu)
        else:
            # Fallback to dio as default (log typo "PitchExctractor" fixed).
            print("[Voice Changer] PitchExtractor not found", pitchExtractorType)
            print("    fallback to dio")
            return DioPitchExtractor()
class VoiceChangerParams:
    """File-system locations and flags for the bundled model assets."""

    model_dir: str  # root directory holding per-slot model folders
    content_vec_500: str
    content_vec_500_onnx: str
    content_vec_500_onnx_on: bool  # prefer the ONNX contentvec when True (see EmbedderManager)
    hubert_base: str
    hubert_base_jp: str
    hubert_soft: str
    nsf_hifigan: str
    sample_mode: str
    crepe_onnx_full: str  # weights used by the "crepe_full" pitch extractor
    crepe_onnx_tiny: str  # weights used by the "crepe_tiny" pitch extractor
    rmvpe: str
    rmvpe_onnx: str
    whisper_tiny: str  # weights used by the "whisper" embedder
def createPipeline(params: VoiceChangerParams, modelSlot: EasyVCModelSlot, gpu: int, f0Detector: PitchExtractorType):
    """Assemble an EasyVC Pipeline (inferencer + embedder + pitch extractor) for `gpu`.

    Raises:
        PipelineCreateException: when the inferencer or embedder fails to load.
    """
    dev = DeviceManager.get_instance().getDevice(gpu)
    half = DeviceManager.get_instance().halfPrecisionAvailable(gpu)

    # Create inferencer
    try:
        modelPath = os.path.join(params.model_dir, str(modelSlot.slotIndex), os.path.basename(modelSlot.modelFile))
        inferencer = InferencerManager.getInferencer(EnumInferenceTypes.easyVC, modelPath, gpu, modelSlot.version)
    except Exception as e:
        print("[Voice Changer] exception! loading inferencer", e)
        traceback.print_exc()
        raise PipelineCreateException("[Voice Changer] exception! loading inferencer")

    # Create embedder (EasyVC always uses the whisper embedder)
    try:
        embedder = EmbedderManager.getEmbedder(
            "whisper",
            half,
            dev,
        )
    except Exception as e:
        print("[Voice Changer] exception! loading embedder", e, dev)
        traceback.print_exc()
        raise PipelineCreateException("[Voice Changer] exception! loading embedder")

    # pitchExtractor
    pitchExtractor = PitchExtractorManager.getPitchExtractor(f0Detector, gpu)

    pipeline = Pipeline(
        embedder,
        inferencer,
        pitchExtractor,
        modelSlot.samplingRate,
        dev,
        half,
    )

    return pipeline
14,101 | import os
import json
import torch
from onnxsim import simplify
import onnx
from const import TMP_DIR, EnumInferenceTypes
from data.ModelSlot import DiffusionSVCModelSlot
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
def _export2onnx(input_model, output_model, output_model_simple, is_half, metadata):
    """Export a torch checkpoint to ONNX, then simplify it and embed `metadata`.

    Args:
        input_model: path to the torch checkpoint (.pth).
        output_model: path for the raw ONNX export.
        output_model_simple: path for the simplified ONNX (with metadata attached).
        is_half: export in fp16 (requires CUDA device 0).
        metadata: dict stored as a JSON string in the ONNX metadata_props;
            "modelType" selects the exporter wrapper class.

    Raises:
        ValueError: for an unrecognized metadata["modelType"].
    """
    cpt = torch.load(input_model, map_location="cpu")
    if is_half:
        dev = torch.device("cuda", index=0)
    else:
        dev = torch.device("cpu")

    # modelType is stored as text because EnumInferenceTypes itself cannot be serialized.
    model_type = metadata["modelType"]
    if model_type == EnumInferenceTypes.pyTorchRVC.value:
        net_g_onnx = SynthesizerTrnMs256NSFsid_ONNX(*cpt["config"], is_half=is_half)
    elif model_type == EnumInferenceTypes.pyTorchWebUI.value:
        net_g_onnx = SynthesizerTrnMsNSFsid_webui_ONNX(**cpt["params"], is_half=is_half)
    elif model_type == EnumInferenceTypes.pyTorchRVCNono.value:
        net_g_onnx = SynthesizerTrnMs256NSFsid_nono_ONNX(*cpt["config"])
    elif model_type == EnumInferenceTypes.pyTorchWebUINono.value:
        net_g_onnx = SynthesizerTrnMsNSFsidNono_webui_ONNX(**cpt["params"])
    elif model_type == EnumInferenceTypes.pyTorchRVCv2.value:
        net_g_onnx = SynthesizerTrnMs768NSFsid_ONNX(*cpt["config"], is_half=is_half)
    elif model_type == EnumInferenceTypes.pyTorchRVCv2Nono.value:
        net_g_onnx = SynthesizerTrnMs768NSFsid_nono_ONNX(*cpt["config"])
    else:
        # BUG FIX: this branch used to only print and then crash later with a
        # NameError on the unbound `net_g_onnx`; fail loudly and clearly instead.
        raise ValueError(f"unknown modelType for onnx export: {model_type}")

    net_g_onnx.eval().to(dev)
    net_g_onnx.load_state_dict(cpt["weight"], strict=False)
    if is_half:
        net_g_onnx = net_g_onnx.half()

    # Dummy inputs for tracing: 2192 frames of features (fp16 or fp32 to match the model).
    if is_half:
        feats = torch.HalfTensor(1, 2192, metadata["embChannels"]).to(dev)
    else:
        feats = torch.FloatTensor(1, 2192, metadata["embChannels"]).to(dev)
    p_len = torch.LongTensor([2192]).to(dev)
    sid = torch.LongTensor([0]).to(dev)

    if metadata["f0"] is True:
        pitch = torch.zeros(1, 2192, dtype=torch.int64).to(dev)
        pitchf = torch.FloatTensor(1, 2192).to(dev)
        input_names = ["feats", "p_len", "pitch", "pitchf", "sid"]
        inputs = (
            feats,
            p_len,
            pitch,
            pitchf,
            sid,
        )
    else:
        input_names = ["feats", "p_len", "sid"]
        inputs = (
            feats,
            p_len,
            sid,
        )
    output_names = [
        "audio",
    ]

    torch.onnx.export(
        net_g_onnx,
        inputs,
        output_model,
        dynamic_axes={
            "feats": [1],
            "pitch": [1],
            "pitchf": [1],
        },
        do_constant_folding=False,
        opset_version=17,
        verbose=False,
        input_names=input_names,
        output_names=output_names,
    )

    # Simplify, then attach the metadata as a JSON string.
    model_onnx2 = onnx.load(output_model)
    model_simp, check = simplify(model_onnx2)
    meta = model_simp.metadata_props.add()
    meta.key = "metadata"
    meta.value = json.dumps(metadata)
    onnx.save(model_simp, output_model_simple)
# NOTE(review): `tmpdir` and `sys` are not defined in this excerpt — this line
# presumably relies on definitions elsewhere (the hasattr(sys, "_MEIPASS") check
# looks like PyInstaller-bundle detection); confirm before reuse.
TMP_DIR = os.path.join(tmpdir.name, "tmp_dir") if hasattr(sys, "_MEIPASS") else "tmp_dir"
class DiffusionSVCModelSlot(ModelSlot):
    """Model-slot settings for the Diffusion-SVC voice changer."""

    voiceChangerType: VoiceChangerType = "Diffusion-SVC"
    modelFile: str = ""
    isONNX: bool = False
    modelType: DiffusionSVCInferenceType = "combo"
    dstId: int = 1  # default target speaker id
    sampleId: str = ""
    defaultTune: int = 0  # default pitch shift (semitones, presumably — confirm)
    defaultKstep: int = 20  # default diffusion k-step
    defaultSpeedup: int = 10  # default inference speed-up factor
    kStepMax: int = 100
    nLayers: int = 20
    nnLayers: int = 20
    speakers: dict = field(default_factory=lambda: {1: "user"})  # speaker id -> display name
    embedder: EmbedderType = "hubert_base"
    samplingRate: int = 44100
    embChannels: int = 768
class DeviceManager(object):
    """Singleton helper that selects torch devices and ONNX execution providers."""

    _instance = None
    # When True, half precision is disabled regardless of GPU capability.
    forceTensor: bool = False

    # BUG FIX: get_instance takes `cls` and is called as DeviceManager.get_instance()
    # elsewhere in this file, so it must be a classmethod.
    @classmethod
    def get_instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        self.gpu_num = torch.cuda.device_count()
        self.mps_enabled: bool = (
            getattr(torch.backends, "mps", None) is not None
            and torch.backends.mps.is_available()
        )

    def getDevice(self, id: int):
        """Map a GPU index to a torch.device; negative id / no CUDA falls back to MPS or CPU."""
        if id < 0 or self.gpu_num == 0:
            if self.mps_enabled is False:
                dev = torch.device("cpu")
            else:
                dev = torch.device("mps")
        else:
            if id < self.gpu_num:
                dev = torch.device("cuda", index=id)
            else:
                print("[Voice Changer] device detection error, fallback to cpu")
                dev = torch.device("cpu")
        return dev

    def getOnnxExecutionProvider(self, gpu: int):
        """Pick ONNX Runtime providers/options for `gpu` (CUDA > DirectML > CPU)."""
        availableProviders = onnxruntime.get_available_providers()
        devNum = torch.cuda.device_count()
        if gpu >= 0 and "CUDAExecutionProvider" in availableProviders and devNum > 0:
            # Checked one level down (not in the outer condition) to give a clearer error.
            if gpu < devNum:
                return ["CUDAExecutionProvider"], [{"device_id": gpu}]
            else:
                print("[Voice Changer] device detection error, fallback to cpu")
                return ["CPUExecutionProvider"], [
                    {
                        "intra_op_num_threads": 8,
                        "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                        "inter_op_num_threads": 8,
                    }
                ]
        elif gpu >= 0 and "DmlExecutionProvider" in availableProviders:
            return ["DmlExecutionProvider"], [{"device_id": gpu}]
        else:
            return ["CPUExecutionProvider"], [
                {
                    "intra_op_num_threads": 8,
                    "execution_mode": onnxruntime.ExecutionMode.ORT_PARALLEL,
                    "inter_op_num_threads": 8,
                }
            ]

    def setForceTensor(self, forceTensor: bool):
        self.forceTensor = forceTensor

    def halfPrecisionAvailable(self, id: int):
        """Heuristic: can device `id` run fp16? False for CPU, known-weak GPUs, or compute < 7."""
        if self.gpu_num == 0:
            return False
        if id < 0:
            return False
        if self.forceTensor:
            return False
        try:
            gpuName = torch.cuda.get_device_name(id).upper()
            # GTX 16xx / 10xx and P40 are excluded: they handle fp16 poorly.
            if (
                ("16" in gpuName and "V100" not in gpuName)
                or "P40" in gpuName.upper()
                or "1070" in gpuName
                or "1080" in gpuName
            ):
                return False
        except Exception as e:
            print(e)
            return False

        cap = torch.cuda.get_device_capability(id)
        # Compute capability >= 7 is said to support half precision
        # (exceptions exist, e.g. the T500).
        if cap[0] < 7:
            return False

        return True

    def getDeviceMemory(self, id: int):
        """Total memory of CUDA device `id` in bytes; 0 when it cannot be queried."""
        try:
            return torch.cuda.get_device_properties(id).total_memory
        except Exception as e:
            print(e)
            return 0
def export2onnx(gpu: int, modelSlot: DiffusionSVCModelSlot):
    """Export the slot's model to ONNX under TMP_DIR and return the simplified filename.

    fp16 export is used when the GPU reports available memory; otherwise fp32.
    """
    model_file = modelSlot.modelFile
    base_name = os.path.splitext(os.path.basename(model_file))[0]
    output_file = base_name + ".onnx"
    output_file_simple = base_name + "_simple.onnx"
    output_path = os.path.join(TMP_DIR, output_file)
    output_path_simple = os.path.join(TMP_DIR, output_file_simple)

    # Metadata embedded into the exported ONNX file.
    metadata = {
        "application": "VC_CLIENT",
        "version": "3",
        "voiceChangerType": modelSlot.voiceChangerType,
        "modelType": modelSlot.modelType,
        "samplingRate": modelSlot.samplingRate,
        "embChannels": modelSlot.embChannels,
        "embedder": modelSlot.embedder,
    }

    gpu_memory = DeviceManager.get_instance().getDeviceMemory(gpu)
    print(f"[Voice Changer] exporting onnx... gpu_id:{gpu} gpu_mem:{gpu_memory}")

    if gpu_memory > 0:
        _export2onnx(model_file, output_path, output_path_simple, True, metadata)
    else:
        print("[Voice Changer] Warning!!! onnx export with float32. maybe size is doubled.")
        _export2onnx(model_file, output_path, output_path_simple, False, metadata)

    return output_file_simple
14,102 | import traceback
from Exceptions import PipelineCreateException
from data.ModelSlot import DiffusionSVCModelSlot
from voice_changer.DiffusionSVC.inferencer.InferencerManager import InferencerManager
from voice_changer.DiffusionSVC.pipeline.Pipeline import Pipeline
from voice_changer.DiffusionSVC.pitchExtractor.PitchExtractorManager import PitchExtractorManager
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from voice_changer.RVC.embedder.EmbedderManager import EmbedderManager
import os
import torch
from torchaudio.transforms import Resample
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
# NOTE(review): everything below this line is a signature-only skeleton — the
# bodies were elided in this excerpt (extraction artifact). Full implementations
# of these classes exist in their own modules (see the imports above); nothing
# here should be edited in place, and this section is not valid Python as-is.
class PipelineCreateException(Exception):
    def __str__(self):

class DiffusionSVCModelSlot(ModelSlot):

class InferencerManager:
    def initialize(cls, params: VoiceChangerParams):
    def getInferencer(
        cls,
        inferencerType: DiffusionSVCInferenceType,
        file: str,
        gpu: int,
    ) -> Inferencer:
    def loadInferencer(
        cls,
        inferencerType: DiffusionSVCInferenceType,
        file: str,
        gpu: int,
    ) -> Inferencer:

class Pipeline(object):
    def __init__(
        self,
        embedder: Embedder,
        inferencer: Inferencer,
        pitchExtractor: PitchExtractor,
        # index: Any | None,
        targetSR,
        device,
        isHalf,
        resamplerIn: Resample,
        resamplerOut: Resample,
    ):
    def getPipelineInfo(self):
    def setPitchExtractor(self, pitchExtractor: PitchExtractor):
    def extract_volume_and_mask(self, audio: torch.Tensor, threshold: float):
    def exec(
        self,
        sid,
        audio,  # torch.tensor [n]
        sr,
        pitchf,  # np.array [m]
        feature,  # np.array [m, feat]
        f0_up_key,
        k_step,
        infer_speedup,
        silence_front,
        embOutputLayer,
        useFinalProj,
        protect=0.5,
        skip_diffusion=True,
    ):

class PitchExtractorManager(Protocol):
    def initialize(cls, params: VoiceChangerParams):
    def getPitchExtractor(
        cls, pitchExtractorType: PitchExtractorType, gpu: int
    ) -> PitchExtractor:
    def loadPitchExtractor(
        cls, pitchExtractorType: PitchExtractorType, gpu: int
    ) -> PitchExtractor:

class DeviceManager(object):
    def get_instance(cls):
    def __init__(self):
    def getDevice(self, id: int):
    def getOnnxExecutionProvider(self, gpu: int):
    def setForceTensor(self, forceTensor: bool):
    def halfPrecisionAvailable(self, id: int):
    def getDeviceMemory(self, id: int):

class EmbedderManager:
    def initialize(cls, params: VoiceChangerParams):
    def getEmbedder(cls, embederType: EmbedderType, isHalf: bool, dev: device) -> Embedder:
    def loadEmbedder(cls, embederType: EmbedderType, isHalf: bool, dev: device) -> Embedder:

class VoiceChangerParamsManager:
    def __init__(self):
    def get_instance(cls):
    def setParams(self, params: VoiceChangerParams):
def createPipeline(modelSlot: DiffusionSVCModelSlot, gpu: int, f0Detector: str, inputSampleRate: int, outputSampleRate: int):
    """Assemble a Diffusion-SVC Pipeline, including in/out resamplers on the device.

    Raises:
        PipelineCreateException: when the inferencer or embedder fails to load.
    """
    dev = DeviceManager.get_instance().getDevice(gpu)
    vcparams = VoiceChangerParamsManager.get_instance().params
    # half = DeviceManager.get_instance().halfPrecisionAvailable(gpu)
    half = False  # half precision is deliberately disabled for this pipeline

    # Create inferencer
    try:
        modelPath = os.path.join(vcparams.model_dir, str(modelSlot.slotIndex), os.path.basename(modelSlot.modelFile))
        inferencer = InferencerManager.getInferencer(modelSlot.modelType, modelPath, gpu)
    except Exception as e:
        print("[Voice Changer] exception! loading inferencer", e)
        traceback.print_exc()
        raise PipelineCreateException("[Voice Changer] exception! loading inferencer")

    # Create embedder
    try:
        embedder = EmbedderManager.getEmbedder(
            modelSlot.embedder,
            # emmbedderFilename,
            half,
            dev,
        )
    except Exception as e:
        print("[Voice Changer] exception! loading embedder", e)
        traceback.print_exc()
        raise PipelineCreateException("[Voice Changer] exception! loading embedder")

    # pitchExtractor
    pitchExtractor = PitchExtractorManager.getPitchExtractor(f0Detector, gpu)

    # int16 resamplers: capture rate -> 16 kHz in, model rate -> playback rate out.
    resamplerIn = Resample(inputSampleRate, 16000, dtype=torch.int16).to(dev)
    resamplerOut = Resample(modelSlot.samplingRate, outputSampleRate, dtype=torch.int16).to(dev)

    pipeline = Pipeline(
        embedder,
        inferencer,
        pitchExtractor,
        modelSlot.samplingRate,
        dev,
        half,
        resamplerIn,
        resamplerOut
    )

    return pipeline
14,103 | import os
from data.ModelSlot import DiffusionSVCModelSlot, ModelSlot
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.unit2mel import load_model_vocoder_from_combo
from voice_changer.VoiceChangerParamsManager import VoiceChangerParamsManager
from voice_changer.utils.LoadModelParams import LoadModelParams
from voice_changer.utils.ModelSlotGenerator import ModelSlotGenerator
def get_divisors(n):
    """Return all positive divisors of *n* in ascending order.

    Trial division up to sqrt(n); each hit contributes the small divisor and
    its cofactor (counted once for perfect squares).
    """
    small, large = [], []
    i = 1
    while i * i <= n:
        if n % i == 0:
            small.append(i)
            if i * i != n:
                large.append(n // i)
        i += 1
    return sorted(small + large)
14,104 | from torchaudio.transforms import Resample
import pyworld as pw
import numpy as np
import torchcrepe
import torch
import torch.nn.functional as F
def median_pool_1d(x, kernel_size):
    """Sliding-window median over dim 1 with reflect padding.

    For even kernels this takes the lower median (the same order statistic the
    sort-and-index formulation selects). Input (B, L) -> output (B, L).
    """
    left = (kernel_size - 1) // 2
    right = kernel_size // 2
    padded = F.pad(x.unsqueeze(1), (left, right), mode="reflect").squeeze(1)
    windows = padded.unfold(1, kernel_size, 1)
    # kthvalue(left + 1) == sorted(window)[(kernel_size - 1) // 2]
    medians, _ = windows.kthvalue(left + 1, dim=-1)
    return medians
14,105 | from torchaudio.transforms import Resample
import pyworld as pw
import numpy as np
import torchcrepe
import torch
import torch.nn.functional as F
def masked_avg_pool_1d(x, kernel_size):
    """NaN-aware sliding average over dim 1 with reflect padding.

    NaN entries are excluded from both the window sum and the divisor, so each
    output is the mean of the valid samples in its window. Input (B, L) ->
    output (B, L).
    """
    left = (kernel_size - 1) // 2
    right = kernel_size // 2
    padded = F.pad(x.unsqueeze(1), (left, right), mode="reflect")

    valid = ~torch.isnan(padded)
    zero_filled = torch.where(valid, padded, torch.zeros_like(padded))

    # Depthwise all-ones kernel turns conv1d into a per-channel window sum.
    kernel = torch.ones(padded.size(1), 1, kernel_size, device=padded.device)
    window_sum = F.conv1d(
        zero_filled,
        kernel,
        stride=1,
        padding=0,
        groups=padded.size(1),
    )
    window_count = F.conv1d(
        valid.float(),
        kernel,
        stride=1,
        padding=0,
        groups=padded.size(1),
    ).clamp(min=1)  # avoid division by zero for all-NaN windows

    return (window_sum / window_count).squeeze(1)
14,108 | import torch
from torch import nn
import math
from functools import partial
from einops import rearrange, repeat
from local_attention import LocalAttention
import torch.nn.functional as F
def exists(val):
def default(val, d):
    """Return `val` when it is set (per `exists`), otherwise the fallback `d`."""
    if exists(val):
        return val
    return d
14,112 | import torch
from torch import nn
import math
from functools import partial
from einops import rearrange, repeat
from local_attention import LocalAttention
import torch.nn.functional as F
def orthogonal_matrix_chunk(cols, qr_uniform_q = False, device = None):
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, qr_uniform_q = False, device = None):
    """Stack orthogonal blocks into an (nb_rows, nb_columns) Gaussian-orthogonal matrix.

    Full square blocks come from orthogonal_matrix_chunk; a truncated block fills
    the remainder. Row scaling: 0 -> norms of a fresh Gaussian sample, 1 ->
    sqrt(nb_columns) for every row.
    """
    full_blocks = int(nb_rows / nb_columns)
    blocks = [
        orthogonal_matrix_chunk(nb_columns, qr_uniform_q = qr_uniform_q, device = device)
        for _ in range(full_blocks)
    ]
    # Each entry of `blocks` is an orthogonal (nb_columns, nb_columns) matrix.

    remaining_rows = nb_rows - full_blocks * nb_columns
    if remaining_rows > 0:
        extra = orthogonal_matrix_chunk(nb_columns, qr_uniform_q = qr_uniform_q, device = device)
        blocks.append(extra[:remaining_rows])

    final_matrix = torch.cat(blocks)

    if scaling == 0:
        multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1)
    elif scaling == 1:
        multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device)
    else:
        raise ValueError(f'Invalid scaling {scaling}')

    return torch.diag(multiplier) @ final_matrix
14,113 | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from .pcmer import PCmer
def l2_regularization(model, l2_alpha):
    """Sum of squared Conv2d weights (halved), scaled by l2_alpha; 0 with no Conv2d."""
    penalties = [
        (m.weight ** 2).sum() / 2.0
        for m in model.modules()
        if type(m) is nn.Conv2d  # exact type match, mirroring the original check
    ]
    return l2_alpha * sum(penalties)
14,114 | from collections import deque
from functools import partial
from inspect import isfunction
import torch.nn.functional as F
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
def exists(x):
    """Return True when *x* is not None."""
    return x is not None


def default(val, d):
    """Return *val* when present, else the fallback *d* (called when it is a function)."""
    if not exists(val):
        return d() if isfunction(d) else d
    return val
14,116 | from collections import deque
from functools import partial
from inspect import isfunction
import torch.nn.functional as F
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
def noise_like(shape, device, repeat=False):
    """Gaussian noise of `shape` on `device`; with repeat=True, one sample tiled across dim 0."""
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
14,119 | import os
import yaml
import torch
import torch.nn as nn
import numpy as np
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.diffusion import GaussianDiffusion
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.wavenet import WaveNet
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.vocoder import Vocoder
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.naive.naive import Unit2MelNaive
class DotDict(dict):
    """dict subclass exposing keys as attributes; nested plain dicts are re-wrapped on access."""
    def __getattr__(self, key):
        # Missing keys yield None (dict.get default) rather than raising AttributeError.
        value = dict.get(self, key)
        if type(value) is dict:
            return DotDict(value)
        return value
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
def load_svc_model(args, vocoder_dimension):
    """Build the SVC network described by *args*.

    Args:
        args: config object with attribute access (e.g. DotDict); reads
            args.model.type ('Diffusion' | 'Naive' | 'NaiveFS') plus the
            per-type hyper-parameters under args.model / args.data.
        vocoder_dimension: number of mel bins the vocoder expects.

    Returns:
        The constructed (untrained) model.

    Raises:
        ValueError: if args.model.type is not a recognized model type.
    """
    model_type = args.model.type
    if model_type == 'Diffusion':
        # NOTE(review): Unit2Mel is not among this file's visible imports — confirm it is in scope.
        model = Unit2Mel(
            args.data.encoder_out_channels,
            args.model.n_spk,
            args.model.use_pitch_aug,
            vocoder_dimension,
            args.model.n_layers,
            args.model.n_chans,
            args.model.n_hidden,
            use_speaker_encoder=args.model.use_speaker_encoder,
            speaker_encoder_out_channels=args.data.speaker_encoder_out_channels)
    elif model_type == 'Naive':
        model = Unit2MelNaive(
            args.data.encoder_out_channels,
            args.model.n_spk,
            args.model.use_pitch_aug,
            vocoder_dimension,
            args.model.n_layers,
            args.model.n_chans,
            use_speaker_encoder=args.model.use_speaker_encoder,
            speaker_encoder_out_channels=args.data.speaker_encoder_out_channels)
    elif model_type == 'NaiveFS':
        model = Unit2MelNaive(
            args.data.encoder_out_channels,
            args.model.n_spk,
            args.model.use_pitch_aug,
            vocoder_dimension,
            args.model.n_layers,
            args.model.n_chans,
            use_speaker_encoder=args.model.use_speaker_encoder,
            speaker_encoder_out_channels=args.data.speaker_encoder_out_channels,
            use_full_siren=True,
            l2reg_loss=args.model.l2_reg_loss)
    else:
        # Bug fix: original code did `raise ("Unknow model")`, which raises a str
        # and itself fails with "exceptions must derive from BaseException".
        raise ValueError(f"Unknown model type: {model_type}")
    return model
class Vocoder:
    """Thin wrapper around an NSF-HiFiGAN vocoder backend.

    Handles device selection, cached input resampling, mel extraction and
    mel+f0 -> waveform inference.
    """
    def __init__(self, vocoder_type, vocoder_ckpt, device=None):
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        if vocoder_type == 'nsf-hifigan':
            self.vocoder = NsfHifiGAN(vocoder_ckpt, device=device)
        elif vocoder_type == 'nsf-hifigan-log10':
            self.vocoder = NsfHifiGANLog10(vocoder_ckpt, device=device)
        else:
            raise ValueError(f" [x] Unknown vocoder: {vocoder_type}")
        # One Resample module cached per source sample rate.
        self.resample_kernel = {}
        self.vocoder_sample_rate = self.vocoder.sample_rate()
        self.vocoder_hop_size = self.vocoder.hop_size()
        self.dimension = self.vocoder.dimension()
    def extract(self, audio, sample_rate, keyshift=0):
        """Resample *audio* to the vocoder rate if needed and return its mel (B, n_frames, bins)."""
        if sample_rate != self.vocoder_sample_rate:
            key = str(sample_rate)
            if key not in self.resample_kernel:
                self.resample_kernel[key] = Resample(
                    sample_rate, self.vocoder_sample_rate,
                    lowpass_filter_width=128).to(self.device)
            audio = self.resample_kernel[key](audio)
        return self.vocoder.extract(audio, keyshift=keyshift)
    def infer(self, mel, f0):
        """Synthesize a waveform from *mel* and *f0* (f0 trimmed to mel length)."""
        trimmed_f0 = f0[:, :mel.size(1), 0]  # B, n_frames
        return self.vocoder(mel, trimmed_f0)
def load_model_vocoder(
        model_path,
        device='cpu',
        loaded_vocoder=None):
    """Load an SVC checkpoint plus its vocoder.

    Reads `config.yaml` next to *model_path*, builds (or reuses) the vocoder,
    constructs the model and restores its weights in eval mode.

    Returns:
        (model, vocoder, args)
    """
    config_path = os.path.join(os.path.split(model_path)[0], 'config.yaml')
    with open(config_path, "r") as fp:
        args = DotDict(yaml.safe_load(fp))
    # Reuse a caller-provided vocoder to avoid reloading its weights.
    if loaded_vocoder is None:
        vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=device)
    else:
        vocoder = loaded_vocoder
    model = load_svc_model(args=args, vocoder_dimension=vocoder.dimension)
    print(' [Loading] ' + model_path)
    checkpoint = torch.load(model_path, map_location=torch.device(device))
    model.to(device)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    return model, vocoder, args
import os
import yaml
import torch
import torch.nn as nn
import numpy as np
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.diffusion import GaussianDiffusion
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.wavenet import WaveNet
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.vocoder import Vocoder
from voice_changer.DiffusionSVC.inferencer.diffusion_svc_model.diffusion.naive.naive import Unit2MelNaive
class DotDict(dict):
    """dict subclass exposing keys as attributes; nested plain dicts are re-wrapped on access."""
    def __getattr__(self, key):
        # Missing keys yield None (dict.get default) rather than raising AttributeError.
        value = dict.get(self, key)
        if type(value) is dict:
            return DotDict(value)
        return value
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
def load_svc_model(args, vocoder_dimension):
    """Build the SVC network described by *args*.

    Args:
        args: config object with attribute access (e.g. DotDict); reads
            args.model.type ('Diffusion' | 'Naive' | 'NaiveFS') plus the
            per-type hyper-parameters under args.model / args.data.
        vocoder_dimension: number of mel bins the vocoder expects.

    Returns:
        The constructed (untrained) model.

    Raises:
        ValueError: if args.model.type is not a recognized model type.
    """
    model_type = args.model.type
    if model_type == 'Diffusion':
        # NOTE(review): Unit2Mel is not among this file's visible imports — confirm it is in scope.
        model = Unit2Mel(
            args.data.encoder_out_channels,
            args.model.n_spk,
            args.model.use_pitch_aug,
            vocoder_dimension,
            args.model.n_layers,
            args.model.n_chans,
            args.model.n_hidden,
            use_speaker_encoder=args.model.use_speaker_encoder,
            speaker_encoder_out_channels=args.data.speaker_encoder_out_channels)
    elif model_type == 'Naive':
        model = Unit2MelNaive(
            args.data.encoder_out_channels,
            args.model.n_spk,
            args.model.use_pitch_aug,
            vocoder_dimension,
            args.model.n_layers,
            args.model.n_chans,
            use_speaker_encoder=args.model.use_speaker_encoder,
            speaker_encoder_out_channels=args.data.speaker_encoder_out_channels)
    elif model_type == 'NaiveFS':
        model = Unit2MelNaive(
            args.data.encoder_out_channels,
            args.model.n_spk,
            args.model.use_pitch_aug,
            vocoder_dimension,
            args.model.n_layers,
            args.model.n_chans,
            use_speaker_encoder=args.model.use_speaker_encoder,
            speaker_encoder_out_channels=args.data.speaker_encoder_out_channels,
            use_full_siren=True,
            l2reg_loss=args.model.l2_reg_loss)
    else:
        # Bug fix: original code did `raise ("Unknow model")`, which raises a str
        # and itself fails with "exceptions must derive from BaseException".
        raise ValueError(f"Unknown model type: {model_type}")
    return model
def load_model_vocoder_from_combo(combo_model_path, device='cpu'):
    """Load a combined diffusion + naive checkpoint bundle.

    Returns:
        (diff_model, diff_args, naive_model, naive_args), both models in eval mode.
    """
    bundle = torch.load(combo_model_path, map_location=torch.device(device))
    diff_args = DotDict(bundle["diff_config_dict"])
    naive_args = DotDict(bundle["naive_config_dict"])
    print(' [Loading] ' + combo_model_path)

    def _restore(cfg, state):
        # Build one sub-model and restore its weights (mel dimension fixed at 128).
        net = load_svc_model(args=cfg, vocoder_dimension=128)
        net.to(device)
        net.load_state_dict(state['model'])
        net.eval()
        return net

    diff_model = _restore(diff_args, bundle["diff_model"])
    naive_model = _restore(naive_args, bundle["naive_model"])
    return diff_model, diff_args, naive_model, naive_args
import torch
def expand_dims(v, dims):
    """
    Append trailing singleton axes to `v`.

    Args:
        `v`: a PyTorch tensor, typically of shape [N].
        `dims`: target total number of dimensions for a 1-D input.
    Returns:
        `v` viewed with (dims - 1) extra trailing axes of size 1,
        e.g. [N] -> [N, 1, 1, ..., 1].
    """
    return v.reshape(*v.shape, *((1,) * (dims - 1)))
The provided code snippet includes necessary dependencies for implementing the `model_wrapper` function. Write a Python function `def model_wrapper( model, noise_schedule, model_type="noise", model_kwargs={}, guidance_type="uncond", condition=None, unconditional_condition=None, guidance_scale=1., classifier_fn=None, classifier_kwargs={}, )` to solve the following problem:
Create a wrapper function for the noise prediction model. DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. We support four types of the diffusion model by setting `model_type`: 1. "noise": noise prediction model. (Trained by predicting noise). 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). 3. "v": velocity prediction model. (Trained by predicting the velocity). The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." arXiv preprint arXiv:2202.00512 (2022). [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." arXiv preprint arXiv:2210.02303 (2022). 4. "score": marginal score function. (Trained by denoising score matching). Note that the score function and the noise prediction model follows a simple relationship: ``` noise(x_t, t) = -sigma_t * score(x_t, t) ``` We support three types of guided sampling by DPMs by setting `guidance_type`: 1. "uncond": unconditional sampling by DPMs. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` The input `classifier_fn` has the following format: `` classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) `` [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. 
The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs.
Here is the function:
def model_wrapper(
    model,
    noise_schedule,
    model_type="noise",
    model_kwargs={},
    guidance_type="uncond",
    condition=None,
    unconditional_condition=None,
    guidance_scale=1.,
    classifier_fn=None,
    classifier_kwargs={},
):
    """Create a wrapper function for the noise prediction model.
    DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
    firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
    We support four types of the diffusion model by setting `model_type`:
    1. "noise": noise prediction model. (Trained by predicting noise).
    2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
    3. "v": velocity prediction model. (Trained by predicting the velocity).
    The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
    [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
    arXiv preprint arXiv:2202.00512 (2022).
    [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
    arXiv preprint arXiv:2210.02303 (2022).
    4. "score": marginal score function. (Trained by denoising score matching).
    Note that the score function and the noise prediction model follows a simple relationship:
    ```
    noise(x_t, t) = -sigma_t * score(x_t, t)
    ```
    We support three types of guided sampling by DPMs by setting `guidance_type`:
    1. "uncond": unconditional sampling by DPMs.
    The input `model` has the following format:
    ``
    model(x, t_input, **model_kwargs) -> noise | x_start | v | score
    ``
    2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
    The input `model` has the following format:
    ``
    model(x, t_input, **model_kwargs) -> noise | x_start | v | score
    ``
    The input `classifier_fn` has the following format:
    ``
    classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
    ``
    [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
    in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
    3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
    The input `model` has the following format:
    ``
    model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
    ``
    And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
    [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
    arXiv preprint arXiv:2207.12598 (2022).
    The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
    or continuous-time labels (i.e. epsilon to T).
    We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
    ``
    def model_fn(x, t_continuous) -> noise:
        t_input = get_model_input_time(t_continuous)
        return noise_pred(model, x, t_input, **model_kwargs)
    ``
    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
    ===============================================================
    Args:
        model: A diffusion model with the corresponding format described above.
        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
        model_type: A `str`. The parameterization type of the diffusion model.
                    "noise" or "x_start" or "v" or "score".
        model_kwargs: A `dict`. A dict for the other inputs of the model function.
        guidance_type: A `str`. The type of the guidance for sampling.
                    "uncond" or "classifier" or "classifier-free".
        condition: A pytorch tensor. The condition for the guided sampling.
                    Only used for "classifier" or "classifier-free" guidance type.
        unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
                    Only used for "classifier-free" guidance type.
        guidance_scale: A `float`. The scale for the guided sampling.
        classifier_fn: A classifier function. Only used for the classifier guidance.
        classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
    Returns:
        A noise prediction model that accepts the noised data and the continuous time as the inputs.
    """
    # Fix: validate configuration up front so a bad `model_type`/`guidance_type`
    # fails at wrapper-construction time instead of silently misbehaving on the
    # first sampling step (originally these asserts ran only at the very end).
    assert model_type in ["noise", "x_start", "v", "score"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * noise_schedule.total_N
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        # Run the raw model, then convert its output parameterization
        # (noise / x0 / v / score) into a noise prediction.
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            return (x - expand_dims(alpha_t, x.dim()) * output) / expand_dims(sigma_t, x.dim())
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            return expand_dims(alpha_t, x.dim()) * output + expand_dims(sigma_t, x.dim()) * x
        elif model_type == "score":
            sigma_t = noise_schedule.marginal_std(t_continuous)
            return -expand_dims(sigma_t, x.dim()) * output
        else:
            # Fix: originally this fell through and returned None. Unreachable
            # after the asserts above, but guards against `python -O` (which
            # strips asserts) and future model types.
            raise ValueError(f"Unsupported model_type: {model_type}")

    def cond_grad_fn(x, t_input):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, x.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                # Batch the conditional and unconditional passes into one model call.
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    return model_fn
"classifier-free": classifier-free guidance sampling by conditional DPMs. The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. =============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. 
Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs. |
import os
import torch
import torch.utils.data
import numpy as np
import librosa
from librosa.filters import mel as librosa_mel_fn
import soundfile as sf
import torch.nn.functional as F
def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
    """Load an audio file into a float32 torch tensor scaled to [-1, 1].

    Args:
        full_path: path to an audio file readable by soundfile.
        target_sr: if given and different from the file's rate, resample to it.
        return_empty_on_exception: on read failure or inf/NaN samples, return
            ([], fallback_sample_rate) instead of raising.

    Returns:
        (data, sampling_rate): data is a 1-D FloatTensor (first channel only).

    Raises:
        Exception: wrapping the original read error (with the cause chained).
    """
    sampling_rate = None
    try:
        data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile.
    except Exception as ex:
        print(f"'{full_path}' failed to load.\nException:")
        print(ex)
        if return_empty_on_exception:
            return [], sampling_rate or target_sr or 48000
        else:
            # Fix: chain the original exception so the root-cause traceback
            # is preserved instead of being discarded.
            raise Exception(ex) from ex
    if len(data.shape) > 1:
        # Keep only the first channel of multi-channel audio.
        data = data[:, 0]
        assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
    if np.issubdtype(data.dtype, np.integer): # if audio data is type int
        max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
    else: # if audio data is type fp32
        max_mag = max(np.amax(data), -np.amin(data))
        max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
    data = torch.FloatTensor(data.astype(np.float32))/max_mag
    if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
        return [], sampling_rate or target_sr or 48000
    if target_sr is not None and sampling_rate != target_sr:
        data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
        sampling_rate = target_sr
    return data, sampling_rate
import numpy as np
import torch
import torch.nn.functional as F
import pyworld as pw
import parselmouth
import torchcrepe
import librosa
import fsspec
from tqdm import tqdm
from transformers import HubertModel, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC
from fairseq import checkpoint_utils
from encoder.hubert.model import HubertSoft
from encoder.speaker_encoder.model import SpeakerEncoder as TTSSpeakerEncoder
import scipy.signal
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from torchaudio.transforms import Resample
def masked_avg_pool_1d(x, kernel_size):
    """NaN-aware sliding average over the last dim of a (B, T) tensor.

    NaN entries are excluded from each window's mean; windows use reflect
    padding so the output length equals the input length.
    """
    left_pad = (kernel_size - 1) // 2
    right_pad = kernel_size // 2
    padded = F.pad(x.unsqueeze(1), (left_pad, right_pad), mode="reflect")
    valid = ~torch.isnan(padded)
    # Zero out NaNs so they contribute nothing to the window sums.
    zeroed = torch.where(valid, padded, torch.zeros_like(padded))
    kernel = torch.ones(padded.size(1), 1, kernel_size, device=padded.device)
    window_sum = F.conv1d(
        zeroed,
        kernel,
        stride=1,
        padding=0,
        groups=padded.size(1),
    )
    window_count = F.conv1d(
        valid.float(),
        kernel,
        stride=1,
        padding=0,
        groups=padded.size(1),
    ).clamp(min=1)  # avoid division by zero for all-NaN windows
    return (window_sum / window_count).squeeze(1)
import numpy as np
import torch
import torch.nn.functional as F
import pyworld as pw
import parselmouth
import torchcrepe
import librosa
import fsspec
from tqdm import tqdm
from transformers import HubertModel, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC
from fairseq import checkpoint_utils
from encoder.hubert.model import HubertSoft
from encoder.speaker_encoder.model import SpeakerEncoder as TTSSpeakerEncoder
import scipy.signal
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from torchaudio.transforms import Resample
def median_pool_1d(x, kernel_size):
    """Sliding-window (lower) median over the last dim of a (B, T) tensor, reflect-padded to keep length."""
    left_pad = (kernel_size - 1) // 2
    right_pad = kernel_size // 2
    padded = F.pad(x.unsqueeze(1), (left_pad, right_pad), mode="reflect").squeeze(1)
    windows = padded.unfold(1, kernel_size, 1)
    ranked, _ = torch.sort(windows, dim=-1)
    # Middle element of each sorted window (lower median for even kernels).
    return ranked[:, :, left_pad]
import numpy as np
import torch
import torch.nn.functional as F
import pyworld as pw
import parselmouth
import torchcrepe
import librosa
import fsspec
from tqdm import tqdm
from transformers import HubertModel, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC
from fairseq import checkpoint_utils
from encoder.hubert.model import HubertSoft
from encoder.speaker_encoder.model import SpeakerEncoder as TTSSpeakerEncoder
import scipy.signal
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from torchaudio.transforms import Resample
def cross_fade(a: np.ndarray, b: np.ndarray, idx: int):
    """Linearly cross-fade from `a` into `b`, with `b` starting at sample `idx` of `a`.

    The overlap region (from idx to the end of `a`) blends the two signals
    with a linear ramp; the result has length idx + len(b).
    """
    overlap = a.shape[0] - idx
    out = np.zeros(idx + b.shape[0])
    out[:idx] = a[:idx]
    ramp = np.linspace(0, 1.0, num=overlap, endpoint=True)
    out[idx: a.shape[0]] = (1 - ramp) * a[idx:] + ramp * b[:overlap]
    out[a.shape[0]:] = b[overlap:]
    return out
import os
import numpy as np
from tqdm import tqdm
import pickle
import torch
from pathlib import Path
def train_index(path):
    """Build a faiss "IVF…,Flat" index over all unit feature .npy files in *path*."""
    import faiss
    # from: RVC https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
    # Collect every feature file in the directory.
    file_paths = [os.path.join(path, name) for name in os.listdir(path)]
    print(" [INFO] Loading the Units files...")
    features = [np.load(p) for p in tqdm(sorted(file_paths))]
    big_npy = np.concatenate(features, 0)
    # Shuffle rows so index training sees an unbiased sample.
    order = np.arange(big_npy.shape[0])
    np.random.shuffle(order)
    big_npy = big_npy[order]
    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    index = faiss.index_factory(big_npy.shape[1], "IVF%s,Flat" % n_ivf)
    faiss.extract_index_ivf(index).nprobe = 1
    index.train(big_npy)
    print(" [INFO] Training the Units indexes...")
    batch_size_add = 8192
    for start in tqdm(range(0, big_npy.shape[0], batch_size_add)):
        index.add(big_npy[start: start + batch_size_add])
    return index
import librosa
import torch
import torchaudio
class Slicer:
    """Split audio into voiced / silent segments by RMS thresholding.

    All internal bookkeeping is done in hop-sized frames. ``slice`` returns a
    dict of chunks, each tagged ``slice=True`` (silence) or ``slice=False``
    (voiced) with a "start,end" sample range in ``split_time``.
    """
    def __init__(self,
                 sr: int,
                 threshold: float = -40.,
                 min_length: int = 5000,
                 min_interval: int = 300,
                 hop_size: int = 20,
                 max_sil_kept: int = 5000):
        """Configure the slicer.

        Args:
            sr: sample rate of the audio passed to ``slice``.
            threshold: silence threshold in dB (frames below it count as silent).
            min_length: minimum clip length in ms.
            min_interval: minimum silence length in ms for a cut to happen.
            hop_size: analysis hop in ms.
            max_sil_kept: maximum silence kept around a cut, in ms.

        Raises:
            ValueError: if min_length >= min_interval >= hop_size or
                max_sil_kept >= hop_size is violated.
        """
        if not min_length >= min_interval >= hop_size:
            raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
        if not max_sil_kept >= hop_size:
            raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
        # Convert every ms-based parameter into samples and then frames.
        min_interval = sr * min_interval / 1000
        self.threshold = 10 ** (threshold / 20.)  # dB -> linear amplitude
        self.hop_size = round(sr * hop_size / 1000)
        self.win_size = min(round(min_interval), 4 * self.hop_size)
        self.min_length = round(sr * min_length / 1000 / self.hop_size)
        self.min_interval = round(min_interval / self.hop_size)
        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
    def _apply_slice(self, waveform, begin, end):
        # Cut [begin, end) frame range out of the waveform (channel-first if 2-D).
        if len(waveform.shape) > 1:
            return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
        else:
            return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
    # @timeit
    def slice(self, waveform):
        """Return a dict of voiced/silent chunks for *waveform* (mono or multi-channel)."""
        if len(waveform.shape) > 1:
            samples = librosa.to_mono(waveform)
        else:
            samples = waveform
        # Too short to slice: return the whole thing as one voiced chunk.
        if samples.shape[0] <= self.min_length:
            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
        rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
        sil_tags = []
        silence_start = None
        clip_start = 0
        for i, rms in enumerate(rms_list):
            # Keep looping while frame is silent.
            if rms < self.threshold:
                # Record start of silent frames.
                if silence_start is None:
                    silence_start = i
                continue
            # Keep looping while frame is not silent and silence start has not been recorded.
            if silence_start is None:
                continue
            # Clear recorded silence start if interval is not enough or clip is too short
            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
            need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
            if not is_leading_silence and not need_slice_middle:
                silence_start = None
                continue
            # Need slicing. Record the range of silent frames to be removed.
            if i - silence_start <= self.max_sil_kept:
                # Short silence: cut at the single quietest frame.
                pos = rms_list[silence_start: i + 1].argmin() + silence_start
                if silence_start == 0:
                    sil_tags.append((0, pos))
                else:
                    sil_tags.append((pos, pos))
                clip_start = pos
            elif i - silence_start <= self.max_sil_kept * 2:
                # Medium silence: keep up to max_sil_kept on each side of the quietest point.
                pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
                pos += i - self.max_sil_kept
                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                    clip_start = pos_r
                else:
                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
                    clip_start = max(pos_r, pos)
            else:
                # Long silence: cut independently near each edge.
                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                else:
                    sil_tags.append((pos_l, pos_r))
                clip_start = pos_r
            silence_start = None
        # Deal with trailing silence.
        total_frames = rms_list.shape[0]
        if silence_start is not None and total_frames - silence_start >= self.min_interval:
            silence_end = min(total_frames, silence_start + self.max_sil_kept)
            pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
            sil_tags.append((pos, total_frames + 1))
        # Apply and return slices.
        if len(sil_tags) == 0:
            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
        else:
            chunks = []
            # The first silence does not start at the beginning; prepend the leading voiced segment.
            if sil_tags[0][0]:
                chunks.append(
                    {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
            for i in range(0, len(sil_tags)):
                # Mark the voiced segment between silences (skipping before the first tag).
                if i:
                    chunks.append({"slice": False,
                                   "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
                # Mark every silent segment.
                chunks.append({"slice": True,
                               "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
            # The last silence does not reach the end; append the trailing voiced segment.
            if sil_tags[-1][1] * self.hop_size < len(waveform):
                chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
            chunk_dict = {}
            for i in range(len(chunks)):
                chunk_dict[str(i)] = chunks[i]
            return chunk_dict
def cut(audio_path, db_thresh=-30, min_len=5000, flask_mode=False, flask_sr=None):
    """Silence-slice an audio source.

    Args:
        audio_path: file path, or (in flask_mode) the raw audio array itself.
        db_thresh: silence threshold in dB.
        min_len: minimum clip length in ms.
        flask_mode: when True, treat *audio_path* as audio data at *flask_sr*.
        flask_sr: sample rate used in flask_mode.

    Returns:
        The chunk dict produced by Slicer.slice.
    """
    if flask_mode:
        audio, sr = audio_path, flask_sr
    else:
        audio, sr = librosa.load(audio_path, sr=None)
    slicer = Slicer(sr=sr, threshold=db_thresh, min_length=min_len)
    return slicer.slice(audio)
import librosa
import torch
import torchaudio
def split(audio, sample_rate, hop_size, db_thresh=-40, min_len=5000):
    """Slice *audio* on silence and return the non-empty segments.

    Each element is ``(start_frame, samples)`` where ``start_frame`` is the
    segment start expressed in hop units and ``samples`` is the matching
    slice of the original waveform.
    """
    slicer = Slicer(sr=sample_rate, threshold=db_thresh, min_length=min_len)
    chunk_map = dict(slicer.slice(audio))
    segments = []
    for meta in chunk_map.values():
        bounds = meta["split_time"].split(",")
        if bounds[0] == bounds[1]:
            continue  # zero-length span: nothing to keep
        first = int(int(bounds[0]) // hop_size)
        last = int(int(bounds[1]) // hop_size)
        if last > first:
            segments.append((first, audio[int(first * hop_size): int(last * hop_size)]))
    return segments
def chunks2audio(audio_path, chunks):
    """Load an audio file and materialize the waveform for each chunk.

    Args:
        audio_path: path of the audio file to load.
        chunks: mapping (or pairs) of chunk-id -> {"slice": bool,
            "split_time": "start,end"} with sample offsets into the file.

    Returns:
        (result, sr): ``result`` is a list of ``(is_silence, samples)``
        tuples for every non-empty chunk; ``sr`` is the file's sample rate.
    """
    chunks = dict(chunks)
    audio, sr = torchaudio.load(audio_path)
    # torchaudio.load returns (channels, samples). The original guard tested
    # audio.shape[1] (the sample count, which is almost always >= 2) instead
    # of the channel count; the channel axis is shape[0]. Downmix to mono
    # only when there really are multiple channels.
    if len(audio.shape) == 2 and audio.shape[0] >= 2:
        audio = torch.mean(audio, dim=0).unsqueeze(0)
    audio = audio.cpu().numpy()[0]
    result = []
    for k, v in chunks.items():
        tag = v["split_time"].split(",")
        if tag[0] != tag[1]:
            result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
    return result, sr
14,147 | from typing import Any, Union
from const import TMP_DIR
import torch
import os
import numpy as np
from dataclasses import dataclass, asdict, field
import onnxruntime
from mods.log_control import VoiceChangaerLogger
from voice_changer.IORecorder import IORecorder
from voice_changer.utils.Timer import Timer2
from voice_changer.utils.VoiceChangerIF import VoiceChangerIF
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from Exceptions import (
DeviceCannotSupportHalfPrecisionException,
DeviceChangingException,
HalfPrecisionChangingException,
NoModeLoadedException,
NotEnoughDataExtimateF0,
ONNXInputArgumentException,
PipelineNotInitializedException,
VoiceChangerIsNotSelectedException,
)
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
logger = VoiceChangaerLogger.get_instance().getLogger()
PRINT_CONVERT_PROCESSING: bool = False


def print_convert_processing(mess: str):
    """Log *mess* via the module logger only when verbose convert tracing is on."""
    if PRINT_CONVERT_PROCESSING:
        logger.info(mess)
14,148 | from typing import Any, Union
from const import TMP_DIR
import torch
import os
import numpy as np
from dataclasses import dataclass, asdict, field
import onnxruntime
from mods.log_control import VoiceChangaerLogger
from voice_changer.IORecorder import IORecorder
from voice_changer.utils.Timer import Timer2
from voice_changer.utils.VoiceChangerIF import VoiceChangerIF
from voice_changer.utils.VoiceChangerModel import AudioInOut, VoiceChangerModel
from Exceptions import (
DeviceCannotSupportHalfPrecisionException,
DeviceChangingException,
HalfPrecisionChangingException,
NoModeLoadedException,
NotEnoughDataExtimateF0,
ONNXInputArgumentException,
PipelineNotInitializedException,
VoiceChangerIsNotSelectedException,
)
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
AudioInOut: TypeAlias = np.ndarray[Any, np.dtype[np.int16]]
def pad_array(arr: AudioInOut, target_length: int):
current_length = arr.shape[0]
if current_length >= target_length:
return arr
else:
pad_width = target_length - current_length
pad_left = pad_width // 2
pad_right = pad_width - pad_left
# padded_arr = np.pad(
# arr, (pad_left, pad_right), "constant", constant_values=(0, 0)
# )
padded_arr = np.pad(arr, (pad_left, pad_right), "edge")
return padded_arr | null |
14,149 | from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
def convert_continuos_f0(f0, f0_size):
    """Convert a sparse (zero-holed) F0 contour into a continuous one.

    Unvoiced frames (``f0 == 0``) are filled by linear interpolation between
    the surrounding voiced frames; the head and tail are clamped to the
    first and last voiced values.

    Args:
        f0: 1-D array of F0 values with zeros at unvoiced frames.
        f0_size: length of the output contour.

    Returns:
        np.ndarray of shape (f0_size,); all zeros when *f0* is fully
        unvoiced. The input array is left untouched.
    """
    # Fully unvoiced input: nothing to interpolate from.
    if (f0 == 0).all():
        return np.zeros((f0_size,))
    voiced = f0[f0 != 0]
    start_f0 = voiced[0]
    end_f0 = voiced[-1]
    # Work on a copy: the original code aliased the input (cf0 = f0) and
    # mutated the caller's array in place when clamping head/tail.
    cf0 = np.copy(f0)
    start_idx = np.where(cf0 == start_f0)[0][0]
    end_idx = np.where(cf0 == end_f0)[0][-1]
    cf0[:start_idx] = start_f0
    cf0[end_idx:] = end_f0
    # Interpolate across the remaining unvoiced gaps.
    nz_frames = np.where(cf0 != 0)[0]
    f = interp1d(nz_frames, cf0[nz_frames], bounds_error=False, fill_value=0.0)
    return f(np.arange(0, f0_size))
14,150 | from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    """Compute a magnitude spectrogram of ``y`` with a cached Hann window.

    Warns on stdout when the signal leaves the [-1, 1] range. Windows are
    cached per (win_size, dtype, device) so repeated calls reuse the tensor.
    """
    lo = torch.min(y)
    hi = torch.max(y)
    if lo < -1.:
        print('min value is ', lo)
    if hi > 1.:
        print('max value is ', hi)
    cache_key = f"{win_size}_{y.dtype}_{y.device}"
    window = hann_window.get(cache_key)
    if window is None:
        window = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
        hann_window[cache_key] = window
    # Manual reflect padding so frames stay centered even with center=False.
    pad = int((n_fft - hop_size) / 2)
    padded = torch.nn.functional.pad(y.unsqueeze(1), (pad, pad), mode='reflect').squeeze(1)
    stft_out = torch.stft(padded, n_fft, hop_length=hop_size, win_length=win_size,
                          window=window, center=center, pad_mode='reflect',
                          normalized=False, onesided=True, return_complex=True)
    real_imag = torch.view_as_real(stft_out)
    # Magnitude with a small epsilon for numerical stability.
    return torch.sqrt(real_imag.pow(2).sum(-1) + 1e-6)
14,151 | from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
class HParams():
    """Attribute-style container for hyperparameters.

    Nested plain dicts are wrapped recursively, so a JSON config can be
    accessed as ``hp.train.lr``. Supports both attribute and mapping
    access, plus ``in``, ``len`` and the dict-style view methods.
    """
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            # Wrap plain dicts so nested sections get attribute access too.
            self[name] = HParams(**value) if type(value) == dict else value

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __len__(self):
        return len(self.__dict__)

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def __repr__(self):
        return repr(self.__dict__)
def get_hparams_from_file(config_path):
    """Read a JSON config file and return it wrapped as an HParams tree."""
    with open(config_path, "r", encoding="utf-8") as fp:
        raw = fp.read()
    return HParams(**json.loads(raw))
14,152 | from scipy.interpolate import interp1d
import torch
import numpy as np
import json
import os
def load_checkpoint(checkpoint_path, model, optimizer=None):
    """Restore model (and optionally optimizer) state from a checkpoint.

    The checkpoint is expected to hold 'iteration', 'learning_rate',
    optionally 'optimizer', and the module groups 'pe', 'flow', 'text_enc',
    'dec' and 'emb_g', which are merged into a single state dict. Model
    parameters missing from the checkpoint keep their current values.

    Returns:
        (model, optimizer, learning_rate, iteration)

    Raises:
        FileNotFoundError: if *checkpoint_path* does not exist.
    """
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if not os.path.isfile(checkpoint_path):
        raise FileNotFoundError(f"No such file or directory: {checkpoint_path}")
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    saved_state_dict = {
        **checkpoint_dict['pe'],
        **checkpoint_dict['flow'],
        **checkpoint_dict['text_enc'],
        **checkpoint_dict['dec'],
        **checkpoint_dict['emb_g']
    }
    # Unwrap DataParallel/DistributedDataParallel if present.
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        # Replaces the original bare `except:` with an explicit membership
        # test: only a genuinely missing key falls back to the current
        # weights; any other error should surface.
        if k in saved_state_dict:
            new_state_dict[k] = saved_state_dict[k]
        else:
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    return model, optimizer, learning_rate, iteration
14,153 | import torch
from torch.nn import ConstantPad1d as pad1d
The provided code snippet includes necessary dependencies for implementing the `pd_indexing` function. Write a Python function `def pd_indexing(x, d, dilation, batch_index, ch_index)` to solve the following problem:
Pitch-dependent indexing of past and future samples. Args: x (Tensor): Input feature map (B, C, T). d (Tensor): Input pitch-dependent dilated factors (B, 1, T). dilation (Int): Dilation size. batch_index (Tensor): Batch index ch_index (Tensor): Channel index Returns: Tensor: Past output tensor (B, out_channels, T) Tensor: Future output tensor (B, out_channels, T)
Here is the function:
def pd_indexing(x, d, dilation, batch_index, ch_index):
    """Pitch-dependent indexing of past and future samples.

    Args:
        x (Tensor): Input feature map (B, C, T).
        d (Tensor): Input pitch-dependent dilated factors (B, 1, T).
        dilation (Int): Dilation size.
        batch_index (Tensor): Batch index
        ch_index (Tensor): Channel index

    Returns:
        Tensor: Past output tensor (B, out_channels, T)
        Tensor: Future output tensor (B, out_channels, T)
    """
    batch_length = d.size(-1)
    offsets = d * dilation

    # Past: shift every time step back by its (rounded) dilated offset.
    past_pos = torch.arange(-batch_length, 0).float().to(x.device)
    past_pos = (past_pos - offsets).round().long()
    pad_past = int(-(torch.min(past_pos) + batch_length))
    assert pad_past >= 0
    x_past = pad1d((pad_past, 0), 0)(x)  # zero-pad on the left so negative indices resolve

    # Future: shift every time step forward by the same offset.
    future_pos = torch.arange(0, batch_length).float().to(x.device)
    future_pos = (future_pos + offsets).round().long()
    pad_future = int(torch.max(future_pos) - (batch_length - 1))
    assert pad_future >= 0
    x_future = pad1d((0, pad_future), 0)(x)  # zero-pad on the right

    return x_past[batch_index, ch_index, past_pos], x_future[batch_index, ch_index, future_pos]
14,154 | import torch
from torch.nn import ConstantPad1d as pad1d
The provided code snippet includes necessary dependencies for implementing the `index_initial` function. Write a Python function `def index_initial(n_batch, n_ch, tensor=True)` to solve the following problem:
Tensor batch and channel index initialization. Args: n_batch (Int): Number of batch. n_ch (Int): Number of channel. tensor (bool): Return tensor or numpy array Returns: Tensor: Batch index Tensor: Channel index
Here is the function:
def index_initial(n_batch, n_ch, tensor=True):
    """Tensor batch and channel index initialization.

    Args:
        n_batch (Int): Number of batch.
        n_ch (Int): Number of channel.
        tensor (bool): Return tensor or numpy array

    Returns:
        Tensor: Batch index
        Tensor: Channel index
    """
    # (n_batch, n_ch, 1) nested lists: batch id repeated per channel, and
    # channel ids repeated per batch.
    batch_index = [[[b]] * n_ch for b in range(n_batch)]
    ch_index = [[[c] for c in range(n_ch)] for _ in range(n_batch)]
    if not tensor:
        return batch_index, ch_index
    batch_index = torch.tensor(batch_index)
    ch_index = torch.tensor(ch_index)
    if torch.cuda.is_available():
        batch_index = batch_index.cuda()
        ch_index = ch_index.cuda()
    return batch_index, ch_index
14,159 | import sys
from logging import getLogger
import numpy as np
import torch
from torch.nn.functional import interpolate
The provided code snippet includes necessary dependencies for implementing the `validate_length` function. Write a Python function `def validate_length(xs, ys=None, hop_size=None)` to solve the following problem:
Validate length Args: xs (ndarray): numpy array of features ys (ndarray): numpy array of audios hop_size (int): upsampling factor Returns: (ndarray): length adjusted features
Here is the function:
def validate_length(xs, ys=None, hop_size=None):
    """Validate length

    Args:
        xs (ndarray): numpy array of features
        ys (ndarray): numpy array of audios
        hop_size (int): upsampling factor

    Returns:
        (ndarray): length adjusted features
    """
    shortest_x = min(x.shape[0] for x in xs)
    if ys is None:
        return [x[:shortest_x] for x in xs]
    # Keep features and audio consistent: audio must cover exactly
    # shortest_x * hop_size samples.
    shortest_y = min(y.shape[0] for y in ys)
    if shortest_y < shortest_x * hop_size:
        shortest_x = shortest_y // hop_size
    if shortest_y > shortest_x * hop_size:
        shortest_y = shortest_x * hop_size
    trimmed_x = [x[:shortest_x] for x in xs]
    trimmed_y = [y[:shortest_y] for y in ys]
    return trimmed_x + trimmed_y
14,160 | import sys
from logging import getLogger
import numpy as np
import torch
from torch.nn.functional import interpolate
The provided code snippet includes necessary dependencies for implementing the `dilated_factor` function. Write a Python function `def dilated_factor(batch_f0, fs, dense_factor)` to solve the following problem:
Pitch-dependent dilated factor Args: batch_f0 (ndarray): the f0 sequence (T) fs (int): sampling rate dense_factor (int): the number of taps in one cycle Return: dilated_factors(np array): float array of the pitch-dependent dilated factors (T)
Here is the function:
def dilated_factor(batch_f0, fs, dense_factor):
    """Pitch-dependent dilated factor

    Args:
        batch_f0 (Tensor): the f0 sequence (T)
        fs (int): sampling rate
        dense_factor (int): the number of taps in one cycle

    Return:
        Tensor: the pitch-dependent dilated factors (T)

    Note: unvoiced (zero) frames of *batch_f0* are overwritten IN PLACE
    with ``fs / dense_factor`` before the ratio is computed.
    """
    unvoiced = batch_f0 == 0
    batch_f0[unvoiced] = fs / dense_factor
    return torch.ones_like(batch_f0) * fs / dense_factor / batch_f0
14,161 | import numpy as np
import torch
import torch.nn as nn
def upsample(signal: torch.Tensor, factor: int) -> torch.Tensor:
    """Linearly upsample a (B, T, C) signal along the time axis by *factor*.

    The last frame is duplicated before interpolation so the final segment
    holds its value instead of being extrapolated.
    """
    chans_first = signal.permute(0, 2, 1)
    extended = torch.cat((chans_first, chans_first[:, :, -1:]), 2)
    stretched = nn.functional.interpolate(
        extended,
        size=chans_first.shape[-1] * factor + 1,
        mode='linear',
        align_corners=True,
    )
    return stretched[:, :, :-1].permute(0, 2, 1)
14,162 | import json
import os
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Tuple
from const import RVCSampleMode, getSampleJsonAndModelIds
from data.ModelSample import ModelSamples, generateModelSample
from data.ModelSlot import DiffusionSVCModelSlot, ModelSlot, RVCModelSlot
from mods.log_control import VoiceChangaerLogger
from voice_changer.ModelSlotManager import ModelSlotManager
from voice_changer.RVC.RVCModelSlotGenerator import RVCModelSlotGenerator
from downloader.Downloader import download, download_no_tqdm
logger = VoiceChangaerLogger.get_instance().getLogger()
def _downloadSampleJsons(sampleJsonUrls: list[str]):
    """Fetch every sample-catalog JSON into the CWD; return local filenames."""
    fetched = []
    for url in sampleJsonUrls:
        name = os.path.basename(url)
        download_no_tqdm({"url": url, "saveTo": name, "position": 0})
        fetched.append(name)
    return fetched
def _generateSampleList(sampleJsons: list[str]):
    """Parse downloaded catalog JSON files into a flat list of ModelSamples."""
    parsed: list[ModelSamples] = []
    for path in sampleJsons:
        with open(path, "r", encoding="utf-8") as fh:
            catalog = json.load(fh)
        for vcType in catalog:
            for params in catalog[vcType]:
                parsed.append(generateModelSample(params))
    return parsed
def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str, Any]], model_dir: str, slotIndex: list[int], withoutTqdm=False):
    """Install the requested initial sample models into model slots.

    For each (sample id, params) pair in *sampleModelIds*, look the sample
    up in *samples*, queue its files (model / index / icon) for download
    into the matching slot directory, persist the slot metadata, then
    download everything concurrently and post-process each slot per
    voice-changer type.

    Args:
        samples: parsed catalog entries to search.
        sampleModelIds: (sample id, params dict) pairs to install.
        model_dir: root directory holding the numbered slot directories.
        slotIndex: slot number for each entry of *sampleModelIds*.
        withoutTqdm: when True, download without progress bars.
    """
    downloadParams = []
    line_num = 0
    modelSlotManager = ModelSlotManager.get_instance(model_dir)
    for i, initSampleId in enumerate(sampleModelIds):
        targetSampleId = initSampleId[0]
        targetSampleParams = initSampleId[1]
        targetSlotIndex = slotIndex[i]
        # Search the catalog for the requested initial sample id.
        match = False
        for sample in samples:
            if sample.id == targetSampleId:
                match = True
                break
        if match is False:
            logger.warn(f"[Voice Changer] initiail sample not found. {targetSampleId}")
            continue
        # Found: set up the slot directory and queue its files.
        slotDir = os.path.join(model_dir, str(targetSlotIndex))
        slotInfo: ModelSlot = ModelSlot()
        if sample.voiceChangerType == "RVC":
            slotInfo: RVCModelSlot = RVCModelSlot()
            os.makedirs(slotDir, exist_ok=True)
            modelFilePath = os.path.join(
                slotDir,
                os.path.basename(sample.modelUrl),
            )
            downloadParams.append(
                {
                    "url": sample.modelUrl,
                    "saveTo": modelFilePath,
                    "position": line_num,
                }
            )
            slotInfo.modelFile = os.path.basename(sample.modelUrl)
            line_num += 1
            # Optional faiss index, downloaded only when requested.
            if targetSampleParams["useIndex"] is True and hasattr(sample, "indexUrl") and sample.indexUrl != "":
                indexPath = os.path.join(
                    slotDir,
                    os.path.basename(sample.indexUrl),
                )
                downloadParams.append(
                    {
                        "url": sample.indexUrl,
                        "saveTo": indexPath,
                        "position": line_num,
                    }
                )
                slotInfo.indexFile = os.path.basename(sample.indexUrl)
                line_num += 1
            # Optional icon image.
            if hasattr(sample, "icon") and sample.icon != "":
                iconPath = os.path.join(
                    slotDir,
                    os.path.basename(sample.icon),
                )
                downloadParams.append(
                    {
                        "url": sample.icon,
                        "saveTo": iconPath,
                        "position": line_num,
                    }
                )
                slotInfo.iconFile = os.path.basename(sample.icon)
                line_num += 1
            slotInfo.sampleId = sample.id
            slotInfo.credit = sample.credit
            slotInfo.description = sample.description
            slotInfo.name = sample.name
            slotInfo.termsOfUseUrl = sample.termsOfUseUrl
            slotInfo.defaultTune = 0
            slotInfo.defaultIndexRatio = 0
            slotInfo.defaultProtect = 0.5
            slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
        elif sample.voiceChangerType == "Diffusion-SVC":
            # Diffusion-SVC samples are skipped on macOS.
            if sys.platform.startswith("darwin") is True:
                continue
            slotInfo: DiffusionSVCModelSlot = DiffusionSVCModelSlot()
            os.makedirs(slotDir, exist_ok=True)
            modelFilePath = os.path.join(
                slotDir,
                os.path.basename(sample.modelUrl),
            )
            downloadParams.append(
                {
                    "url": sample.modelUrl,
                    "saveTo": modelFilePath,
                    "position": line_num,
                }
            )
            slotInfo.modelFile = os.path.basename(sample.modelUrl)
            line_num += 1
            if hasattr(sample, "icon") and sample.icon != "":
                iconPath = os.path.join(
                    slotDir,
                    os.path.basename(sample.icon),
                )
                downloadParams.append(
                    {
                        "url": sample.icon,
                        "saveTo": iconPath,
                        "position": line_num,
                    }
                )
                slotInfo.iconFile = os.path.basename(sample.icon)
                line_num += 1
            slotInfo.sampleId = sample.id
            slotInfo.credit = sample.credit
            slotInfo.description = sample.description
            slotInfo.name = sample.name
            slotInfo.termsOfUseUrl = sample.termsOfUseUrl
            slotInfo.defaultTune = 0
            slotInfo.defaultKstep = 0
            slotInfo.defaultSpeedup = 0
            slotInfo.kStepMax = 0
            slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
        else:
            logger.warn(f"[Voice Changer] {sample.voiceChangerType} is not supported.")
    # Download all queued files concurrently.
    logger.info("[Voice Changer] Downloading model files...")
    if withoutTqdm:
        with ThreadPoolExecutor() as pool:
            pool.map(download_no_tqdm, downloadParams)
    else:
        with ThreadPoolExecutor() as pool:
            pool.map(download, downloadParams)
    # Build per-slot metadata from the downloaded model files.
    logger.info("[Voice Changer] Generating metadata...")
    for targetSlotIndex in slotIndex:
        slotInfo = modelSlotManager.get_slot_info(targetSlotIndex)
        modelPath = os.path.join(model_dir, str(slotInfo.slotIndex), os.path.basename(slotInfo.modelFile))
        if slotInfo.voiceChangerType == "RVC":
            if slotInfo.isONNX:
                slotInfo = RVCModelSlotGenerator._setInfoByONNX(modelPath, slotInfo)
            else:
                slotInfo = RVCModelSlotGenerator._setInfoByPytorch(modelPath, slotInfo)
            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
        elif slotInfo.voiceChangerType == "Diffusion-SVC":
            if sys.platform.startswith("darwin") is False:
                from voice_changer.DiffusionSVC.DiffusionSVCModelSlotGenerator import DiffusionSVCModelSlotGenerator
                if slotInfo.isONNX:
                    pass
                else:
                    slotInfo = DiffusionSVCModelSlotGenerator._setInfoByPytorch(slotInfo)
                modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
RVCSampleMode: TypeAlias = Literal[
"production",
"testOfficial",
"testDDPNTorch",
"testDDPNONNX",
"testONNXFull",
]
def getSampleJsonAndModelIds(mode: RVCSampleMode):
    """Return the sample-catalog URLs and initial model ids for *mode*.

    Returns:
        A tuple ``(catalog_json_urls, [(sample_id, params), ...])`` where
        ``params`` currently only carries ``useIndex``. Unknown modes yield
        ``([], [])``.

    NOTE(review): the "_t"/"_o" id suffixes appear to distinguish torch vs
    ONNX variants in the hosted catalogs — confirm against the catalog data.
    """
    if mode == "production":
        return [
            "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_t.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_o.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_d.json",
        ], [
            ("Tsukuyomi-chan_o", {"useIndex": False}),
            ("Amitaro_o", {"useIndex": False}),
            ("KikotoMahiro_o", {"useIndex": False}),
            ("TokinaShigure_o", {"useIndex": False}),
        ]
    elif mode == "testAll":
        return [
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
        ], [
            ("test-official-v1-f0-48k-l9-hubert_t", {"useIndex": True}),
            ("test-official-v1-nof0-48k-l9-hubert_t", {"useIndex": False}),
            ("test-official-v2-f0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-official-v2-nof0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-official-v1-f0-48k-l9-hubert_o", {"useIndex": True}),
            ("test-official-v1-nof0-48k-l9-hubert_o", {"useIndex": False}),
            ("test-official-v2-f0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-official-v2-nof0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-ddpn-v1-f0-48k-l9-hubert_t", {"useIndex": False}),
            ("test-ddpn-v1-nof0-48k-l9-hubert_t", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_jp_t", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_jp_t", {"useIndex": False}),
            ("test-ddpn-v1-f0-48k-l9-hubert_o", {"useIndex": False}),
            ("test-ddpn-v1-nof0-48k-l9-hubert_o", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_jp_o", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o", {"useIndex": False}),
            ("test-official-v1-f0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-official-v1-nof0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-official-v2-f0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-official-v2-nof0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v1-f0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v1-nof0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_jp_o_full", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o_full", {"useIndex": False}),
        ]
    elif mode == "testOfficial":
        return [
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
        ], [
            ("test-official-v1-f0-48k-l9-hubert_t", {"useIndex": True}),
            ("test-official-v1-nof0-48k-l9-hubert_t", {"useIndex": False}),
            ("test-official-v2-f0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-official-v2-nof0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-official-v1-f0-48k-l9-hubert_o", {"useIndex": True}),
            ("test-official-v1-nof0-48k-l9-hubert_o", {"useIndex": False}),
            ("test-official-v2-f0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-official-v2-nof0-40k-l12-hubert_o", {"useIndex": False}),
        ]
    elif mode == "testDDPNTorch":
        return [
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
        ], [
            ("test-ddpn-v1-f0-48k-l9-hubert_t", {"useIndex": False}),
            ("test-ddpn-v1-nof0-48k-l9-hubert_t", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_t", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_jp_t", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_jp_t", {"useIndex": False}),
        ]
    elif mode == "testDDPNONNX":
        return [
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
        ], [
            ("test-ddpn-v1-f0-48k-l9-hubert_o", {"useIndex": False}),
            ("test-ddpn-v1-nof0-48k-l9-hubert_o", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_o", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_jp_o", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o", {"useIndex": False}),
        ]
    elif mode == "testONNXFull":
        return [
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
            "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
        ], [
            ("test-official-v1-f0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-official-v1-nof0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-official-v2-f0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-official-v2-nof0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v1-f0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v1-nof0-48k-l9-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_o_full", {"useIndex": False}),
            ("test-ddpn-v2-f0-40k-l12-hubert_jp_o_full", {"useIndex": False}),
            ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o_full", {"useIndex": False}),
        ]
    else:
        return [], []
def downloadInitialSamples(mode: RVCSampleMode, model_dir: str):
    """Seed *model_dir* with the bundled sample models for *mode*.

    Always refreshes the catalog JSONs first; the model downloads are
    skipped entirely when *model_dir* already exists.
    """
    jsonUrls, initialModelIds = getSampleJsonAndModelIds(mode)
    catalogFiles = _downloadSampleJsons(jsonUrls)
    if os.path.exists(model_dir):
        logger.info("[Voice Changer] model_dir is already exists. skip download samples.")
        return
    sampleList = _generateSampleList(catalogFiles)
    targetSlots = list(range(len(initialModelIds)))
    _downloadSamples(sampleList, initialModelIds, model_dir, targetSlots)
14,163 | import json
import os
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Tuple
from const import RVCSampleMode, getSampleJsonAndModelIds
from data.ModelSample import ModelSamples, generateModelSample
from data.ModelSlot import DiffusionSVCModelSlot, ModelSlot, RVCModelSlot
from mods.log_control import VoiceChangaerLogger
from voice_changer.ModelSlotManager import ModelSlotManager
from voice_changer.RVC.RVCModelSlotGenerator import RVCModelSlotGenerator
from downloader.Downloader import download, download_no_tqdm
def _generateSampleJsons(sampleJsonUrls: list[str]):
sampleJsons = []
for url in sampleJsonUrls:
filename = os.path.basename(url)
sampleJsons.append(filename)
return sampleJsons
def _generateSampleList(sampleJsons: list[str]):
    """Parse each catalog file and flatten all entries into ModelSamples."""
    collected: list[ModelSamples] = []
    for jsonPath in sampleJsons:
        with open(jsonPath, "r", encoding="utf-8") as fp:
            catalog = json.load(fp)
        collected.extend(
            generateModelSample(entry)
            for section in catalog
            for entry in catalog[section]
        )
    return collected
def _downloadSamples(samples: list[ModelSamples], sampleModelIds: list[Tuple[str, Any]], model_dir: str, slotIndex: list[int], withoutTqdm=False):
    """Install the requested initial sample models into model slots.

    For each (sample id, params) pair in *sampleModelIds*, look the sample
    up in *samples*, queue its files (model / index / icon) for download
    into the matching slot directory, persist the slot metadata, then
    download everything concurrently and post-process each slot per
    voice-changer type.

    Args:
        samples: parsed catalog entries to search.
        sampleModelIds: (sample id, params dict) pairs to install.
        model_dir: root directory holding the numbered slot directories.
        slotIndex: slot number for each entry of *sampleModelIds*.
        withoutTqdm: when True, download without progress bars.
    """
    downloadParams = []
    line_num = 0
    modelSlotManager = ModelSlotManager.get_instance(model_dir)
    for i, initSampleId in enumerate(sampleModelIds):
        targetSampleId = initSampleId[0]
        targetSampleParams = initSampleId[1]
        targetSlotIndex = slotIndex[i]
        # Search the catalog for the requested initial sample id.
        match = False
        for sample in samples:
            if sample.id == targetSampleId:
                match = True
                break
        if match is False:
            logger.warn(f"[Voice Changer] initiail sample not found. {targetSampleId}")
            continue
        # Found: set up the slot directory and queue its files.
        slotDir = os.path.join(model_dir, str(targetSlotIndex))
        slotInfo: ModelSlot = ModelSlot()
        if sample.voiceChangerType == "RVC":
            slotInfo: RVCModelSlot = RVCModelSlot()
            os.makedirs(slotDir, exist_ok=True)
            modelFilePath = os.path.join(
                slotDir,
                os.path.basename(sample.modelUrl),
            )
            downloadParams.append(
                {
                    "url": sample.modelUrl,
                    "saveTo": modelFilePath,
                    "position": line_num,
                }
            )
            slotInfo.modelFile = os.path.basename(sample.modelUrl)
            line_num += 1
            # Optional faiss index, downloaded only when requested.
            if targetSampleParams["useIndex"] is True and hasattr(sample, "indexUrl") and sample.indexUrl != "":
                indexPath = os.path.join(
                    slotDir,
                    os.path.basename(sample.indexUrl),
                )
                downloadParams.append(
                    {
                        "url": sample.indexUrl,
                        "saveTo": indexPath,
                        "position": line_num,
                    }
                )
                slotInfo.indexFile = os.path.basename(sample.indexUrl)
                line_num += 1
            # Optional icon image.
            if hasattr(sample, "icon") and sample.icon != "":
                iconPath = os.path.join(
                    slotDir,
                    os.path.basename(sample.icon),
                )
                downloadParams.append(
                    {
                        "url": sample.icon,
                        "saveTo": iconPath,
                        "position": line_num,
                    }
                )
                slotInfo.iconFile = os.path.basename(sample.icon)
                line_num += 1
            slotInfo.sampleId = sample.id
            slotInfo.credit = sample.credit
            slotInfo.description = sample.description
            slotInfo.name = sample.name
            slotInfo.termsOfUseUrl = sample.termsOfUseUrl
            slotInfo.defaultTune = 0
            slotInfo.defaultIndexRatio = 0
            slotInfo.defaultProtect = 0.5
            slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
        elif sample.voiceChangerType == "Diffusion-SVC":
            # Diffusion-SVC samples are skipped on macOS.
            if sys.platform.startswith("darwin") is True:
                continue
            slotInfo: DiffusionSVCModelSlot = DiffusionSVCModelSlot()
            os.makedirs(slotDir, exist_ok=True)
            modelFilePath = os.path.join(
                slotDir,
                os.path.basename(sample.modelUrl),
            )
            downloadParams.append(
                {
                    "url": sample.modelUrl,
                    "saveTo": modelFilePath,
                    "position": line_num,
                }
            )
            slotInfo.modelFile = os.path.basename(sample.modelUrl)
            line_num += 1
            if hasattr(sample, "icon") and sample.icon != "":
                iconPath = os.path.join(
                    slotDir,
                    os.path.basename(sample.icon),
                )
                downloadParams.append(
                    {
                        "url": sample.icon,
                        "saveTo": iconPath,
                        "position": line_num,
                    }
                )
                slotInfo.iconFile = os.path.basename(sample.icon)
                line_num += 1
            slotInfo.sampleId = sample.id
            slotInfo.credit = sample.credit
            slotInfo.description = sample.description
            slotInfo.name = sample.name
            slotInfo.termsOfUseUrl = sample.termsOfUseUrl
            slotInfo.defaultTune = 0
            slotInfo.defaultKstep = 0
            slotInfo.defaultSpeedup = 0
            slotInfo.kStepMax = 0
            slotInfo.isONNX = slotInfo.modelFile.endswith(".onnx")
            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
        else:
            logger.warn(f"[Voice Changer] {sample.voiceChangerType} is not supported.")
    # Download all queued files concurrently.
    logger.info("[Voice Changer] Downloading model files...")
    if withoutTqdm:
        with ThreadPoolExecutor() as pool:
            pool.map(download_no_tqdm, downloadParams)
    else:
        with ThreadPoolExecutor() as pool:
            pool.map(download, downloadParams)
    # Build per-slot metadata from the downloaded model files.
    logger.info("[Voice Changer] Generating metadata...")
    for targetSlotIndex in slotIndex:
        slotInfo = modelSlotManager.get_slot_info(targetSlotIndex)
        modelPath = os.path.join(model_dir, str(slotInfo.slotIndex), os.path.basename(slotInfo.modelFile))
        if slotInfo.voiceChangerType == "RVC":
            if slotInfo.isONNX:
                slotInfo = RVCModelSlotGenerator._setInfoByONNX(modelPath, slotInfo)
            else:
                slotInfo = RVCModelSlotGenerator._setInfoByPytorch(modelPath, slotInfo)
            modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
        elif slotInfo.voiceChangerType == "Diffusion-SVC":
            if sys.platform.startswith("darwin") is False:
                from voice_changer.DiffusionSVC.DiffusionSVCModelSlotGenerator import DiffusionSVCModelSlotGenerator
                if slotInfo.isONNX:
                    pass
                else:
                    slotInfo = DiffusionSVCModelSlotGenerator._setInfoByPytorch(slotInfo)
                modelSlotManager.save_model_slot(targetSlotIndex, slotInfo)
RVCSampleMode: TypeAlias = Literal[
"production",
"testOfficial",
"testDDPNTorch",
"testDDPNONNX",
"testONNXFull",
]
def getSampleJsonAndModelIds(mode: RVCSampleMode):
    """Return (catalog-json URLs, default model ids with download params) for *mode*.

    Unknown modes yield ([], []). The "testAll" list is exactly the
    concatenation of the four individual test lists, in that order.
    """
    PROD_JSONS = [
        "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_t.json",
        "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_o.json",
        "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_d.json",
    ]
    TEST_JSONS = [
        "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
        "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
    ]

    def _models(entries):
        # Expand (name, useIndex-flag) pairs into the public tuple shape.
        return [(name, {"useIndex": flag}) for name, flag in entries]

    production = _models([
        ("Tsukuyomi-chan_o", False),
        ("Amitaro_o", False),
        ("KikotoMahiro_o", False),
        ("TokinaShigure_o", False),
    ])
    official = _models([
        ("test-official-v1-f0-48k-l9-hubert_t", True),
        ("test-official-v1-nof0-48k-l9-hubert_t", False),
        ("test-official-v2-f0-40k-l12-hubert_t", False),
        ("test-official-v2-nof0-40k-l12-hubert_t", False),
        ("test-official-v1-f0-48k-l9-hubert_o", True),
        ("test-official-v1-nof0-48k-l9-hubert_o", False),
        ("test-official-v2-f0-40k-l12-hubert_o", False),
        ("test-official-v2-nof0-40k-l12-hubert_o", False),
    ])
    ddpn_torch = _models([
        ("test-ddpn-v1-f0-48k-l9-hubert_t", False),
        ("test-ddpn-v1-nof0-48k-l9-hubert_t", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_t", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_t", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_jp_t", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_jp_t", False),
    ])
    ddpn_onnx = _models([
        ("test-ddpn-v1-f0-48k-l9-hubert_o", False),
        ("test-ddpn-v1-nof0-48k-l9-hubert_o", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_o", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_o", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_jp_o", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o", False),
    ])
    onnx_full = _models([
        ("test-official-v1-f0-48k-l9-hubert_o_full", False),
        ("test-official-v1-nof0-48k-l9-hubert_o_full", False),
        ("test-official-v2-f0-40k-l12-hubert_o_full", False),
        ("test-official-v2-nof0-40k-l12-hubert_o_full", False),
        ("test-ddpn-v1-f0-48k-l9-hubert_o_full", False),
        ("test-ddpn-v1-nof0-48k-l9-hubert_o_full", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_o_full", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_o_full", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_jp_o_full", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o_full", False),
    ])

    table = {
        "production": (PROD_JSONS, production),
        "testAll": (TEST_JSONS, official + ddpn_torch + ddpn_onnx + onnx_full),
        "testOfficial": (TEST_JSONS, official),
        "testDDPNTorch": (TEST_JSONS, ddpn_torch),
        "testDDPNONNX": (TEST_JSONS, ddpn_onnx),
        "testONNXFull": (TEST_JSONS, onnx_full),
    }
    return table.get(mode, ([], []))
def downloadSample(mode: RVCSampleMode, modelId: str, model_dir: str, slotIndex: int, params: Any):
    """Fetch the sample catalogs for *mode* and download one model into *slotIndex*."""
    catalogUrls, _ = getSampleJsonAndModelIds(mode)
    catalogFiles = _generateSampleJsons(catalogUrls)
    sampleList = _generateSampleList(catalogFiles)
    _downloadSamples(sampleList, [(modelId, params)], model_dir, [slotIndex], withoutTqdm=True)
14,164 | import json
import os
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Tuple
from const import RVCSampleMode, getSampleJsonAndModelIds
from data.ModelSample import ModelSamples, generateModelSample
from data.ModelSlot import DiffusionSVCModelSlot, ModelSlot, RVCModelSlot
from mods.log_control import VoiceChangaerLogger
from voice_changer.ModelSlotManager import ModelSlotManager
from voice_changer.RVC.RVCModelSlotGenerator import RVCModelSlotGenerator
from downloader.Downloader import download, download_no_tqdm
def _generateSampleJsons(sampleJsonUrls: list[str]):
sampleJsons = []
for url in sampleJsonUrls:
filename = os.path.basename(url)
sampleJsons.append(filename)
return sampleJsons
def _generateSampleList(sampleJsons: list[str]):
    """Parse every downloaded catalog file into a flat list of model samples.

    Each catalog maps a voice-changer type to a list of sample parameter dicts;
    file order and in-file order are preserved.
    """
    parsed: list[ModelSamples] = []
    for path in sampleJsons:
        with open(path, "r", encoding="utf-8") as fp:
            catalog = json.load(fp)
        for entries in catalog.values():
            parsed.extend(generateModelSample(params) for params in entries)
    return parsed
RVCSampleMode: TypeAlias = Literal[
"production",
"testOfficial",
"testDDPNTorch",
"testDDPNONNX",
"testONNXFull",
]
def getSampleJsonAndModelIds(mode: RVCSampleMode):
    """Return (catalog-json URLs, default model ids with download params) for *mode*.

    Unknown modes yield ([], []). The "testAll" list is exactly the
    concatenation of the four individual test lists, in that order.
    """
    PROD_JSONS = [
        "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_t.json",
        "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_o.json",
        "https://huggingface.co/wok000/vcclient_model/raw/main/samples_0004_d.json",
    ]
    TEST_JSONS = [
        "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_official_v1_v2.json",
        "https://huggingface.co/wok000/vcclient_model/raw/main/test/test_ddpn_v1_v2.json",
    ]

    def _models(entries):
        # Expand (name, useIndex-flag) pairs into the public tuple shape.
        return [(name, {"useIndex": flag}) for name, flag in entries]

    production = _models([
        ("Tsukuyomi-chan_o", False),
        ("Amitaro_o", False),
        ("KikotoMahiro_o", False),
        ("TokinaShigure_o", False),
    ])
    official = _models([
        ("test-official-v1-f0-48k-l9-hubert_t", True),
        ("test-official-v1-nof0-48k-l9-hubert_t", False),
        ("test-official-v2-f0-40k-l12-hubert_t", False),
        ("test-official-v2-nof0-40k-l12-hubert_t", False),
        ("test-official-v1-f0-48k-l9-hubert_o", True),
        ("test-official-v1-nof0-48k-l9-hubert_o", False),
        ("test-official-v2-f0-40k-l12-hubert_o", False),
        ("test-official-v2-nof0-40k-l12-hubert_o", False),
    ])
    ddpn_torch = _models([
        ("test-ddpn-v1-f0-48k-l9-hubert_t", False),
        ("test-ddpn-v1-nof0-48k-l9-hubert_t", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_t", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_t", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_jp_t", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_jp_t", False),
    ])
    ddpn_onnx = _models([
        ("test-ddpn-v1-f0-48k-l9-hubert_o", False),
        ("test-ddpn-v1-nof0-48k-l9-hubert_o", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_o", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_o", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_jp_o", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o", False),
    ])
    onnx_full = _models([
        ("test-official-v1-f0-48k-l9-hubert_o_full", False),
        ("test-official-v1-nof0-48k-l9-hubert_o_full", False),
        ("test-official-v2-f0-40k-l12-hubert_o_full", False),
        ("test-official-v2-nof0-40k-l12-hubert_o_full", False),
        ("test-ddpn-v1-f0-48k-l9-hubert_o_full", False),
        ("test-ddpn-v1-nof0-48k-l9-hubert_o_full", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_o_full", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_o_full", False),
        ("test-ddpn-v2-f0-40k-l12-hubert_jp_o_full", False),
        ("test-ddpn-v2-nof0-40k-l12-hubert_jp_o_full", False),
    ])

    table = {
        "production": (PROD_JSONS, production),
        "testAll": (TEST_JSONS, official + ddpn_torch + ddpn_onnx + onnx_full),
        "testOfficial": (TEST_JSONS, official),
        "testDDPNTorch": (TEST_JSONS, ddpn_torch),
        "testDDPNONNX": (TEST_JSONS, ddpn_onnx),
        "testONNXFull": (TEST_JSONS, onnx_full),
    }
    return table.get(mode, ([], []))
def getSampleInfos(mode: RVCSampleMode):
    """Resolve the catalogs for *mode* and return the parsed sample list."""
    urls, _ = getSampleJsonAndModelIds(mode)
    return _generateSampleList(_generateSampleJsons(urls))
14,165 | import os
from concurrent.futures import ThreadPoolExecutor
from downloader.Downloader import download
from mods.log_control import VoiceChangaerLogger
from voice_changer.utils.VoiceChangerParams import VoiceChangerParams
from Exceptions import WeightDownladException
# Module-wide logger shared with the rest of the voice-changer package.
logger = VoiceChangaerLogger.get_instance().getLogger()
def download(params):
class VoiceChangerParams:
class WeightDownladException(Exception):
def __str__(self):
def downloadWeight(voiceChangerParams: VoiceChangerParams):
    """Ensure the shared model weights exist locally, downloading missing ones.

    Each missing weight is queued with a `position` used as its tqdm
    progress-bar slot, all downloads run concurrently, then the core weights
    are re-checked and every tracked weight's file size is logged.

    Raises:
        WeightDownladException: if a core weight is still missing after the
            downloads, or if any tracked weight file is absent.
    """
    # Resolve the configured target path of every shared weight.
    content_vec_500_onnx = voiceChangerParams.content_vec_500_onnx
    hubert_base = voiceChangerParams.hubert_base
    hubert_base_jp = voiceChangerParams.hubert_base_jp
    hubert_soft = voiceChangerParams.hubert_soft
    nsf_hifigan = voiceChangerParams.nsf_hifigan
    crepe_onnx_full = voiceChangerParams.crepe_onnx_full
    crepe_onnx_tiny = voiceChangerParams.crepe_onnx_tiny
    rmvpe = voiceChangerParams.rmvpe
    rmvpe_onnx = voiceChangerParams.rmvpe_onnx
    whisper_tiny = voiceChangerParams.whisper_tiny
    # Weights whose presence is verified and whose size is logged at the end.
    # FIX: rmvpe_onnx is downloaded below but was missing from this list, so
    # a failed rmvpe_onnx download went undetected; it is now tracked too.
    weight_files = [
        content_vec_500_onnx,
        hubert_base,
        hubert_base_jp,
        hubert_soft,
        nsf_hifigan,
        crepe_onnx_full,
        crepe_onnx_tiny,
        rmvpe,
        rmvpe_onnx,
        whisper_tiny,
    ]
    # file exists check (currently only for rvc)
    downloadParams = []
    if os.path.exists(hubert_base) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/ddPn08/rvc-webui-models/resolve/main/embeddings/hubert_base.pt",
                "saveTo": hubert_base,
                "position": 0,
            }
        )
    if os.path.exists(hubert_base_jp) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/rinna/japanese-hubert-base/resolve/main/fairseq/model.pt",
                "saveTo": hubert_base_jp,
                "position": 1,
            }
        )
    if os.path.exists(hubert_soft) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights/resolve/main/ddsp-svc30/embedder/hubert-soft-0d54a1f4.pt",
                "saveTo": hubert_soft,
                "position": 2,
            }
        )
    if os.path.exists(nsf_hifigan) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights/resolve/main/ddsp-svc30/nsf_hifigan_20221211/model.bin",
                "saveTo": nsf_hifigan,
                "position": 3,
            }
        )
    # The vocoder's config lives next to the model binary.
    nsf_hifigan_config = os.path.join(os.path.dirname(nsf_hifigan), "config.json")
    if os.path.exists(nsf_hifigan_config) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights/raw/main/ddsp-svc30/nsf_hifigan_20221211/config.json",
                "saveTo": nsf_hifigan_config,
                "position": 4,
            }
        )
    nsf_hifigan_onnx = os.path.join(os.path.dirname(nsf_hifigan), "nsf_hifigan.onnx")
    if os.path.exists(nsf_hifigan_onnx) is False:
        downloadParams.append(
            {
                # NOTE(review): shares tqdm position 4 with the config download
                # above — confirm the overlapping progress bars are intended.
                "url": "https://huggingface.co/wok000/weights/resolve/main/ddsp-svc30/nsf_hifigan_onnx_20221211/nsf_hifigan.onnx",
                "saveTo": nsf_hifigan_onnx,
                "position": 4,
            }
        )
    if os.path.exists(crepe_onnx_full) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights/resolve/main/crepe/onnx/full.onnx",
                "saveTo": crepe_onnx_full,
                "position": 5,
            }
        )
    if os.path.exists(crepe_onnx_tiny) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights/resolve/main/crepe/onnx/tiny.onnx",
                "saveTo": crepe_onnx_tiny,
                "position": 6,
            }
        )
    if os.path.exists(content_vec_500_onnx) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights_gpl/resolve/main/content-vec/contentvec-f.onnx",
                "saveTo": content_vec_500_onnx,
                "position": 7,
            }
        )
    if os.path.exists(rmvpe) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights/resolve/main/rmvpe/rmvpe_20231006.pt",
                "saveTo": rmvpe,
                "position": 8,
            }
        )
    if os.path.exists(rmvpe_onnx) is False:
        downloadParams.append(
            {
                "url": "https://huggingface.co/wok000/weights_gpl/resolve/main/rmvpe/rmvpe_20231006.onnx",
                "saveTo": rmvpe_onnx,
                "position": 9,
            }
        )
    if os.path.exists(whisper_tiny) is False:
        downloadParams.append(
            {
                "url": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
                "saveTo": whisper_tiny,
                "position": 10,
            }
        )
    # Fetch everything concurrently; each dict is one download job.
    with ThreadPoolExecutor() as pool:
        pool.map(download, downloadParams)
    # Hard check on the core weights required by every pipeline.
    if os.path.exists(hubert_base) is False or os.path.exists(hubert_base_jp) is False or os.path.exists(hubert_soft) is False or os.path.exists(nsf_hifigan) is False or os.path.exists(nsf_hifigan_config) is False:
        raise WeightDownladException()
    # Log each weight's file size (debugging aid); any missing tracked weight
    # is treated as a failed download.
    for weight in weight_files:
        if os.path.exists(weight):
            file_size = os.path.getsize(weight)
            logger.debug(f"weight file [{weight}]: {file_size}")
        else:
            logger.warning(f"weight file is missing. {weight}")
            raise WeightDownladException()
14,166 | import os
import shutil
from fastapi import UploadFile
def sanitize_filename(filename: str) -> str:
    """Strip directory components from *filename* and cap it at 255 chars,
    preserving the extension (guards against path traversal)."""
    name = os.path.basename(filename)
    limit = 255
    if len(name) <= limit:
        return name
    stem, ext = os.path.splitext(name)
    return stem[: limit - len(ext)] + ext
def upload_file(upload_dirname: str, file: UploadFile, filename: str):
    """Persist an uploaded file under *upload_dirname*.

    The client-supplied *filename* is sanitized before use and missing
    directories are created. Returns a status dict; ERROR when file or
    filename is falsy.
    """
    if not (file and filename):
        return {"status": "ERROR", "msg": "uploaded file is not found."}
    filename = sanitize_filename(filename)
    target_path = os.path.join(upload_dirname, filename)
    os.makedirs(os.path.dirname(target_path), exist_ok=True)
    # Use a context manager so the target handle is closed even if the copy
    # fails (the original leaked the handle on error).
    with open(target_path, "wb+") as out:
        shutil.copyfileobj(file.file, out)
    # The original message contained a garbled "(unknown)" placeholder;
    # report the actual stored file name.
    return {"status": "OK", "msg": f"uploaded files {filename} "}
14,167 | import os
import shutil
from fastapi import UploadFile
def sanitize_filename(filename: str) -> str:
    """Return a traversal-safe file name no longer than 255 characters."""
    MAX_LEN = 255
    safe = os.path.basename(filename)
    if len(safe) > MAX_LEN:
        root, ext = os.path.splitext(safe)
        safe = root[: MAX_LEN - len(ext)] + ext
    return safe
def concat_file_chunks(upload_dirname: str, filename: str, chunkNum: int, dest_dirname: str):
    """Reassemble `chunkNum` uploaded chunks named "<filename>_<i>" into one file.

    Chunks are appended in index order and deleted as they are consumed.
    Returns a status dict naming the assembled file.
    NOTE(review): dest_dirname is accepted but unused here — confirm whether
    the caller is expected to move the result there.
    """
    filename = sanitize_filename(filename)
    target_path = os.path.join(upload_dirname, filename)
    os.makedirs(os.path.dirname(target_path), exist_ok=True)
    # Start from a clean target so re-uploads don't append to stale data.
    if os.path.exists(target_path):
        os.remove(target_path)
    with open(target_path, "ab") as out:
        for i in range(chunkNum):
            # Chunk names were garbled to "(unknown)_{i}" in the original;
            # uploads are stored as "<filename>_<index>".
            chunk_file_path = os.path.join(upload_dirname, f"{filename}_{i}")
            # Context manager closes each chunk even if a read fails; the
            # original leaked the handle on error and closed `out` twice.
            with open(chunk_file_path, "rb") as chunk:
                out.write(chunk.read())
            os.remove(chunk_file_path)
    # The original interpolated the closed file object's repr into the message.
    return {"status": "OK", "msg": f"concat files {target_path} "}
14,168 | from enum import Enum
import os
import sys
import tempfile
from typing import Literal, TypeAlias
os.makedirs(TMP_DIR, exist_ok=True)
def getFrontendPath():
    """Locate the built web client: inside the PyInstaller bundle when frozen,
    otherwise the sibling demo build directory."""
    if hasattr(sys, "_MEIPASS"):
        return os.path.join(sys._MEIPASS, "dist")
    return "../client/demo/dist"
14,169 | import argparse
import pyaudio
import wave
import struct
import socketio
import ssl
from datetime import datetime
import time
import urllib3
import signal
import sys
import numpy as np
def setupArgParser():
    """Build the CLI parser for the streaming client.

    --input is required; --output defaults to -1 (no playback device);
    --to optionally targets a specific session id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", type=str, default="http://localhost:18888", help="url")
    parser.add_argument("--input", type=int, required=True, help="input device index")
    # help text was a copy-paste of --input's; this flag selects the OUTPUT device
    parser.add_argument("--output", type=int, default=-1, help="output device index")
    parser.add_argument("--to", type=str, default="", help="sid")
    return parser
14,170 | import os
import json
import argparse
from alfworld_trial import run_trial
from generate_reflections import update_memory
from typing import Any, List, Dict
def get_args():
    """Parse and validate CLI arguments for the trial runner.

    Exits with a usage error (via parser.error) when --num_trials or
    --num_envs is missing or non-positive.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_trials", type=int, help="The number of trials to run")
    parser.add_argument("--num_envs", type=int, help="The number of environments per trial")
    parser.add_argument("--run_name", type=str, help="The name of the run")
    parser.add_argument("--use_memory", action='store_true', help="Allow the Agent to use memory")
    parser.add_argument("--is_resume", action='store_true', help="To resume run")
    parser.add_argument("--resume_dir", type=str, help="If resume, the logging directory", default="")
    parser.add_argument("--start_trial_num", type=int, help="If resume, the start trial num", default=0)
    # original help string had an unterminated backtick around the last model name
    parser.add_argument("--model", type=str, help="The model to use. One of `gpt-4`, `gpt-3.5-turbo`, or `text-davinci-003`")
    args = parser.parse_args()
    # Validate via parser.error: unlike `assert`, this survives `python -O`
    # and handles a missing (None) value without raising TypeError.
    if args.num_trials is None or args.num_trials <= 0:
        parser.error("Number of trials should be positive")
    if args.num_envs is None or args.num_envs <= 0:
        parser.error("Number of environments should be positive")
    return args
14,171 | from typing import List, Dict
def _get_base_query(base_query: str, start_info: str, memory: List[str]) -> str:
query = base_query
# add memory if it exists
if len(memory) > 0:
query += '\n\nYour memory for the task below:'
for i, m in enumerate(memory):
query += f'\nTrial {i}:\n{m.strip()}'
query += f"\nHere is the task:\n{start_info}"
return query | null |
14,172 | from utils import get_completion
from typing import List, Dict, Any
# Load the worked reflection examples once at import time; they are
# interpolated into every reflection prompt built by this module.
with open("./reflexion_few_shot_examples.txt", 'r') as f:
    FEW_SHOT_EXAMPLES = f.read()
def _generate_reflection_query(log_str: str, memory: List[str]) -> str:
    """Build the self-reflection prompt from a failed episode log and prior plans."""
    scenario: str = _get_scenario(log_str)
    query: str = f"""You will be given the history of a past experience in which you were placed in an environment and given a task to complete. You were unsuccessful in completing the task. Do not summarize your environment, but rather think about the strategy and path you took to attempt to complete the task. Devise a concise, new plan of action that accounts for your mistake with reference to specific actions that you should have taken. For example, if you tried A and B but forgot C, then devise a plan to achieve C with environment-specific actions. You will need this later when you are solving the same task. Give your plan after "Plan". Here are two examples:
{FEW_SHOT_EXAMPLES}
{scenario}"""
    if memory:
        past_plans = ''.join(f'Trial #{i}: {m}\n' for i, m in enumerate(memory))
        query += '\n\nPlans from past attempts:\n' + past_plans
    query += '\n\nNew plan:'
    return query
def get_completion(prompt: str, temperature: float = 0.0, max_tokens: int = 256, stop_strs: Optional[List[str]] = None) -> str:
    """Query the text-davinci-003 completion endpoint and return the raw text."""
    request = dict(
        model='text-davinci-003',
        prompt=prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        stop=stop_strs,
    )
    response = openai.Completion.create(**request)
    return response.choices[0].text
The provided code snippet includes necessary dependencies for implementing the `update_memory` function. Write a Python function `def update_memory(trial_log_path: str, env_configs: List[Dict[str, Any]]) -> List[Dict[str, Any]]` to solve the following problem:
Updates the given env_config with the appropriate reflections.
Here is the function:
def update_memory(trial_log_path: str, env_configs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Updates the given env_config with the appropriate reflections.

    Each unsolved, non-skipped environment gets a new reflection appended to
    its ``memory`` list, generated from its section of the trial log.
    Returns the (mutated) env_configs list.
    """
    with open(trial_log_path, 'r') as f:
        full_log: str = f.read()
    env_logs: List[str] = full_log.split('#####\n\n#####')
    # One log section per environment. The original passed print(...) — which
    # returns None — as the assert message, so failures reported "None";
    # an f-string carries the actual counts.
    assert len(env_logs) == len(env_configs), f'bad: {len(env_logs)}, {len(env_configs)}'
    for i, env in enumerate(env_configs):
        # if unsolved, get reflection and update env config
        if not env['is_success'] and not env['skip']:
            # Keep at most the three most recent plans as context.
            if len(env['memory']) > 3:
                memory: List[str] = env['memory'][-3:]
            else:
                memory: List[str] = env['memory']
            reflection_query: str = _generate_reflection_query(env_logs[i], memory)
            reflection: str = get_completion(reflection_query)  # type: ignore
            env_configs[i]['memory'] += [reflection]
    return env_configs
14,173 | import os
import sys
import json
import yaml
import openai
import importlib
import alfworld
import alfworld.agents.environment
from utils import Model, get_chat, get_completion
from env_history import EnvironmentHistory
from typing import List, Dict, Any, Tuple
# Load the few-shot prompt bank (keys like 'react_put_0') once at import time.
# FOLDER and PROMPT_FILE are presumably defined in the module preamble not
# shown here — TODO confirm.
with open(os.path.join(FOLDER, PROMPT_FILE), 'r') as f:
    d = json.load(f)
def alfworld_run(env, base_prompt, memory: List[str], to_print=True, ob='', model: Model = "text-davinci-003") -> Tuple[EnvironmentHistory, bool]:
# Maps ALFWorld task-name prefixes to the suffix used to select the matching
# few-shot prompts from the prompt bank (e.g. 'react_put_0' / 'react_put_1').
PREFIXES = {
    'pick_and_place': 'put',
    'pick_clean_then_place': 'clean',
    'pick_heat_then_place': 'heat',
    'pick_cool_then_place': 'cool',
    'look_at_obj': 'examine',
    'pick_two_obj': 'puttwo'
}
# Supported LLM backends for a trial run.
Model = Literal["gpt-4", "gpt-3.5-turbo", "text-davinci-003"]
def run_trial(
    trial_log_path: str,
    world_log_path: str,
    trial_idx: int,
    env_configs: List[Dict[str, Any]],
    use_memory: bool,
    model: Model,
) -> List[Dict[str, Any]]:
    """Run one trial over every configured ALFWorld environment.

    Environments already marked successful are skipped (and re-counted);
    the rest are attempted with the few-shot prompt matching their task
    prefix. Per-environment and aggregate results are appended to the trial
    and world logs. Returns the (mutated) env_configs.
    """
    importlib.reload(alfworld)
    importlib.reload(alfworld.agents.environment)
    with open('base_config.yaml') as reader:
        config = yaml.safe_load(reader)
    split = "eval_out_of_distribution"
    env = getattr(alfworld.agents.environment, config["env"]["type"])(config, train_eval=split)
    env = env.init_env(batch_size=1)
    num_successes: int = 0
    num_additional_successes: int = 0
    num_envs: int = len(env_configs)
    for z, env_config in enumerate(env_configs):
        ob, info = env.reset()
        # Drop the boilerplate first paragraph of the initial observation.
        ob = '\n'.join(ob[0].split('\n\n')[1:])
        name = '/'.join(info['extra.gamefile'][0].split('/')[-3:-1])
        print(f"using {name}")
        if env_config["is_success"]:
            num_successes += 1
            # log to world log
            with open(world_log_path, 'a') as wf:
                wf.write(f'Environment #{z} Trial #{trial_idx}: SUCCESS\n')
            with open(trial_log_path, 'a') as wf:
                wf.write(f'\n#####\n\nEnvironment #{z}: Success\n\n#####\n')
            continue
        # NOTE(review): if `name` matches no PREFIXES key, base_prompt is
        # never bound and nothing is attempted or logged for this env —
        # confirm every gamefile name starts with a known prefix.
        for i, (k, v) in enumerate(PREFIXES.items()):
            if name.startswith(k):
                base_prompt = 'Interact with a household to solve a task. Here are two examples.\n' + d[f'react_{v}_1'] + d[f'react_{v}_0']
                final_env_history, is_success = alfworld_run(env, base_prompt, env_config["memory"] if use_memory else [], to_print=True, ob=ob, model=model)
                # update env config
                if is_success:
                    status_str: str = f'Environment #{z} Trial #{trial_idx}: SUCCESS'
                    env_configs[z]['is_success'] = True
                    num_successes += 1
                    num_additional_successes += 1
                else:
                    status_str: str = f'Environment #{z} Trial #{trial_idx}: FAIL'
                # log to world log
                with open(world_log_path, 'a') as f:
                    f.write(status_str + '\n')
                # log env results to trial log
                with open(trial_log_path, 'a') as wf:
                    wf.write(f'\n#####\n\nEnvironment #{z}:\n{str(final_env_history)}\n\nSTATUS: {"OK" if is_success else "FAIL"}\n\n#####\n')
    # close environment object
    env.close()
    # log trial results to trial and world logs
    log_str: str = f"""
-----
SUCCESS: {num_successes}
ADDITIONAL SUCCESS: {num_additional_successes}
FAIL: {num_envs - num_successes}
TOTAL: {num_envs}
ACCURACY: {round(num_successes / num_envs, 2)}
-----"""
    with open(trial_log_path, 'a') as wf:
        wf.write(log_str)
    with open(world_log_path, 'a') as wf:
        wf.write(log_str + '\n')
    return env_configs
14,174 | from generators.model import ModelBase, Message
import random
from typing import Union, List, Optional, Callable
def print_messages(system_message_text: str, user_message_text: str) -> None:
    """Pretty-print the system and user prompts for debugging."""
    banner = f"""----------------------- SYSTEM MESSAGE -----------------------)
{system_message_text}
----------------------------------------------
----------------------- USER MESSAGE -----------------------
{user_message_text}
----------------------------------------------
"""
    print(banner, flush=True)
def print_generated_func_body(func_body_str: str) -> None:
    """Pretty-print a generated function body for debugging."""
    framed = f"""--------------------- GENERATED FUNC BODY ---------------------
{func_body_str}
------------------------------------------"""
    print(framed)
class Message():
    # One chat turn. `role` is a MessageRole (declared elsewhere in this
    # module — presumably "system"/"user"/"assistant"; confirm there);
    # `content` is the turn's text.
    role: MessageRole
    content: str
class ModelBase():
    """Abstract base for LLM backends; subclasses implement generate / generate_chat."""
    def __init__(self, name: str):
        self.name = name
        self.is_chat = False  # chat-API backends set this True so callers pick generate_chat
    def __repr__(self) -> str:
        return f'{self.name}'
    def generate_chat(self, messages: List[Message], max_tokens: int = 1024, temperature: float = 0.2, num_comps: int = 1) -> Union[List[str], str]:
        # Returns a single completion, or a list when num_comps > 1.
        raise NotImplementedError
    def generate(self, prompt: str, max_tokens: int = 1024, stop_strs: Optional[List[str]] = None, temperature: float = 0.0, num_comps=1) -> Union[List[str], str]:
        # Plain-completion counterpart of generate_chat.
        raise NotImplementedError
def generic_generate_func_impl(
    func_sig: str,
    model: ModelBase,
    strategy: str,
    prev_func_impl,
    feedback,
    self_reflection,
    num_comps,
    temperature,
    reflexion_chat_instruction: str,
    reflexion_few_shot: str,
    simple_chat_instruction: str,
    reflexion_completion_instruction: str,
    simple_completion_instruction: str,
    code_block_instruction: str,
    parse_code_block: Callable[[str], str],
    add_code_block: Callable[[str], str]
) -> Union[str, List[str]]:
    """Generate one or more function implementations with the given model.

    strategy is "simple" (fresh attempt from func_sig) or "reflexion"
    (improve prev_func_impl using unit-test feedback and a self-reflection).
    Chat and completion models get differently-shaped prompts built from the
    supplied instruction strings. Returns a single parsed code block when
    num_comps == 1, otherwise a list of parsed blocks.

    Raises:
        ValueError: on an unknown strategy, or "reflexion" without
            prev_func_impl / feedback / self_reflection.
    """
    if strategy != "reflexion" and strategy != "simple":
        raise ValueError(
            f"Invalid strategy: given `{strategy}` but expected one of `reflexion` or `simple`")
    if strategy == "reflexion" and (prev_func_impl is None or feedback is None or self_reflection is None):
        raise ValueError(
            f"Invalid arguments: given `strategy=reflexion` but `prev_func_impl`, `feedback`, or `self_reflection` is None")
    if model.is_chat:
        if strategy == "reflexion":
            # NOTE(review): `message` is only used for the debug print below;
            # the request actually sent is the structured `messages` list,
            # whose content is laid out differently — confirm the printed
            # transcript is meant to diverge from what is sent.
            message = f"{reflexion_few_shot}\n[previous impl]:\n{add_code_block(prev_func_impl)}\n\n[unit test results from previous impl]:\n{feedback}\n\n[reflection on previous impl]:\n{self_reflection}\n\n[improved impl]:\n{func_sig}"
            prompt = f"{reflexion_chat_instruction}\n{code_block_instruction}"
            # func_bodies is a really bad name, as it can also be just 1 string
            print_messages(prompt, message)
            messages = [
                Message(
                    role="system",
                    content=prompt,
                ),
                Message(
                    role="user", # TODO: check this
                    content=reflexion_few_shot,
                ),
                Message(
                    role="assistant",
                    content=add_code_block(prev_func_impl),
                ),
                Message(
                    role="user",
                    content=f"[unit test results from previous impl]:\n{feedback}\n\n[reflection on previous impl]:",
                ),
                Message(
                    role="assistant",
                    content=self_reflection,
                ),
                Message(
                    role="user",
                    content=f"[improved impl]:\n{func_sig}",
                ),
            ]
            func_bodies = model.generate_chat(messages=messages, num_comps=num_comps, temperature=temperature)
        else:
            system_prompt = f"{simple_chat_instruction}\n{code_block_instruction}"
            print_messages(system_prompt, func_sig)
            messages = [
                Message(
                    role="system",
                    content=f"{simple_chat_instruction}\n{code_block_instruction}",
                ),
                Message(
                    role="user",
                    content=func_sig,
                ),
            ]
            func_bodies = model.generate_chat(messages=messages, num_comps=num_comps, temperature=temperature)
    else:
        # Completion (non-chat) models get a single flat prompt.
        if strategy == "reflexion":
            prompt = f"{reflexion_completion_instruction}\n{add_code_block(prev_func_impl)}\n\nunit tests:\n{feedback}\n\nhint:\n{self_reflection}\n\n# improved implementation\n{func_sig}\n{code_block_instruction}"
            func_bodies = model.generate(
                prompt, num_comps=num_comps, temperature=temperature)
        else:
            prompt = f"{simple_completion_instruction}\n{func_sig}\n{code_block_instruction}"
            func_bodies = model.generate(
                prompt, num_comps=num_comps, temperature=temperature)
    if num_comps == 1:
        assert isinstance(func_bodies, str)
        func_body_str = parse_code_block(func_bodies)
        print_generated_func_body(func_body_str)
        return func_body_str
    else:
        func_bodies = [parse_code_block(func_body) for func_body in func_bodies]
        print_generated_func_body("\n\n".join(func_bodies))
        return func_bodies
14,175 | from generators.model import ModelBase, Message
import random
from typing import Union, List, Optional, Callable
def sample_n_random(items: List[str], n: int) -> List[str]:
    """Sample min(n, len(items)) random items from a list"""
    assert n >= 0
    return items if n >= len(items) else random.sample(items, n)
class Message():
    # A single chat message: `role` is a MessageRole (defined elsewhere in
    # this module — confirm its values there) and `content` is the text.
    role: MessageRole
    content: str
class ModelBase():
    """Interface for LLM backends; concrete models override the generate methods."""
    def __init__(self, name: str):
        self.name = name
        self.is_chat = False  # set True by chat-API subclasses
    def __repr__(self) -> str:
        return f'{self.name}'
    def generate_chat(self, messages: List[Message], max_tokens: int = 1024, temperature: float = 0.2, num_comps: int = 1) -> Union[List[str], str]:
        # Chat-style generation; returns a list when num_comps > 1.
        raise NotImplementedError
    def generate(self, prompt: str, max_tokens: int = 1024, stop_strs: Optional[List[str]] = None, temperature: float = 0.0, num_comps=1) -> Union[List[str], str]:
        # Plain-completion generation; returns a list when num_comps > 1.
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `generic_generate_internal_tests` function. Write a Python function `def generic_generate_internal_tests( func_sig: str, model: ModelBase, max_num_tests: int, test_generation_few_shot: str, test_generation_chat_instruction: str, test_generation_completion_instruction: str, parse_tests: Callable[[str], List[str]], is_syntax_valid: Callable[[str], bool], is_react: bool = False ) -> List[str]` to solve the following problem:
Generates tests for a function.
Here is the function:
def generic_generate_internal_tests(
        func_sig: str,
        model: ModelBase,
        max_num_tests: int,
        test_generation_few_shot: str,
        test_generation_chat_instruction: str,
        test_generation_completion_instruction: str,
        parse_tests: Callable[[str], List[str]],
        is_syntax_valid: Callable[[str], bool],
        is_react: bool = False
) -> List[str]:
    """Generate candidate unit tests for *func_sig* and return up to
    *max_num_tests* syntactically valid ones, sampled at random."""
    if model.is_chat:
        # Chat models get a system instruction plus a few-shot user turn.
        # React mode asks for a "[think]" continuation instead of tests.
        suffix = "[think]:" if is_react else "[unit tests]:"
        messages = [
            Message(
                role="system",
                content=test_generation_chat_instruction,
            ),
            Message(
                role="user",
                content=f"{test_generation_few_shot}\n\n[func signature]:\n{func_sig}\n\n{suffix}",
            ),
        ]
        output = model.generate_chat(messages=messages, max_tokens=1024)
        if is_react:
            print(f'React test generation output: {output}')
    else:
        # Completion models get a single flat prompt.
        prompt = f'{test_generation_completion_instruction}\n\nfunc signature:\n{func_sig}\nunit tests:'
        output = model.generate(prompt, max_tokens=1024)
    candidates = parse_tests(output)  # type: ignore
    syntactically_ok = [test for test in candidates if is_syntax_valid(test)]
    return sample_n_random(syntactically_ok, max_num_tests)
14,176 | from generators.model import ModelBase, Message
import random
from typing import Union, List, Optional, Callable
# NOTE(review): truncated extraction artifact — class/method bodies were
# dropped by the snippet extractor, so the lines below are not executable
# as-is. The full definitions appear earlier in this file.
class Message():
class ModelBase():
def __init__(self, name: str):
def __repr__(self) -> str:
def generate_chat(self, messages: List[Message], max_tokens: int = 1024, temperature: float = 0.2, num_comps: int = 1) -> Union[List[str], str]:
def generate(self, prompt: str, max_tokens: int = 1024, stop_strs: Optional[List[str]] = None, temperature: float = 0.0, num_comps=1) -> Union[List[str], str]:
def generic_generate_self_reflection(
        func: str,
        feedback: str,
        model: ModelBase,
        self_reflection_chat_instruction: str,
        self_reflection_completion_instruction: str,
        add_code_block: Callable[[str], str],
        self_reflection_few_shot: Optional[str] = None,
) -> str:
    """Ask *model* for a natural-language reflection on a failing
    implementation *func*, given its unit-test *feedback*."""
    if not model.is_chat:
        # Completion models: one flat prompt ending in "Explanation:".
        return model.generate(
            f'{self_reflection_completion_instruction}\n{add_code_block(func)}\n\n{feedback}\n\nExplanation:')  # type: ignore
    # Chat models: optionally prepend the few-shot transcript to the user turn.
    user_turn = f'[function impl]:\n{add_code_block(func)}\n\n[unit test results]:\n{feedback}\n\n[self-reflection]:'
    if self_reflection_few_shot is not None:
        user_turn = f'{self_reflection_few_shot}\n\n{user_turn}'
    messages = [
        Message(
            role="system",
            content=self_reflection_chat_instruction,
        ),
        Message(
            role="user",
            content=user_turn,
        ),
    ]
    reflection = model.generate_chat(messages=messages)
    if self_reflection_few_shot is not None:
        print(f'Self reflection output: {reflection}')
    return reflection  # type: ignore
14,177 | from generators.model import ModelBase, message_to_str
from .generator_types import Generator
from .generator_utils import generic_generate_func_impl, generic_generate_internal_tests, generic_generate_self_reflection
from typing import Optional, List, Union
import ast
import re
from .parse import parse_code_block, add_code_block
# Scaffolding for py_fix_indentation: a candidate body is wrapped between
# these two lines and exec'd to probe for indentation/syntax errors.
DUMMY_FUNC_SIG = "def func():"
DUMMY_FUNC_CALL = "func()"
def handle_first_line_indent(func_body: str) -> str:
    """Indent only the first line of *func_body* by four spaces.

    Used when a model emitted a body whose first line lost its indentation
    while subsequent lines kept theirs. A body already starting with four
    spaces is returned untouched.
    """
    if func_body.startswith("    "):
        return func_body
    split = func_body.splitlines()
    if not split:
        # Guard: "".splitlines() is [], so split[0] would raise IndexError.
        return func_body
    return f"    {split[0]}\n" + "\n".join(split[1:])
def handle_entire_body_indent(func_body: str) -> str:
    """Indent every line of *func_body* by four spaces."""
    indented = ("    " + line for line in func_body.splitlines())
    return "\n".join(indented)
def fix_turbo_response(func_body: str) -> str:
    """Clean a gpt-3.5-turbo response: drop unindented signature lines,
    then strip markdown fences (delegates to sibling helpers)."""
    without_signatures = remove_unindented_signatures(func_body)
    return fix_markdown(without_signatures)
def fix_markdown(func_body: str) -> str:
    """Remove markdown code-fence markers (```) from *func_body*."""
    # Equivalent to re.sub("`{3}", "", ...): left-to-right, non-overlapping.
    return func_body.replace("```", "")
The provided code snippet includes necessary dependencies for implementing the `py_fix_indentation` function. Write a Python function `def py_fix_indentation(func_body: str) -> str` to solve the following problem:
3 cases: 1. good syntax 2. first line not good 3. entire body not good
Here is the function:
def py_fix_indentation(func_body: str) -> str:
    """Repair the indentation of a model-generated function body.

    Tries, in order: (1) the body as-is, (2) the body with only its first
    line indented, (3) the body with every line indented — accepting the
    first variant that exec()s cleanly inside a dummy function.
    """
    func_body = fix_turbo_response(func_body)
    """
    3 cases:
    1. good syntax
    2. first line not good
    3. entire body not good
    """
    def parse_indent_rec(f_body: str, cur_state: int) -> str:
        # cur_state: 0 = trying as-is, 1 = first-line fix tried, 2 = give up.
        f_body = fix_markdown(f_body)
        if cur_state > 1:
            return f_body
        code = f'{DUMMY_FUNC_SIG}\n{f_body}\n{DUMMY_FUNC_CALL}'
        try:
            # NOTE(review): exec() on model output — this pipeline executes
            # generated code elsewhere anyway, but do not reuse this helper
            # on untrusted input outside that context.
            exec(code)
            return f_body
        except (IndentationError, SyntaxError):
            p_func = handle_first_line_indent if cur_state == 0 else handle_entire_body_indent
            # Each fix strategy is applied to the pristine outer func_body
            # (closure variable), not the partially-fixed f_body.
            return parse_indent_rec(p_func(func_body), cur_state + 1)
        except Exception:
            # Runtime errors from the dummy call mean the syntax was fine.
            return f_body
    return parse_indent_rec(func_body, 0)
14,178 | from generators.model import ModelBase, message_to_str
from .generator_types import Generator
from .generator_utils import generic_generate_func_impl, generic_generate_internal_tests, generic_generate_self_reflection
from typing import Optional, List, Union
import ast
import re
from .parse import parse_code_block, add_code_block
def py_is_syntax_valid(code: str) -> bool:
    """Return True iff *code* parses as Python (any failure counts as invalid)."""
    try:
        ast.parse(code)
    except Exception:
        return False
    return True
14,179 | from typing import List, Union, Optional, Literal
import dataclasses
from tenacity import (
retry,
stop_after_attempt, # type: ignore
wait_random_exponential, # type: ignore
)
import openai
# NOTE(review): extraction stub of the Message dataclass; MessageRole is not
# defined in this chunk, so the fragment is not executable as-is.
class Message():
    role: MessageRole  # speaker role — confirm against the MessageRole definition
    content: str  # message text
def message_to_str(message: Message) -> str:
    """Render one chat message as 'role: content'."""
    return "{}: {}".format(message.role, message.content)
def messages_to_str(messages: List[Message]) -> str:
    """Render a chat transcript, one 'role: content' line per message."""
    return "\n".join(map(message_to_str, messages))
14,180 | from typing import List, Union, Optional, Literal
import dataclasses
from tenacity import (
retry,
stop_after_attempt, # type: ignore
wait_random_exponential, # type: ignore
)
import openai
def gpt_completion(
        model: str,
        prompt: str,
        max_tokens: int = 1024,
        stop_strs: Optional[List[str]] = None,
        temperature: float = 0.0,
        num_comps=1,
) -> Union[List[str], str]:
    """Call the OpenAI completions endpoint.

    Returns a single string when num_comps == 1, otherwise a list with one
    string per requested completion.
    """
    response = openai.Completion.create(
        model=model,
        prompt=prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        stop=stop_strs,
        n=num_comps,
    )
    choices = response.choices  # type: ignore
    if num_comps == 1:
        return choices[0].text  # type: ignore
    return [choice.text for choice in choices]  # type: ignore
14,181 | from typing import List, Union, Optional, Literal
import dataclasses
from tenacity import (
retry,
stop_after_attempt, # type: ignore
wait_random_exponential, # type: ignore
)
import openai
class Message():  # NOTE(review): truncated extraction stub — body lost; full class defined earlier in this file.
def gpt_chat(
        model: str,
        messages: List[Message],
        max_tokens: int = 1024,
        temperature: float = 0.0,
        num_comps=1,
) -> Union[List[str], str]:
    """Call the OpenAI chat-completions endpoint.

    Returns a single string when num_comps == 1, otherwise a list with one
    string per requested completion.
    """
    # Message dataclasses must be serialized to plain dicts for the API.
    payload = [dataclasses.asdict(message) for message in messages]
    response = openai.ChatCompletion.create(
        model=model,
        messages=payload,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        n=num_comps,
    )
    if num_comps == 1:
        return response.choices[0].message.content  # type: ignore
    return [choice.message.content for choice in response.choices]  # type: ignore
14,182 | from generators.model import ModelBase
from .generator_types import Generator
from .generator_utils import generic_generate_func_impl, generic_generate_internal_tests, generic_generate_self_reflection
from .parse import parse_code_block, add_code_block
from typing import List, Optional, Union
The provided code snippet includes necessary dependencies for implementing the `dump_tests` function. Write a Python function `def dump_tests(tests: List[str]) -> str` to solve the following problem:
Dumps the tests to a string.
Here is the function:
def dump_tests(tests: List[str]) -> str:
    """Serialize a list of test lines into one newline-joined string."""
    joined = "\n".join(test for test in tests)
    return joined
14,183 | from generators.model import ModelBase
from .generator_types import Generator
from .generator_utils import generic_generate_func_impl, generic_generate_internal_tests, generic_generate_self_reflection
from .parse import parse_code_block, add_code_block
from typing import List, Optional, Union
The provided code snippet includes necessary dependencies for implementing the `parse_tests` function. Write a Python function `def parse_tests(tests: str) -> List[str]` to solve the following problem:
Parses the tests from a string.
Here is the function:
def parse_tests(tests: str) -> List[str]:
    """Extract the assert lines from raw model output, whitespace-stripped."""
    parsed: List[str] = []
    for line in tests.splitlines():
        if "assert" in line:
            parsed.append(line.strip())
    return parsed
14,184 | import re
from typing import Optional
def parse_first_func(code: str, lang: str) -> Optional[str]:
    """Extract the first top-level function definition from *code*.

    Scans for the first line starting with 'def ', then collects lines up to
    the blank line following a 'return' (or a second top-level 'def ', or
    end of input). Returns None when no 'def ' line exists.

    Args:
        code: raw model output, possibly ending in a "[/PYTHON]" marker.
        lang: must be "python" for now.
    """
    assert lang == "python", "Only python is supported for now. TODO: Rust"
    code_lines = code.split("\n")
    def_i = -1
    last_i = 0
    got_return = False
    for i, line in enumerate(code_lines):
        if line.startswith("def "):
            if def_i == -1:
                def_i = i
            else:
                # A second top-level def ends the first function.
                break
        elif "return" in line and def_i != -1:
            got_return = True
        if line == "" and def_i != -1 and got_return:
            last_i = i
            break
    if last_i == 0:
        last_i = len(code_lines) - 1
    if def_i == -1:
        return None
    func = "\n".join(code_lines[def_i:last_i + 1])
    # BUG FIX: the original used rstrip("[/PYTHON]"), which strips any trailing
    # run of the characters [ / P Y T H O N ] — e.g. it would eat a trailing
    # identifier such as "N". Remove the literal trailer only.
    if func.endswith("[/PYTHON]"):
        func = func[:-len("[/PYTHON]")]
    return func
def parse_code_block(string: str, lang: str) -> Optional[str]:
    """Pull the first fenced code block out of *string*.

    Prefers a ```lang-tagged fence, then a bare ``` fence, and finally falls
    back to scanning for the first function definition.
    """
    for pattern in (fr"```{lang}\n(.*?)\n```", r"```\n(.*?)\n```"):
        match = re.search(pattern, string, re.DOTALL)
        if match is not None:
            return match.group(1)
    return parse_first_func(string, lang)
14,185 | import re
from typing import Optional
def add_code_block(string: str, lang: str) -> str:
    """Wrap *string* in a markdown code fence tagged with *lang*."""
    return "```" + lang + "\n" + string + "\n```"
14,186 | from .py_generate import PyGenerator
from .rs_generate import RsGenerator
from .generator_types import Generator
from .model import CodeLlama, ModelBase, GPT4, GPT35, StarChat, GPTDavinci
class PyGenerator(Generator):
    """Python-specific Generator: binds the generic prompt helpers to the
    PY_* prompt constants and python-tagged code fences."""

    def self_reflection(self, func: str, feedback: str, model: ModelBase) -> str:
        # Produce a natural-language reflection on a failing implementation.
        return generic_generate_self_reflection(
            func=func,
            feedback=feedback,
            model=model,
            self_reflection_chat_instruction=PY_SELF_REFLECTION_CHAT_INSTRUCTION,
            self_reflection_completion_instruction=PY_SELF_REFLECTION_COMPLETION_INSTRUCTION,
            add_code_block=lambda x: add_code_block(x, "python"),
            self_reflection_few_shot=PY_SELF_REFLECTION_FEW_SHOT
        )

    def func_impl(
        self,
        func_sig: str,
        model: ModelBase,
        strategy: str,
        prev_func_impl: Optional[str] = None,
        feedback: Optional[str] = None,
        self_reflection: Optional[str] = None,
        num_comps: int = 1,
        temperature: float = 0.0,
    ) -> Union[str, List[str]]:
        # Generate candidate implementation(s) under the "simple" or
        # "reflexion" strategy, using the Python prompt set.
        return generic_generate_func_impl(
            func_sig=func_sig,
            model=model,
            strategy=strategy,
            prev_func_impl=prev_func_impl,
            feedback=feedback,
            self_reflection=self_reflection,
            num_comps=num_comps,
            temperature=temperature,
            reflexion_chat_instruction=PY_REFLEXION_CHAT_INSTRUCTION,
            reflexion_few_shot=PY_REFLEXION_FEW_SHOT_ADD,
            simple_chat_instruction=PY_SIMPLE_CHAT_INSTRUCTION,
            reflexion_completion_instruction=PY_REFLEXION_COMPLETION_INSTRUCTION,
            simple_completion_instruction=PY_SIMPLE_COMPLETION_INSTRUCTION,
            code_block_instruction=USE_PYTHON_CODEBLOCK_INSTRUCTION,
            parse_code_block=lambda x: parse_code_block(x, "python"),
            add_code_block=lambda x: add_code_block(x, "python"),
        )

    def internal_tests(self, func_sig: str, model: ModelBase, max_num_tests: int = 5) -> List[str]:
        def parse_tests(tests: str) -> List[str]:
            # Keep only assert lines (shadows the module-level parse_tests).
            return [test.strip() for test in tests.splitlines() if "assert" in test]
        # NOTE(review): this string literal sits after the nested def, so it
        # is a no-op expression statement, not a docstring of internal_tests.
        """
        Generates tests for a function.
        """
        return generic_generate_internal_tests(
            func_sig=func_sig,
            model=model,
            max_num_tests=max_num_tests,
            test_generation_few_shot=PY_TEST_GENERATION_FEW_SHOT,
            test_generation_chat_instruction=PY_TEST_GENERATION_CHAT_INSTRUCTION,
            test_generation_completion_instruction=PY_TEST_GENERATION_COMPLETION_INSTRUCTION,
            parse_tests=parse_tests,
            is_syntax_valid=py_is_syntax_valid,
        )
class RsGenerator(Generator):
    """Rust-specific Generator: binds the generic prompt helpers to the
    RS_* prompt constants and rust-tagged code fences."""

    def self_reflection(self, func: str, feedback: str, model: ModelBase) -> str:
        # Produce a natural-language reflection on a failing implementation.
        return generic_generate_self_reflection(
            func=func,
            feedback=feedback,
            model=model,
            self_reflection_chat_instruction=RS_SELF_REFLECTION_CHAT_INSTRUCTION,
            self_reflection_completion_instruction=RS_SELF_REFLECTION_COMPLETION_INSTRUCTION,
            add_code_block=lambda x: add_code_block(x, "rust"),
            self_reflection_few_shot=RS_SELF_REFLECTION_FEW_SHOT,
        )

    def func_impl(
        self,
        func_sig: str,
        model: ModelBase,
        strategy: str,
        prev_func_impl: Optional[str] = None,
        feedback: Optional[str] = None,
        self_reflection: Optional[str] = None,
        num_comps: int = 1,
        temperature: float = 0.0,
    ) -> Union[str, List[str]]:
        # Generate candidate implementation(s) under the "simple" or
        # "reflexion" strategy, using the Rust prompt set. Note: unlike
        # PyGenerator, no code_block_instruction kwarg is passed here.
        return generic_generate_func_impl(
            func_sig=func_sig,
            model=model,
            strategy=strategy,
            prev_func_impl=prev_func_impl,
            feedback=feedback,
            self_reflection=self_reflection,
            num_comps=num_comps,
            temperature=temperature,
            reflexion_chat_instruction=RS_REFLEXION_CHAT_INSTRUCTION,
            simple_chat_instruction=RS_SIMPLE_CHAT_INSTRUCTION,
            reflexion_completion_instruction=RS_REFLEXION_COMPLETION_INSTRUCTION,
            simple_completion_instruction=RS_SIMPLE_COMPLETION_INSTRUCTION,
            reflexion_few_shot=RS_REFLEXION_FEW_SHOT_ADD,
            parse_code_block=lambda x: parse_code_block(x, "rust"),
            add_code_block=lambda x: add_code_block(x, "rust"),
        )

    def internal_tests(
        self,
        func_sig: str,
        model: ModelBase,
        max_num_tests: int = 5
    ) -> List[str]:
        def parse_tests(tests: str) -> List[str]:
            # Split on ';' and re-append it to each statement.
            return [test + ";" for test in tests.split(";")]
        # NOTE(review): this string literal sits after the nested def, so it
        # is a no-op expression statement, not a docstring of internal_tests.
        """
        Generates tests for a function.
        """
        return generic_generate_internal_tests(
            func_sig=func_sig,
            model=model,
            max_num_tests=max_num_tests,
            test_generation_few_shot=RS_TEST_GENERATION_FEW_SHOT,
            test_generation_chat_instruction=RS_TEST_GENERATION_CHAT_INSTRUCTION,
            test_generation_completion_instruction=RS_TEST_GENERATION_COMPLETION_INSTRUCTION,
            parse_tests=parse_tests,
            is_syntax_valid=(lambda x: True)  # TODO: for now. typecheck maybe?
        )
class Generator:
    """Language-agnostic interface implemented by PyGenerator / RsGenerator."""

    def self_reflection(self, func: str, feedback: str, model: ModelBase) -> str:
        # Return a natural-language reflection on func given test feedback.
        ...

    def func_impl(
        self,
        func_sig: str,
        model: ModelBase,
        strategy: str,
        prev_func_impl: Optional[str] = None,
        feedback: Optional[str] = None,
        self_reflection: Optional[str] = None,
        num_comps: int = 1,
        temperature: float = 0.0,
    ) -> Union[str, List[str]]:
        # Return one implementation (num_comps == 1) or a list of candidates.
        ...

    def internal_tests(
        self,
        func_sig: str,
        model: ModelBase,
        max_num_tests: int = 5
    ) -> List[str]:
        # Return up to max_num_tests generated unit-test strings.
        ...
def generator_factory(lang: str) -> Generator:
    """Return the Generator for *lang* ('py'/'python' or 'rs'/'rust')."""
    if lang in ("py", "python"):
        return PyGenerator()
    if lang in ("rs", "rust"):
        return RsGenerator()
    raise ValueError(f"Invalid language for generator: {lang}")
14,187 | from .py_generate import PyGenerator
from .rs_generate import RsGenerator
from .generator_types import Generator
from .model import CodeLlama, ModelBase, GPT4, GPT35, StarChat, GPTDavinci
# NOTE(review): duplicate extraction stub of ModelBase (the full definition
# appears earlier in this file); tokens kept identical, formatting restored.
class ModelBase():
    def __init__(self, name: str):
        self.name = name
        self.is_chat = False

    def __repr__(self) -> str:
        return f'{self.name}'

    def generate_chat(self, messages: List[Message], max_tokens: int = 1024, temperature: float = 0.2, num_comps: int = 1) -> Union[List[str], str]:
        raise NotImplementedError

    def generate(self, prompt: str, max_tokens: int = 1024, stop_strs: Optional[List[str]] = None, temperature: float = 0.0, num_comps=1) -> Union[List[str], str]:
        raise NotImplementedError
class GPT4(GPTChat):
    # Chat wrapper pinned to the "gpt-4" model id.
    def __init__(self):
        super().__init__("gpt-4")
class GPT35(GPTChat):
    # Chat wrapper pinned to the "gpt-3.5-turbo" model id.
    def __init__(self):
        super().__init__("gpt-3.5-turbo")
class GPTDavinci(ModelBase):
    """Completion-style wrapper around text-davinci-* models."""

    def __init__(self, model_name: str):
        # BUG FIX: the original assigned self.name directly and skipped
        # ModelBase.__init__, so self.is_chat was never set — any caller
        # checking model.is_chat (the generic_generate_* helpers do) raised
        # AttributeError. Delegate to the base, which sets name and
        # is_chat=False (correct for a completion model).
        super().__init__(model_name)

    def generate(self, prompt: str, max_tokens: int = 1024, stop_strs: Optional[List[str]] = None, temperature: float = 0, num_comps=1) -> Union[List[str], str]:
        """Delegate to the OpenAI completions helper."""
        return gpt_completion(self.name, prompt, max_tokens, stop_strs, temperature, num_comps)
class StarChat(HFModelBase):
    """HuggingFace starchat-beta chat model (<|role|> ... <|end|> format)."""

    def __init__(self):
        # Imports are local so transformers/torch are only required when
        # this model is actually instantiated.
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer
        model = AutoModelForCausalLM.from_pretrained(
            "HuggingFaceH4/starchat-beta",
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
        tokenizer = AutoTokenizer.from_pretrained(
            "HuggingFaceH4/starchat-beta",
        )
        # eos_token_id=49155 — presumably the <|end|> token id; confirm
        # against the starchat-beta tokenizer.
        super().__init__("starchat", model, tokenizer, eos_token_id=49155)

    def prepare_prompt(self, messages: List[Message]):
        # Serialize the transcript into starchat's role-tag format and
        # append the assistant tag to cue the reply.
        prompt = ""
        for i, message in enumerate(messages):
            prompt += f"<|{message.role}|>\n{message.content}\n<|end|>\n"
            if i == len(messages) - 1:
                prompt += "<|assistant|>\n"
        return self.tokenizer.encode(prompt, return_tensors="pt").to(self.model.device)

    def extract_output(self, output: str) -> str:
        # Keep only the text after the assistant tag, dropping a trailing
        # <|end|> marker if present.
        out = output.split("<|assistant|>")[1]
        if out.endswith("<|end|>"):
            out = out[:-len("<|end|>")]
        return out
class CodeLlama(HFModelBase):
    """HuggingFace CodeLlama-Instruct chat model (Llama-2 [INST] format)."""

    # Llama-2 instruction and system-prompt delimiters.
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
    DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""

    def __init__(self, version: Literal["34b", "13b", "7b"] = "34b"):
        # Imports are local so transformers/torch are only required when
        # this model is actually instantiated.
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(
            f"codellama/CodeLlama-{version}-Instruct-hf",
            add_eos_token=True,
            add_bos_token=True,
            padding_side='left'
        )
        model = AutoModelForCausalLM.from_pretrained(
            f"codellama/CodeLlama-{version}-Instruct-hf",
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
        super().__init__("codellama", model, tokenizer)

    def prepare_prompt(self, messages: List[Message]):
        # Ensure a system message exists, then fold it into the first user
        # turn per the Llama-2 prompt convention.
        if messages[0].role != "system":
            messages = [
                Message(role="system", content=self.DEFAULT_SYSTEM_PROMPT)
            ] + messages
        messages = [
            Message(role=messages[1].role, content=self.B_SYS +
                    messages[0].content + self.E_SYS + messages[1].content)
        ] + messages[2:]
        assert all([msg.role == "user" for msg in messages[::2]]) and all(
            [msg.role == "assistant" for msg in messages[1::2]]
        ), (
            "model only supports 'system', 'user' and 'assistant' roles, "
            "starting with 'system', then 'user' and alternating (u/a/u/a/u...)"
        )
        # Encode each completed (user, assistant) pair, then the final
        # pending user turn.
        messages_tokens: List[int] = sum(
            [
                self.tokenizer.encode(
                    f"{self.B_INST} {(prompt.content).strip()} {self.E_INST} {(answer.content).strip()} ",
                )
                for prompt, answer in zip(
                    messages[::2],
                    messages[1::2],
                )
            ],
            [],
        )
        assert messages[-1].role == "user", f"Last message must be from user, got {messages[-1].role}"
        messages_tokens += self.tokenizer.encode(
            f"{self.B_INST} {(messages[-1].content).strip()} {self.E_INST}",
        )
        # remove eos token from last message
        messages_tokens = messages_tokens[:-1]
        import torch
        return torch.tensor([messages_tokens]).to(self.model.device)

    def extract_output(self, output: str) -> str:
        # Text after the final [/INST], truncated at the </s> end marker.
        out = output.split("[/INST]")[-1].split("</s>")[0].strip()
        return out
def model_factory(model_name: str) -> ModelBase:
    """Instantiate the model wrapper matching *model_name*."""
    if model_name == "gpt-4":
        return GPT4()
    if model_name == "gpt-3.5-turbo":
        return GPT35()
    if model_name == "starchat":
        return StarChat()
    if model_name.startswith("codellama"):
        # if it has `-` in the name, version was specified
        kwargs = {}
        if "-" in model_name:
            kwargs["version"] = model_name.split("-")[1]
        return CodeLlama(**kwargs)
    if model_name.startswith("text-davinci"):
        return GPTDavinci(model_name)
    raise ValueError(f"Invalid model name: {model_name}")
14,189 | import os
import gzip
import json
import openai
import jsonlines
from typing import List
def read_jsonl_gz(path: str) -> List[dict]:
    """Read a gzip-compressed JSON-Lines file into a list of dicts.

    Args:
        path: file path; must end in ".jsonl.gz".

    Raises:
        ValueError: if *path* does not have the expected extension.
    """
    if not path.endswith(".jsonl.gz"):
        raise ValueError(f"File `{path}` is not a jsonl.gz file.")
    # Encoding pinned: text-mode gzip.open otherwise uses the locale's
    # preferred encoding, which mis-decodes UTF-8 JSON on some platforms.
    with gzip.open(path, "rt", encoding="utf-8") as f:
        data = [json.loads(line) for line in f]
    return data
14,190 | import os
import argparse
from immediate_refinement import run_immediate_refinement
from immediate_reflexion import run_immediate_reflexion
from simple import run_simple
from reflexion import run_reflexion
from reflexion_ucs import run_reflexion_ucs
from test_acc import run_test_acc
from utils import read_jsonl, read_jsonl_gz
def get_args():
    """Parse the benchmark-runner command line into an argparse Namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--run_name", type=str, help="The name of the run")
    parser.add_argument("--root_dir", type=str,
                        help="The root logging directory", default="root")
    parser.add_argument("--dataset_path", type=str,
                        help="The path to the benchmark dataset", default="root")
    parser.add_argument("--strategy", type=str,
                        help="Strategy: `simple`, `reflexion`")
    # BUG FIX: help text said "Strategy: `py` or `rs`" — this flag selects
    # the target language, not the strategy.
    parser.add_argument("--language", type=str, help="Language: `py` or `rs`")
    parser.add_argument(
        "--model", type=str, help="OpenAI models only for now. For best results, use GPT-4")
    parser.add_argument("--pass_at_k", type=int,
                        help="Pass@k metric", default=1)
    parser.add_argument("--max_iters", type=int,
                        help="The maximum number of self-improvement iterations", default=10)
    parser.add_argument("--expansion_factor", type=int,
                        help="The expansion factor for the reflexion UCS and A* strategy", default=3)
    parser.add_argument("--is_leetcode", action='store_true',
                        help="To run the leetcode benchmark")  # Temporary
    parser.add_argument("--verbose", action='store_true',
                        help="To print live logs")
    # TODO: implement this
    # parser.add_argument("--is_resume", action='store_true', help="To resume run")
    # parser.add_argument("--resume_dir", type=str, help="If resume, the logging directory", default="")
    args = parser.parse_args()
    return args
14,191 | import os
import argparse
from immediate_refinement import run_immediate_refinement
from immediate_reflexion import run_immediate_reflexion
from simple import run_simple
from reflexion import run_reflexion
from reflexion_ucs import run_reflexion_ucs
from test_acc import run_test_acc
from utils import read_jsonl, read_jsonl_gz
def run_immediate_refinement(
    dataset: List[dict],
    model_name: str,
    language: str,
    max_iters: int,
    pass_at_k: int,
    log_path: str,
    verbose: bool,
    is_leetcode: bool,
) -> None:
    """Refinement ablation: iterate on failing implementations using raw test
    feedback but a fixed "No self-reflection" string instead of a generated
    reflection. Results are appended to *log_path* as JSONL.
    """
    exe = executor_factory(language)
    gen = generator_factory(language)
    model = model_factory(model_name)
    print_v = make_printv(verbose)
    num_items = len(dataset)
    num_success = 0
    for i, item in enumerate_resume(dataset, log_path):
        cur_pass = 0
        is_solved = False
        # NOTE(review): reflections is never appended to in this strategy,
        # so item["reflections"] is always [].
        reflections = []
        cur_func_impl = ""
        while cur_pass < pass_at_k and not is_solved:
            tests_i = gen.internal_tests(item["prompt"], model, 1)
            # first attempt
            cur_func_impl = gen.func_impl(item["prompt"], model, "simple")
            assert isinstance(cur_func_impl, str)
            is_passing, feedback, _ = exe.execute(cur_func_impl, tests_i)
            # if solved, exit early
            if is_passing:
                is_passing = exe.evaluate(
                    item["entry_point"], cur_func_impl, item["test"], timeout=10)
                is_solved = is_passing
                num_success += int(is_passing)
                break
            # use self-reflection to iteratively improve
            cur_iter = 1
            cur_feedback = feedback
            while cur_iter < max_iters:
                # apply self-reflection in the next attempt
                cur_func_impl = gen.func_impl(
                    func_sig=item["prompt"],
                    model=model,
                    strategy="reflexion",
                    prev_func_impl=cur_func_impl,
                    feedback=cur_feedback,
                    self_reflection="No self-reflection"
                )
                assert isinstance(cur_func_impl, str)
                # check if all internal unit tests pass
                is_passing, cur_feedback, _ = exe.execute(
                    cur_func_impl, tests_i)
                # if solved, check if it passes the real tests, exit early
                if is_passing or cur_iter == max_iters - 1:
                    is_passing = exe.evaluate(
                        item["entry_point"], cur_func_impl, item["test"], timeout=10)
                    if is_passing:
                        item["solution"] = cur_func_impl
                        is_solved = True
                        num_success += 1
                    break
                cur_iter += 1
            cur_pass += 1
        # Final authoritative evaluation of the last implementation.
        is_solved = exe.evaluate(
            item["entry_point"], cur_func_impl, item["test"], timeout=10)
        item["is_solved"] = is_solved
        item["reflections"] = reflections
        item["solution"] = cur_func_impl
        write_jsonl(log_path, [item], append=True)
        print_v(
            f'completed {i+1}/{num_items}: acc = {round(num_success/(i+1), 2)}')
def run_immediate_reflexion(
    dataset: List[dict],
    model_name: str,
    language: str,
    max_iters: int,
    pass_at_k: int,
    log_path: str,
    verbose: bool,
    is_leetcode: bool
) -> None:
    """Reflexion ablation: self-reflect each iteration but never run internal
    tests — feedback stays the constant "Test cases omitted". Only the final
    implementation is evaluated against the real test suite.
    """
    exe = executor_factory(language)
    gen = generator_factory(language)
    model = model_factory(model_name)
    print_v = make_printv(verbose)
    num_items = len(dataset)
    num_success = 0
    for i, item in enumerate_resume(dataset, log_path):
        cur_pass = 0
        is_solved = False
        reflections = []
        cur_func_impl = ""
        while cur_pass < pass_at_k and not is_solved:
            # first attempt
            cur_func_impl = gen.func_impl(item["prompt"], model, "simple")
            assert isinstance(cur_func_impl, str)
            # use self-reflection to iteratively improve
            cur_iter = 1
            # NOTE: feedback never changes — exe.execute is not called in
            # this strategy's loop.
            feedback = "Test cases omitted"
            while cur_iter < max_iters:
                # get self-reflection
                reflection = gen.self_reflection(
                    cur_func_impl, feedback, model)
                reflections += [reflection]
                # apply self-reflection in the next attempt
                cur_func_impl = gen.func_impl(
                    func_sig=item["prompt"],
                    model=model,
                    strategy="reflexion",
                    prev_func_impl=cur_func_impl,
                    feedback=feedback,
                    self_reflection=reflection
                )
                assert isinstance(cur_func_impl, str)
                cur_iter += 1
            cur_pass += 1
        # Single authoritative evaluation after all iterations.
        is_solved = exe.evaluate(
            item["entry_point"], cur_func_impl, item["test"], timeout=10)
        if is_solved:
            num_success += 1
        item["is_solved"] = is_solved
        item["reflections"] = reflections
        item["solution"] = cur_func_impl
        write_jsonl(log_path, [item], append=True)
        print_v(
            f'completed {i+1}/{num_items}: acc = {round(num_success/(i+1), 2)}')
def run_simple(
    dataset: List[dict],
    model_name: str,
    language: str,
    pass_at_k: int,
    log_path: str,
    verbose: bool,
    is_leetcode: bool = False
) -> None:
    """Baseline strategy: up to *pass_at_k* independent single-shot attempts
    per item, each evaluated directly against the real test suite. Results
    are appended to *log_path* as JSONL.
    """
    exe = executor_factory(language, is_leet=is_leetcode)
    gen = generator_factory(language)
    model = model_factory(model_name)
    print_v = make_printv(verbose)
    num_items = len(dataset)
    num_success = 0
    for i, item in enumerate_resume(dataset, log_path):
        cur_pass = 0
        is_solved = False
        cur_func_impl = ""
        while cur_pass < pass_at_k:
            cur_func_impl = gen.func_impl(item["prompt"], model, "simple")
            assert isinstance(cur_func_impl, str)
            # Leetcode gets a longer timeout than the local benchmark.
            is_passing = exe.evaluate(item["entry_point"], cur_func_impl, item["test"], timeout = 20 if is_leetcode else 10)
            if is_passing:
                is_solved = True
                num_success += 1
                break
            cur_pass += 1
        item["solution"] = cur_func_impl
        item["is_solved"] = is_solved
        write_jsonl(log_path, [item], append=True)
        print_v(f'completed {i+1}/{num_items}: acc = {round(num_success/(i+1), 2)}')
def run_reflexion(
    dataset: List[dict],
    model_name: str,
    language: str,
    max_iters: int,
    pass_at_k: int,
    log_path: str,
    verbose: bool,
    is_leetcode: bool = False
) -> None:
    """Full Reflexion loop: generate, run internal tests, self-reflect on the
    feedback, and regenerate, up to *max_iters* iterations per pass. Logs
    reflections, all implementations, and test feedback per item.
    """
    exe = executor_factory(language, is_leet=is_leetcode)
    gen = generator_factory(language)
    model = model_factory(model_name)
    print_v = make_printv(verbose)
    num_items = len(dataset)
    # Resuming a run restores the prior success count.
    num_success = resume_success_count(dataset)
    for i, item in enumerate_resume(dataset, log_path):
        cur_pass = 0
        is_solved = False
        reflections = []
        implementations = []
        test_feedback = []
        cur_func_impl = ""
        while cur_pass < pass_at_k and not is_solved:
            # Leetcode items ship visible tests; otherwise generate them.
            if is_leetcode:
                tests_i = item['visible_tests']
            else:
                tests_i = gen.internal_tests(item["prompt"], model, 1)
            # first attempt
            cur_func_impl = gen.func_impl(item["prompt"], model, "simple")
            implementations.append(cur_func_impl)
            assert isinstance(cur_func_impl, str)
            is_passing, feedback, _ = exe.execute(cur_func_impl, tests_i)
            test_feedback.append(feedback)
            # if solved, exit early
            if is_passing:
                is_passing = exe.evaluate(
                    item["entry_point"], cur_func_impl, item["test"], timeout=10)
                is_solved = is_passing
                num_success += int(is_passing)
                break
            # use self-reflection to iteratively improve
            cur_iter = 1
            cur_feedback = feedback
            while cur_iter < max_iters:
                # get self-reflection
                reflection = gen.self_reflection(
                    cur_func_impl, cur_feedback, model)
                reflections += [reflection]
                # apply self-reflection in the next attempt
                cur_func_impl = gen.func_impl(
                    func_sig=item["prompt"],
                    model=model,
                    strategy="reflexion",
                    prev_func_impl=cur_func_impl,
                    feedback=cur_feedback,
                    self_reflection=reflection,
                )
                implementations.append(cur_func_impl)
                assert isinstance(cur_func_impl, str)
                # check if all internal unit tests pass
                is_passing, cur_feedback, _ = exe.execute(
                    cur_func_impl, tests_i)
                test_feedback.append(cur_feedback)
                # if solved, check if it passes the real tests, exit early
                if is_passing or cur_iter == max_iters - 1:
                    is_passing = exe.evaluate(
                        item["entry_point"], cur_func_impl, item["test"], timeout=10)
                    if is_passing:
                        item["solution"] = cur_func_impl
                        is_solved = True
                        num_success += 1
                    break
                cur_iter += 1
            cur_pass += 1
        item["is_solved"] = is_solved
        item["reflections"] = reflections
        item["implementations"] = implementations
        item["test_feedback"] = test_feedback
        item["solution"] = cur_func_impl
        write_jsonl(log_path, [item], append=True)
        print_v(
            f'completed {i+1}/{num_items}: acc = {round(num_success/(i+1), 2)}')
def run_reflexion_ucs(
    dataset: List[dict],
    model_name: str,
    language: str,
    max_iters: int,
    pass_at_k: int,
    log_path: str,
    verbose: bool,
    expansion_factor: int,
    is_leetcode: bool = False
) -> None:
    """Reflexion with uniform-cost search: each node is a (code, feedback,
    reflection, test-state) State; expansion samples *expansion_factor*
    regenerations and scores nodes by their number of failing tests.
    """
    exe = executor_factory(language, is_leet=is_leetcode)
    gen = generator_factory(language)
    model = model_factory(model_name)
    num_items = len(dataset)
    num_success = 0
    for i, item in enumerate_resume(dataset, log_path):
        cur_pass = 0
        is_solved = False
        reflections = []
        cur_func_impl = ""
        while cur_pass < pass_at_k and not is_solved:
            debug_print(f"item {i} pass {cur_pass}")
            tests_i = gen.internal_tests(item["prompt"], model, 1)
            if len(tests_i) == 0:
                warnings.warn(f"no internal tests generated for item {i}")
            # first attempt
            debug_print("first attempt")
            cur_func_impl = gen.func_impl(item["prompt"], model, "simple")
            assert isinstance(cur_func_impl, str)  # num_comps of 1
            is_passing, feedback, state = exe.execute(cur_func_impl, tests_i)
            debug_print(
                f"first attempt: \n{cur_func_impl}\n{feedback}\n{state}")
            # if solved, exit--pass_at_k 1 early
            if is_passing:
                debug_print("solved at first attempt")
                is_solved = exe.evaluate(
                    item["entry_point"], cur_func_impl, item["test"])
                num_success += 1 if is_solved else 0
                break
            reflection = gen.self_reflection(
                cur_func_impl, feedback, model)
            reflections.append(reflection)
            # Root of the search tree.
            start = State(cur_func_impl, feedback, reflection, state)

            def expand(state: State) -> Set[Tuple[State, float]]:
                # Successor function for UCS: sample expansion_factor new
                # implementations; cost of a node = number of failing tests.
                nonlocal max_iters
                nonlocal expansion_factor
                nonlocal item
                nonlocal model
                nonlocal tests_i
                nonlocal reflections
                new_states: Set[Tuple[State, float]] = set()
                debug_print(f"start expansion of: {state.state}")
                new_funcs = gen.func_impl(
                    func_sig=item["prompt"],
                    model=model,
                    strategy="reflexion",
                    prev_func_impl=state.code,
                    feedback=state.feedback,
                    self_reflection=state.reflection,
                    num_comps=expansion_factor,
                    temperature=0.75
                )
                assert isinstance(new_funcs, list)
                debug_print(f"generated num of funcs: {len(new_funcs)}")
                already_seen = set()
                for new_func in new_funcs:
                    if new_func in already_seen:
                        debug_print(f"skipping a func because already seen.")
                        continue
                    already_seen.add(new_func)
                    is_passing, feedback, new_state = exe.execute(
                        new_func, tests_i)
                    debug_print(
                        f"expanding: \n{new_func}\n{feedback}\n{new_state}")
                    if is_passing:
                        # return immediately if solved
                        return set([(State(new_func, feedback, "", new_state), 0)])
                    new_reflection = gen.self_reflection(
                        new_func, feedback, model)
                    reflections.append(new_reflection)
                    num_failing = len([x for x in new_state if not x])
                    new_states.add(
                        (State(new_func, feedback, new_reflection, new_state), num_failing))
                debug_print(f"returning new states: {new_states}")
                return new_states

            def when_none(l: List[State]) -> State:
                # Fallback when no goal was found: best = fewest failing tests.
                debug_print(f"when_none called on: {l}")
                return min(l, key=lambda x: len([y for y in x.state if not y]))
            # this is either the goal state, or if not found, the current best state (lowest failed tests)
            best = ucs(
                start=start,
                expand=expand,
                is_goal=lambda x: x.is_goal(),
                # NOTE: this way we reduce our search space significantly
                # the maximum number of nodes is 2^num_tests,
                # which is 2^5 = 32
                get_unique_id=lambda x: x.get_unique_id(),
                when_none=when_none
            )
            assert best is not None  # impossible due to our when_none
            debug_print("BEST CODE:\n\n\n")
            debug_print(best.code)
            is_passing = exe.evaluate(
                item["entry_point"], best.code, item["test"], timeout=5)
            if is_passing:
                item["solution"] = best.code
                is_solved = True
                num_success += 1
                break  # breaking pass@k loop
            cur_pass += 1
        item["is_solved"] = is_solved
        item["reflections"] = reflections
        item["solution"] = cur_func_impl
        write_jsonl(log_path, [item], append=True)
        if verbose:
            print(
                f'completed {i+1}/{num_items}: acc = {round(num_success/(i+1), 2)}')
def run_test_acc(
dataset: List[dict],
model: str,
language: str,
pass_at_k: int,
log_path: str,
verbose: bool,
is_leetcode: bool = False
) -> None:
exe = executor_factory(language, is_leet=is_leetcode)
gen = generator_factory(language)
print_v = make_printv(verbose)
num_items = len(dataset)
num_success = 0
for i, item in enumerate_resume(dataset, log_path):
cur_pass = 0
is_solved = False
tests_i = []
while cur_pass < pass_at_k:
tests_i = gen.internal_tests(item["prompt"], model, 1)
print_v(tests_i)
cur_func_impl = item["prompt"] + item["canonical_solution"]
print_v(cur_func_impl, flush=True)
is_passing, _, _ = exe.execute(cur_func_impl, tests_i)
if is_passing:
is_solved = True
num_success += 1
break
cur_pass += 1
item["solution"] = tests_i
item["is_solved"] = is_solved
write_jsonl(log_path, [item], append=True)
print_v(
f'completed {i+1}/{num_items}: acc = {round(num_success/(i+1), 2)}')
def strategy_factory(strategy: str):
def kwargs_wrapper_gen(func, delete_keys=[]):
def kwargs_wrapper(**kwargs):
for key in delete_keys:
del kwargs[key]
return func(**kwargs)
return kwargs_wrapper
if strategy == "simple":
return kwargs_wrapper_gen(run_simple, delete_keys=["expansion_factor", "max_iters"])
elif strategy == "reflexion":
return kwargs_wrapper_gen(run_reflexion, delete_keys=["expansion_factor"])
elif strategy == "immediate-reflexion":
return kwargs_wrapper_gen(run_immediate_reflexion, delete_keys=["expansion_factor"])
elif strategy == "immediate-refinement":
return kwargs_wrapper_gen(run_immediate_refinement, delete_keys=["expansion_factor"])
elif strategy == "reflexion-ucs":
return kwargs_wrapper_gen(run_reflexion_ucs)
elif strategy == "test-acc":
return kwargs_wrapper_gen(run_test_acc, delete_keys=["expansion_factor", "max_iters"])
else:
raise ValueError(f"Strategy `{strategy}` is not supported") | null |
14,192 | import sys
import signal
from utils import read_jsonl
TIMEOUT = 5
assert len(sys.argv) == 2, "Please provide a log file"
def red_text(text: str) -> str:
def green_text(text: str) -> str:
def count_test_cases(test_str: str) -> int:
def read_jsonl(path: str) -> List[dict]:
def validate_py_results(log_path: str):
if not log_path.endswith(".jsonl"):
raise ValueError("Please provide a valid log file")
data = read_jsonl(log_path)
num_success = 0
for i, item in enumerate(data):
if item["is_solved"]:
func_impl = item["solution"]
code = f'{item["prompt"]}{func_impl}\n\n{item["test"]}\n\ncheck({item["entry_point"]})'
num_tests = count_test_cases(item["test"])
try:
def handler(signum, frame):
nonlocal i
raise Exception("timeout on test case" + str(i))
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
exec(code, globals())
signal.alarm(0)
green_text_out = green_text(f"passes {num_tests}/{num_tests} test cases")
print(f"Test {i}: {green_text_out}")
num_success += 1
except Exception:
red_text_out = red_text(f"failed but should have passed!")
print(f"Test {i}: {red_text_out}")
else:
red_text_out = red_text(f"failed!")
print(f"Test {i}: {red_text_out}")
print(f"Summary: {num_success}/{len(data)} tests passed")
print(f"Acc: {round(num_success/len(data), 3)} tests passed") | null |
14,193 | import os, json
from threading import Thread
def to_jsonl(dict_data, file_path):
with open(file_path, 'a') as file:
json_line = json.dumps(dict_data)
file.write(json_line + os.linesep) | null |
14,194 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
cargo_harness_dir = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "cargo_harness")
def create_temp_project() -> Tuple[str, str]:
# get pid of the process
pid = os.getpid()
# get random number
rand = os.urandom(8).hex()
# create a temp directory
temp_dir = f"/tmp/cargo_harness-{pid}-{rand}"
# delete the temp directory if it exists
if os.path.exists(temp_dir):
os.system(f"rm -rf {temp_dir}")
os.mkdir(temp_dir)
# move the cargo harness into the temp directory
os.system(f"cp -r {cargo_harness_dir}/* {temp_dir}")
main_path = os.path.join(temp_dir, "src", "main.rs")
return temp_dir, main_path | null |
14,195 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
def indent_code(code: str, spaces: int = 4) -> str:
def write_to_file(path: str, code: str):
prelude = "fn main() {\n"
postlude = "\n}"
code = prelude + indent_code(code) + postlude
# delete the file if it exists
if os.path.exists(path):
os.remove(path)
# write the code to the file
with open(path, "w") as f:
f.write(code) | null |
14,196 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
def write_to_file_toplevel(path: str, code: str):
# delete the file if it exists
if os.path.exists(path):
os.remove(path)
# write the code to the file
with open(path, "w") as f:
f.write(code) | null |
14,197 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
def timeout_handler(_, __):
raise TimeoutError()
The provided code snippet includes necessary dependencies for implementing the `run_with_timeout` function. Write a Python function `def run_with_timeout(cmd: str, tmp_cargo_path: str, timeout: int = 5, print_debug: bool = False) -> Optional[Tuple[str, str]]` to solve the following problem:
Runs the given command with a timeout. Produces a tuple of stdout and stderr. If the command times out, returns None.
Here is the function:
def run_with_timeout(cmd: str, tmp_cargo_path: str, timeout: int = 5, print_debug: bool = False) -> Optional[Tuple[str, str]]:
"""
Runs the given command with a timeout. Produces a tuple of stdout and stderr.
If the command times out, returns None.
"""
# set up the timeout handler
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(timeout)
# run the command
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=tmp_cargo_path)
try:
out, err = p.communicate()
# reset the timeout handler
signal.alarm(0)
except TimeoutError:
p.kill()
return None
# decode the output
out = out.decode("utf-8")
err = err.decode("utf-8")
if print_debug:
print("## RUN OUTPUTS ##")
print("STDOUT:")
print(out)
print("STDERR:")
print(err, flush=True)
return out, err | Runs the given command with a timeout. Produces a tuple of stdout and stderr. If the command times out, returns None. |
14,198 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
assert_no_panic = r"""
macro_rules! assert_eq_nopanic {
($left:expr, $right:expr) => {
std::panic::catch_unwind(|| {
assert_eq!($left, $right);
}).unwrap_or_else(|_| {});
};
() => {};
}
"""
The provided code snippet includes necessary dependencies for implementing the `transform_asserts` function. Write a Python function `def transform_asserts(code: str) -> str` to solve the following problem:
Transform all asserts into assert_eq_nopanic! asserts, inserting the macro definition at the top of the code.
Here is the function:
def transform_asserts(code: str) -> str:
"""
Transform all asserts into assert_eq_nopanic! asserts, inserting the macro
definition at the top of the code.
"""
code.replace("assert_eq!", "assert_eq_nopanic!")
return assert_no_panic + code | Transform all asserts into assert_eq_nopanic! asserts, inserting the macro definition at the top of the code. |
14,199 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
assert_no_panic = r"""
macro_rules! assert_eq_nopanic {
($left:expr, $right:expr) => {
std::panic::catch_unwind(|| {
assert_eq!($left, $right);
}).unwrap_or_else(|_| {});
};
() => {};
}
"""
The provided code snippet includes necessary dependencies for implementing the `revert_asserts` function. Write a Python function `def revert_asserts(code: str) -> str` to solve the following problem:
Revert all assert_eq_nopanic! asserts back into assert_eq! asserts.
Here is the function:
def revert_asserts(code: str) -> str:
"""
Revert all assert_eq_nopanic! asserts back into assert_eq! asserts.
"""
normal = code.replace("assert_eq_nopanic!", "assert_eq!")
# remove the macro definition
return normal[len(assert_no_panic):] | Revert all assert_eq_nopanic! asserts back into assert_eq! asserts. |
14,200 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
class CompileErr:
def __init__(self, rendered):
self.rendered = rendered
def __str__(self):
return self.rendered
def __repr__(self):
return "{" + str(self) + "}"
def grab_compile_errs(inp: str) -> List[CompileErr]:
# we get a stream of json objects, so we need to parse them one by one
objs = []
for line in inp.splitlines():
if line == "":
continue
o = json.loads(line)
if o is not None and o["reason"] == "compiler-message" and \
o["message"]["level"] == "error" and \
o["message"]["spans"] != []:
rendered = o["message"]["rendered"]
objs.append(CompileErr(rendered))
return objs | null |
14,201 | import os
import signal
import subprocess
import json
from .executor_utils import timeout_handler
from .executor_types import ExecuteResult, Executor
from typing import List, Tuple, Optional
class RuntimeErr:
def __init__(self, left, right, line, column, panic_reason):
# right and left are only used for assert_eq! errors
self.left = left
self.right = right
# NOTE: currently not using the below
self.line = line
self.column = column
self.panic_reason = panic_reason
def __str__(self):
if self.left is not None and self.right is not None:
return f"assertion failed: {self.left} == {self.right}"
else:
return self.panic_reason
def __repr__(self):
return "{" + str(self) + "}"
def grab_runtime_errs(inp: str) -> List[RuntimeErr]:
failed_asserts = []
split = inp.splitlines()
curr_left = None
panic_reason = None
for line in split:
if "fatal runtime" in line:
# we have a panic
panic_idx = line.index("fatal runtime")
panic_reason = line[panic_idx + len("fatal runtime") + 1:]
elif "panicked at" in line:
panic_idx = line.index("panicked at")
# strip source line if it exists
if "src/main.rs" in line:
line = line[:line.index("src/main.rs")]
panic_reason = line[panic_idx + len("panicked at") + 1:]
elif "left:" in line:
split = line.split("`")
if len(split) < 2:
continue
curr_left = split[1]
elif "right:" in line:
split = line.split("`")
if len(split) < 2:
continue
curr_right = split[1]
# get the line and column number
fileinto = line.split(",")[-1]
line = int(fileinto.split(":")[1])
column = int(fileinto.split(":")[2])
failed_asserts.append(RuntimeErr(
curr_left, curr_right, line, column, panic_reason))
curr_left = None
panic_reason = None
if panic_reason is not None:
failed_asserts.append(RuntimeErr(None, None, None, None, panic_reason))
return failed_asserts | null |
14,202 | import ast
import signal
import astunparse
from .executor_utils import function_with_timeout
from typing import List
from .executor_types import ExecuteResult, Executor
def get_call_str(assert_statement: str) -> str:
ast_parsed = ast.parse(assert_statement)
try:
call_str = ast_parsed.body[0].test.left # type: ignore
except:
call_str = ast_parsed.body[0].test # type: ignore
return astunparse.unparse(call_str).strip()
def function_with_timeout(func, args, timeout):
result_container = []
def wrapper():
result_container.append(func(*args))
thread = PropagatingThread(target=wrapper)
thread.start()
thread.join(timeout)
if thread.is_alive():
raise TimeoutError()
else:
return result_container[0]
def get_output(func: str, assert_statement: str, timeout: int = 5) -> str:
try:
exec(f"from typing import *\n{func}", globals())
func_call = get_call_str(assert_statement)
output = function_with_timeout(eval, (func_call, globals()), timeout)
return output
except TimeoutError:
return "TIMEOUT"
except Exception as e:
return str(e) | null |
14,203 | from .py_executor import PyExecutor
from .rs_executor import RsExecutor
from .executor_types import Executor
from .leet_executor import LeetExecutor
class PyExecutor(Executor):
def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
# Combine function code and assert statement
imports = 'from typing import *'
func_test_list = [f'{imports}\n{func}\n{test}' for test in tests]
# Run the tests and collect the results
success_tests = []
failed_tests = []
is_passing = True
num_tests = len(func_test_list)
for i in range(num_tests):
try:
function_with_timeout(exec, (func_test_list[i], globals()), timeout)
success_tests += [tests[i]]
except Exception:
output = get_output(func, tests[i], timeout=timeout)
failed_tests += [f"{tests[i]} # output: {output}"]
is_passing = False
state = []
for test in tests:
if test in success_tests:
state += [True]
else:
state += [False]
state = tuple(state)
feedback = "Tested passed:"
for test in success_tests:
feedback += f"\n{test}"
feedback += "\n\nTests failed:"
for test in failed_tests:
feedback += f"\n{test}"
return ExecuteResult(is_passing, feedback, state)
def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
"""
Evaluates the implementation on Human-Eval Python.
probably should be written in a dataset-agnostic way but not now
"""
code = f"""{func}
{test}
check({name})
"""
try:
function_with_timeout(exec, (code, globals()), timeout)
return True
except Exception:
return False
class RsExecutor(Executor):
def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
# Combine function code and assert statement
func_test_list = [f'{func}\n{test}' for test in tests]
tmp_dir, temp_file = create_temp_project()
# run cargo check --message-format=json
write_to_file(temp_file, func)
res = run_with_timeout(
"cargo check --message-format=json", tmp_dir, timeout=timeout)
assert res is not None, "Timeout in cargo check, wow"
errs = grab_compile_errs(res[0]) # (check returns stdin)
if len(errs) > 0:
# cleanup the temp directory
os.system(f"rm -rf {tmp_dir}")
state = tuple([False] * len(tests))
err_str = ""
for err in errs:
err_str += f"\n{err}"
return ExecuteResult(False, err_str, state)
# Run the tests and collect the results
tests_res: List[Tuple[bool, str]] = []
num_tests = len(func_test_list)
for i in range(num_tests):
"""
# use some sort of timeout limit to handle infinite loops
if pass, add to success tests
if fail, add to failed tests with the log from the compiler
"""
write_to_file(temp_file, func_test_list[i])
# run cargo run
res = run_with_timeout("cargo run", tmp_dir, timeout=timeout)
if res is None:
tests_res.append((False, "Timeout"))
continue
# check if we have any failed tests
errs = grab_runtime_errs(res[1])
if len(errs) > 0:
tests_res.append((False, str(errs[0])))
continue
# if we get here, the test passed
tests_res.append((True, ""))
# cleanup the temp directory
os.system(f"rm -rf {tmp_dir}")
passed_str = ""
failed_str = ""
state = []
for i, (passed, output) in enumerate(tests_res):
test = tests[i]
if passed:
passed_str += f"\n{test}"
else:
failed_str += f"\n{test} // output: {output}"
state.append(passed)
feedback = "Tested passed:"
feedback += passed_str
feedback += "\n\nTests failed:"
feedback += failed_str
is_passing = len(failed_str) == 0
return ExecuteResult(is_passing, feedback, tuple(state))
def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
"""
Evaluates the implementation on Human-Eval Rust (MultiPL-E generated,
Federico Cassano, John Gouwar, Daniel Nguyen, Sydney Nguyen, Luna Phipps-Costin, Donald Pinckney, Ming-Ho Yee, Yangtian Zi, Carolyn Jane Anderson, Molly Q Feldman, Arjun Guha, Michael Greenberg, Abhinav Jangda ).
If you use this function please cite:
title={MultiPL-E: A Scalable and Extensible Approach to Benchmarking Neural Code Generation},
author={Federico Cassano and John Gouwar and Daniel Nguyen and Sydney Nguyen and Luna Phipps-Costin and Donald Pinckney and Ming-Ho Yee and Yangtian Zi and Carolyn Jane Anderson and Molly Q Feldman and Arjun Guha and Michael Greenberg and Abhinav Jangda},
year={2022},
eprint={2208.08227},
archivePrefix={arXiv},
primaryClass={cs.LG}
})
TODO: do it actually
"""
tmp_dir, tmp_path = create_temp_project()
print(f"Evaluating\n{func + test}", flush=True)
write_to_file_toplevel(tmp_path, func + test)
res = run_with_timeout(
"cargo check --message-format=json", tmp_dir, timeout=timeout, print_debug=True)
assert res is not None, "Timeout in cargo check, wow"
errs = grab_compile_errs(res[0]) # (check returns stdin)
if len(errs) > 0:
# cleanup the temp directory
os.system(f"rm -rf {tmp_dir}")
print("Compile errors. Failed eval", flush=True)
return False
# compile and run the binary
res = run_with_timeout("cargo run", tmp_dir,
timeout=timeout, print_debug=True)
os.system(f"rm -rf {tmp_dir}")
if res is None:
print("Timeout?. Failed eval", flush=True)
return False
else:
errs = grab_runtime_errs(res[1])
if len(errs) > 0:
print("Runtime errors. Failed eval", flush=True)
return False
print("Passed eval", flush=True)
return len(errs) == 0
class Executor(ABC):
def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
...
def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
...
class LeetExecutor(Executor):
def __init__(self, lang, executor: Executor, formatter):
from .leetcode_env.leetcode_env.utils import SubmissionFormatter
from .leetcode_env.leetcode_env.leetcode_types import ProgrammingLanguage
from .leetcode_env.leetcode_env.environment import LeetCodeEnv
assert isinstance(formatter, SubmissionFormatter)
assert isinstance(lang, ProgrammingLanguage)
self.lang = lang
self.executor = executor
self.formatter = formatter
self.env = LeetCodeEnv()
self.name = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
return self.executor.execute(func, tests, timeout)
def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
from .leetcode_env.leetcode_env.leetcode_types import LeetCodeSubmission
from .leetcode_env.leetcode_env.utils import id_from_slug
print(f'Timeout is {timeout} seconds')
try:
leetcode_formatted_func = self.formatter.to_leetcode(func)
except Exception as e:
print(f'Error formatting function to leetcode: {e}')
return False
print('----------------- LEETCODE SUBMISSION ------------------')
print(leetcode_formatted_func)
print('--------------------------------------------------------')
submission = LeetCodeSubmission(
code=leetcode_formatted_func,
lang=self.lang,
question_id=id_from_slug(name, self.env.api_instance),
question_slug=name,
timeout=timeout
)
status, reward, _, info = self.env.step(submission)
print('----------------- LEETCODE SUBMISSION ------------------')
print(status)
print('--------------------------------------------------------')
to_jsonl({
'name': name,
'status': status,
'reward': reward,
'info': info
}, self.name)
return reward
def executor_factory(lang: str, is_leet: bool = False) -> Executor:
if lang == "py" or lang == "python":
if is_leet:
print("Using LeetCode Python executor")
from .leetcode_env.leetcode_env.leetcode_types import ProgrammingLanguage
from .leetcode_env.leetcode_env.utils import PySubmissionFormatter, RsSubmissionFormatter
return LeetExecutor(ProgrammingLanguage.PYTHON3,
PyExecutor(),
PySubmissionFormatter)
else:
return PyExecutor()
elif lang == "rs" or lang == "rust":
if is_leet:
from .leetcode_env.leetcode_env.leetcode_types import ProgrammingLanguage
from .leetcode_env.leetcode_env.utils import PySubmissionFormatter, RsSubmissionFormatter
return LeetExecutor(ProgrammingLanguage.RUST,
RsExecutor(),
RsSubmissionFormatter)
else:
return RsExecutor()
else:
raise ValueError(f"Invalid language for executor: {lang}") | null |
14,204 | import sys
import signal
from utils import read_jsonl
from executors import RsExecutor
assert len(sys.argv) == 2, "Please provide a log file"
def red_text(text: str) -> str:
return f"\033[91m{text}\033[0m"
def green_text(text: str) -> str:
return f"\033[92m{text}\033[0m"
def count_test_cases(test_str: str) -> int:
# dumb way to do this but works
return test_str.count("assert_eq")
def read_jsonl(path: str) -> List[dict]:
if not os.path.exists(path):
raise FileNotFoundError(f"File `{path}` does not exist.")
elif not path.endswith(".jsonl"):
raise ValueError(f"File `{path}` is not a jsonl file.")
items = []
with jsonlines.open(path) as reader:
for item in reader:
items += [item]
return items
def validate_rs_results(log_path: str):
if not log_path.endswith(".jsonl"):
raise ValueError("Please provide a valid log file")
data = read_jsonl(log_path)
num_success = 0
for i, item in enumerate(data):
if item["is_solved"]:
func_impl = item["solution"]
num_tests = count_test_cases(item["test"])
rs_executor = RsExecutor()
res = rs_executor.evaluate(item["entry_point"], func_impl, item["test"])
if res:
green_text_out = green_text(f"passes {num_tests}/{num_tests} test cases")
print(f"Test {i}: {green_text_out}")
num_success += 1
else:
red_text_out = red_text(f"failed but should have passed!")
print(f"Test {i}: {red_text_out}")
else:
red_text_out = red_text(f"failed!")
print(f"Test {i}: {red_text_out}")
print(f"Summary: {num_success}/{len(data)} tests passed")
print(f"Acc: {round(num_success/len(data), 3)} tests passed") | null |
14,205 | import sys
from datasets.load import load_dataset
from utils import write_jsonl
def write_jsonl(path: str, data: List[dict], append: bool = False):
with jsonlines.open(path, mode='a' if append else 'w') as writer:
for item in data:
writer.write(item)
def download_dataset(dataset_name: str):
dataset = load_dataset("nuprl/MultiPL-E", dataset_name)
final = []
for item in dataset["test"]:
name = item["name"]
entry = "_".join(name.split("_")[2:])
print(entry)
item["entry_point"] = entry
item["test"] = item["tests"]
item["test"] = item["test"][1:] # there is some garbage at the start
del item["tests"]
final.append(item)
output_path = f"./benchmarks/{dataset_name}.jsonl"
_output_file = open(output_path, "w").close()
write_jsonl(output_path, final)
print(f"dumped to `{output_path}`") | null |
14,206 | import os
import joblib
def summarize_trial(agents):
correct = [a for a in agents if a.is_correct()]
incorrect = [a for a in agents if a.is_finished() and not a.is_correct()]
return correct, incorrect
def remove_fewshot(prompt: str) -> str:
prefix = prompt.split('Here are some examples:')[0]
suffix = prompt.split('(END OF EXAMPLES)')[1]
return prefix.strip('\n').strip() + '\n' + suffix.strip('\n').strip()
def log_trial(agents, trial_n):
correct, incorrect = summarize_trial(agents)
log = f"""
########################################
BEGIN TRIAL {trial_n}
Trial summary: Correct: {len(correct)}, Incorrect: {len(incorrect)}
#######################################
"""
log += '------------- BEGIN CORRECT AGENTS -------------\n\n'
for agent in correct:
log += remove_fewshot(agent._build_agent_prompt()) + f'\nCorrect answer: {agent.key}\n\n'
log += '------------- BEGIN INCORRECT AGENTS -----------\n\n'
for agent in incorrect:
log += remove_fewshot(agent._build_agent_prompt()) + f'\nCorrect answer: {agent.key}\n\n'
return log | null |
14,207 | import os
import joblib
def remove_fewshot(prompt: str) -> str:
prefix = prompt.split('Here are some examples:')[0]
suffix = prompt.split('(END OF EXAMPLES)')[1]
return prefix.strip('\n').strip() + '\n' + suffix.strip('\n').strip()
def summarize_react_trial(agents):
correct = [a for a in agents if a.is_correct()]
halted = [a for a in agents if a.is_halted()]
incorrect = [a for a in agents if a.is_finished() and not a.is_correct()]
return correct, incorrect, halted
def log_react_trial(agents, trial_n):
correct, incorrect, halted = summarize_react_trial(agents)
log = f"""
########################################
BEGIN TRIAL {trial_n}
Trial summary: Correct: {len(correct)}, Incorrect: {len(incorrect)}, Halted: {len(halted)}
#######################################
"""
log += '------------- BEGIN CORRECT AGENTS -------------\n\n'
for agent in correct:
log += remove_fewshot(agent._build_agent_prompt()) + f'\nCorrect answer: {agent.key}\n\n'
log += '------------- BEGIN INCORRECT AGENTS -----------\n\n'
for agent in incorrect:
log += remove_fewshot(agent._build_agent_prompt()) + f'\nCorrect answer: {agent.key}\n\n'
log += '------------- BEGIN HALTED AGENTS -----------\n\n'
for agent in halted:
log += remove_fewshot(agent._build_agent_prompt()) + f'\nCorrect answer: {agent.key}\n\n'
return log | null |
14,208 | import os
import joblib
def save_agents(agents, dir: str):
os.makedirs(dir, exist_ok=True)
for i, agent in enumerate(agents):
joblib.dump(agent, os.path.join(dir, f'{i}.joblib')) | null |
14,209 | from langchain.agents.react.base import DocstoreExplorer
from langchain.llms.base import BaseLLM
def reactLLMMock(prompt: str) -> str:
last_line = prompt.split('\n')[-1].strip()
last_action = last_line.split(' ')[0].lower()
if last_action == 'thought':
return 'It does not mention the eastern sector. So I need to look up eastern sector.'
elif last_action == 'action':
return 'Lookup[eastern sector]'
else:
raise Exception('Invalid action type') | null |
14,210 | from langchain.agents.react.base import DocstoreExplorer
from langchain.llms.base import BaseLLM
def reflectLLMMock(prompt: str) -> str:
return "Last time i should have answered correctly" | null |
14,211 | import re
import string
from typing import Tuple
import gym
from langchain import Wikipedia
from langchain.agents.react.base import DocstoreExplorer
def parse_action(string):
pattern = r'^(\w+)\[(.+)\]$'
match = re.match(pattern, string)
if match:
action_type = match.group(1)
argument = match.group(2)
return action_type, argument
else:
return None, None | null |
14,212 | import re
import string
from typing import Tuple
import gym
from langchain import Wikipedia
from langchain.agents.react.base import DocstoreExplorer
def normalize_answer(s):
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def EM(answer, key) -> bool:
return normalize_answer(answer) == normalize_answer(key) | null |
14,213 | import re, string, os
from typing import List, Union, Literal
from enum import Enum
import tiktoken
from langchain import OpenAI, Wikipedia
from langchain.llms.base import BaseLLM
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage,
)
from langchain.agents.react.base import DocstoreExplorer
from langchain.docstore.base import Docstore
from langchain.prompts import PromptTemplate
from llm import AnyOpenAILLM
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER, LAST_TRIAL_HEADER, REFLECTION_AFTER_LAST_TRIAL_HEADER
from prompts import cot_agent_prompt, cot_reflect_agent_prompt, cot_reflect_prompt, COT_INSTRUCTION, COT_REFLECT_INSTRUCTION
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS, COT, COT_REFLECT
def parse_action(string):
pattern = r'^(\w+)\[(.+)\]$'
match = re.match(pattern, string)
if match:
action_type = match.group(1)
argument = match.group(2)
return action_type, argument
else:
return None | null |
14,214 | import re, string, os
from typing import List, Union, Literal
from enum import Enum
import tiktoken
from langchain import OpenAI, Wikipedia
from langchain.llms.base import BaseLLM
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage,
)
from langchain.agents.react.base import DocstoreExplorer
from langchain.docstore.base import Docstore
from langchain.prompts import PromptTemplate
from llm import AnyOpenAILLM
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER, LAST_TRIAL_HEADER, REFLECTION_AFTER_LAST_TRIAL_HEADER
from prompts import cot_agent_prompt, cot_reflect_agent_prompt, cot_reflect_prompt, COT_INSTRUCTION, COT_REFLECT_INSTRUCTION
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS, COT, COT_REFLECT
def format_step(step: str) -> str:
return step.strip('\n').strip().replace('\n', '') | null |
14,215 | import re, string, os
from typing import List, Union, Literal
from enum import Enum
import tiktoken
from langchain import OpenAI, Wikipedia
from langchain.llms.base import BaseLLM
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage,
)
from langchain.agents.react.base import DocstoreExplorer
from langchain.docstore.base import Docstore
from langchain.prompts import PromptTemplate
from llm import AnyOpenAILLM
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER, LAST_TRIAL_HEADER, REFLECTION_AFTER_LAST_TRIAL_HEADER
from prompts import cot_agent_prompt, cot_reflect_agent_prompt, cot_reflect_prompt, COT_INSTRUCTION, COT_REFLECT_INSTRUCTION
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS, COT, COT_REFLECT
REFLECTION_HEADER = 'You have attempted to answer following question before and failed. The following reflection(s) give a plan to avoid failing to answer the question in the same way you did previously. Use them to improve your strategy of correctly answering the given question.\n'
def format_reflections(reflections: List[str],
header: str = REFLECTION_HEADER) -> str:
if reflections == []:
return ''
else:
return header + 'Reflections:\n- ' + '\n- '.join([r.strip() for r in reflections]) | null |
14,216 | import re, string, os
from typing import List, Union, Literal
from enum import Enum
import tiktoken
from langchain import OpenAI, Wikipedia
from langchain.llms.base import BaseLLM
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage,
)
from langchain.agents.react.base import DocstoreExplorer
from langchain.docstore.base import Docstore
from langchain.prompts import PromptTemplate
from llm import AnyOpenAILLM
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER, LAST_TRIAL_HEADER, REFLECTION_AFTER_LAST_TRIAL_HEADER
from prompts import cot_agent_prompt, cot_reflect_agent_prompt, cot_reflect_prompt, COT_INSTRUCTION, COT_REFLECT_INSTRUCTION
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS, COT, COT_REFLECT
# Tokenizer used to measure prompt length in tokens.
gpt2_enc = tiktoken.encoding_for_model("text-davinci-003")

def truncate_scratchpad(scratchpad: str, n_tokens: int = 1600, tokenizer = gpt2_enc) -> str:
    """Shrink *scratchpad* to at most *n_tokens* by stubbing out observations.

    The longest 'Observation' lines are replaced (largest first) with a
    '[truncated wikipedia excerpt]' placeholder until the scratchpad fits.
    If no replaceable observations remain, the (possibly still oversized)
    scratchpad is returned as-is instead of raising IndexError.
    """
    lines = scratchpad.split('\n')
    observations = filter(lambda x: x.startswith('Observation'), lines)
    observations_by_tokens = sorted(observations, key=lambda x: len(tokenizer.encode(x)))
    # Fix: measure with the supplied *tokenizer* (the original hard-coded the
    # module-level gpt2_enc here), and stop gracefully when nothing is left
    # to truncate rather than raising IndexError from pop() on an empty list.
    while observations_by_tokens and len(tokenizer.encode('\n'.join(lines))) > n_tokens:
        largest_observation = observations_by_tokens.pop(-1)
        ind = lines.index(largest_observation)
        lines[ind] = largest_observation.split(':')[0] + ': [truncated wikipedia excerpt]'
    return '\n'.join(lines)
# Shown before replaying the full previous (failed) trial to the agent.
LAST_TRIAL_HEADER = 'You have attempted to answer the following question before and failed. Below is the last trial you attempted to answer the question.\n'

def format_last_attempt(question: str,
                        scratchpad: str,
                        header: str = LAST_TRIAL_HEADER):
    """Format the previous trial (question + truncated scratchpad) for re-prompting."""
    trimmed = truncate_scratchpad(scratchpad, tokenizer=gpt2_enc).strip('\n').strip()
    return header + f'Question: {question}\n' + trimmed + '\n(END PREVIOUS TRIAL)\n'
14,217 | import re, string, os
from typing import List, Union, Literal
from enum import Enum
import tiktoken
from langchain import OpenAI, Wikipedia
from langchain.llms.base import BaseLLM
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage,
)
from langchain.agents.react.base import DocstoreExplorer
from langchain.docstore.base import Docstore
from langchain.prompts import PromptTemplate
from llm import AnyOpenAILLM
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER, LAST_TRIAL_HEADER, REFLECTION_AFTER_LAST_TRIAL_HEADER
from prompts import cot_agent_prompt, cot_reflect_agent_prompt, cot_reflect_prompt, COT_INSTRUCTION, COT_REFLECT_INSTRUCTION
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS, COT, COT_REFLECT
def normalize_answer(s):
    """Normalize a QA answer: lowercase, strip punctuation and articles,
    and collapse runs of whitespace to single spaces (SQuAD-style)."""
    punctuation = set(string.punctuation)
    lowered = s.lower()
    no_punct = "".join(ch for ch in lowered if ch not in punctuation)
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct)
    return " ".join(no_articles.split())

def EM(answer, key) -> bool:
    """Exact-match comparison of two answers after normalization."""
    return normalize_answer(answer) == normalize_answer(key)
14,218 | import os
from typing import List
import dotenv
import gym
import tiktoken
from langchain import OpenAI
from langchain.llms.base import BaseLLM
from langchain.prompts import PromptTemplate
from environment import QAEnv
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS
# Prefixed to the task prompt when earlier failed attempts produced reflections.
REFLECTION_HEADER = 'You have attempted to answer following question before and failed. The following reflection(s) give a plan to avoid failing to answer the question in the same way you did previously. Use them to improve your strategy of correctly answering the given question.\n'

def format_reflections(reflections: List[str]) -> str:
    """Render *reflections* as a bulleted list under REFLECTION_HEADER.

    Returns '' when there are no reflections.
    """
    if reflections == []:
        return ''
    joined = '\n- '.join(r.strip() for r in reflections)
    return REFLECTION_HEADER + 'Reflections:\n- ' + joined
14,219 | import os
from typing import List
import dotenv
import gym
import tiktoken
from langchain import OpenAI
from langchain.llms.base import BaseLLM
from langchain.prompts import PromptTemplate
from environment import QAEnv
from prompts import reflect_prompt, react_agent_prompt, react_reflect_agent_prompt, REFLECTION_HEADER
from fewshots import WEBTHINK_SIMPLE6, REFLECTIONS
def format_step(step: str) -> str:
    """Trim surrounding newlines/whitespace and drop all interior newlines."""
    trimmed = step.strip('\n').strip()
    return trimmed.replace('\n', '')
14,220 | import os
import openai
from tenacity import (
retry,
stop_after_attempt, # type: ignore
wait_random_exponential, # type: ignore
)
from typing import Optional, List, Union
# Configure the OpenAI client from the environment at import time.
openai.api_key = os.getenv('OPENAI_API_KEY')

def get_completion(prompt: Union[str, List[str]], max_tokens: int = 256, stop_strs: Optional[List[str]] = None, is_batched: bool = False) -> Union[str, List[str]]:
    """Query text-davinci-003 deterministically (temperature 0).

    *prompt* must be a str when is_batched is False and a list of str when
    True; the return type mirrors that shape. Batched completions are
    reordered by their choice index to match the input order.
    """
    assert (not is_batched and isinstance(prompt, str)) or (is_batched and isinstance(prompt, list))
    response = openai.Completion.create(
        model='text-davinci-003',
        prompt=prompt,
        temperature=0.0,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        stop=stop_strs,
    )
    if not is_batched:
        return response.choices[0].text
    ordered: List[str] = [""] * len(prompt)
    for choice in response.choices:
        ordered[choice.index] = choice.text
    return ordered
14,221 | import os
import json
import argparse
from webshop_trial import run_trial
from generate_reflections import update_memory
from typing import Any, List, Dict
def get_args():
    """Parse command-line options for a run; trial/env counts must be positive."""
    p = argparse.ArgumentParser()
    p.add_argument("--num_trials", type=int, help="The number of trials to run")
    p.add_argument("--num_envs", type=int, help="The number of environments per trial")
    p.add_argument("--run_name", type=str, help="The name of the run")
    p.add_argument("--use_memory", action='store_true', help="Allow the Agent to use memory")
    p.add_argument("--is_resume", action='store_true', help="To resume run")
    p.add_argument("--resume_dir", type=str, help="If resume, the logging directory", default="")
    p.add_argument("--start_trial_num", type=int, help="If resume, the start trial num", default=0)
    parsed = p.parse_args()

    assert parsed.num_trials > 0, "Number of trials should be positive"
    assert parsed.num_envs > 0, "Number of environments should be positive"

    return parsed
14,222 | from typing import List, Dict
def _get_base_query(base_query: str, start_info: str, memory: List[str]) -> str:
query = base_query
# add memory if it exists
if len(memory) > 0:
query += '\nYour memory for the task below:'
for i, m in enumerate(memory):
query += f'\nTrial {i}:\n{m.strip()}'
query += f"\nHere is the task:\n{start_info}"
return query | null |
14,223 | from utils import get_completion
from typing import List, Dict, Any
# Few-shot reflection examples are loaded once at import time.
with open("./reflection_few_shot_examples.txt", 'r') as f:
    FEW_SHOT_EXAMPLES = f.read()

def _generate_reflection_query(log_str: str, memory: List[str]) -> str:
    """Allows the Agent to reflect upon a past experience."""
    # NOTE(review): _get_scenario is defined elsewhere in the project.
    scenario: str = _get_scenario(log_str)
    query: str = f"""You will be given the history of a past experience in which you were placed in an environment and given a task to complete. You were unsuccessful in completing the task. Do not summarize your environment, but rather think about the strategy and path you took to attempt to complete the task. Devise a concise, new plan of action that accounts for your mistake with reference to specific actions that you should have taken. There are two examples below.
{FEW_SHOT_EXAMPLES}
Instruction: {scenario}"""
    if memory:
        query += '\n\nPlans from past attempts:\n'
        query += ''.join(f'Trial #{i}: {m}\n' for i, m in enumerate(memory))
    query += "\n\nNew plan:"
    return query
def get_completion(prompt: str, temperature: float = 0.0, max_tokens: int = 256, stop_strs: Optional[List[str]] = None) -> str:
    """Single-prompt completion against text-davinci-003; returns the text.

    NOTE(review): relies on a module-level `openai` client configured elsewhere.
    """
    request = dict(
        model='text-davinci-003',
        prompt=prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        stop=stop_strs,
    )
    response = openai.Completion.create(**request)
    return response.choices[0].text
The provided code snippet includes necessary dependencies for implementing the `update_memory` function. Write a Python function `def update_memory(trial_log_path: str, env_configs: List[Dict[str, Any]]) -> List[Dict[str, Any]]` to solve the following problem:
Updates the given env_config with the appropriate reflections.
Here is the function:
def update_memory(trial_log_path: str, env_configs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Updates the given env_config with the appropriate reflections.

    Reads the trial log, splits it into one section per environment, and for
    every unsolved environment generates a new reflection (seeded with at most
    the last 3 stored memories) and appends it to that env's memory list.

    Raises:
        AssertionError: if the log does not contain one section per env config.
    """
    with open(trial_log_path, 'r') as f:
        full_log: str = f.read()

    env_logs: List[str] = full_log.split('#####\n\n#####')
    # Fix: the original used `assert cond, print(...)` — print() returns None,
    # so the assertion message was always lost; pass the f-string directly.
    assert len(env_logs) == len(env_configs), f'bad: {len(env_logs)}, {len(env_configs)}'
    for i, env in enumerate(env_configs):
        # if unsolved, get reflection and update env config
        if not env['is_success']:
            # Slicing already caps at the last 3 entries even for short lists,
            # so the original length check was redundant.
            memory: List[str] = env['memory'][-3:]
            reflection_query: str = _generate_reflection_query(env_logs[i], memory)
            reflection: str = get_completion(reflection_query)  # type: ignore
            env_configs[i]['memory'] += [reflection]

    return env_configs
14,224 | import os
import sys
import openai
import requests
from bs4 import BeautifulSoup
from bs4.element import Comment
from env_history import EnvironmentHistory
from typing import Any, Dict, List, Tuple
WEBSHOP_URL = "http://3.83.245.205:3000"
def clean_str(p):
    """Undo unicode-escape mangling in scraped text.

    Interprets backslash escapes in *p* as raw bytes (via latin-1), then
    decodes those bytes as UTF-8.
    """
    unescaped = p.encode().decode("unicode-escape")
    return unescaped.encode("latin1").decode("utf-8")
def tag_visible(element):
    """True if a BeautifulSoup text node would be rendered on the page.

    Filters out text inside non-visual tags and HTML comments.
    """
    hidden_parents = {'style', 'script', 'head', 'title', 'meta', '[document]'}
    if element.parent.name in hidden_parents:
        return False
    return not isinstance(element, Comment)
def webshop_text(session, page_type, query_string='', page_num=1, asin='', options={}, subpage='', **kwargs):
    """Fetch one WebShop page and render it as an agent-readable observation.

    *page_type* selects the URL: 'init', 'search', 'item', 'item_sub' or 'end'.
    Visible text is scraped and clickable elements are wrapped in brackets:
    '\n[Button] ' for buttons, '[option]' / '[[selected option]]' for option
    labels, and '\n[ASIN] ' for product links (at most 3 shown).

    Returns:
        (observation, info): info may contain 'option_types' (option label ->
        its option group), 'asins', and — on the score page — float 'reward'.

    NOTE(review): `options={}` is a mutable default argument; it is reassigned
    to a fresh dict before being written in the render branch, so it appears
    harmless here — confirm before relying on the default.
    """
    if page_type == 'init':
        url = (
            f'{WEBSHOP_URL}/{session}'
        )
    if page_type == 'search':
        url = (
            f'{WEBSHOP_URL}/search_results/{session}/'
            f'{query_string}/{page_num}'
        )
    elif page_type == 'item':
        url = (
            f'{WEBSHOP_URL}/item_page/{session}/'
            f'{asin}/{query_string}/{page_num}/{options}'
        )
    elif page_type == 'item_sub':
        url = (
            f'{WEBSHOP_URL}/item_sub_page/{session}/'
            f'{asin}/{query_string}/{page_num}/{subpage}/{options}'
        )
    elif page_type == 'end':
        url = (
            f'{WEBSHOP_URL}/done/{session}/'
            f'{asin}/{options}'
        )
    # print(url)
    html = requests.get(url).text  # type: ignore
    html_obj = BeautifulSoup(html, 'html.parser')
    texts = html_obj.findAll(text=True)
    visible_texts = list(filter(tag_visible, texts))
    # visible_texts = [str(text).strip().strip('\\n') for text in visible_texts]
    # if page_type == 'end': import pdb; pdb.set_trace()
    if False:
        # For `simple` mode, return just [SEP] separators (disabled).
        return ' [SEP] '.join(t.strip() for t in visible_texts if t != '\n')
    else:
        # Otherwise, return an observation with tags mapped to specific, unique separators
        observation = ''
        option_type = ''
        options = {}
        asins = []
        cnt = 0
        prod_cnt = 0   # products seen so far (display capped at 3)
        just_prod = 0  # lines since the last product link
        for t in visible_texts:
            if t == '\n': continue
            if t.replace('\n', '').replace('\\n', '').replace(' ', '') == '': continue
            # if t.startswith('Instruction:') and page_type != 'init': continue
            # print(t.parent.name, t)
            if t.parent.name == 'button':  # button
                processed_t = f'\n[{t}] '
            elif t.parent.name == 'label':  # options
                # An option quoted in the URL is the currently selected one.
                if f"'{t}'" in url:  # type: ignore
                    processed_t = f'[[{t}]]'
                    # observation = f'You have clicked {t}.\n' + observation
                else:
                    processed_t = f'[{t}]'
                # Map the option label to its group (the preceding plain text).
                options[str(t)] = option_type
                # options[option_type] = options.get(option_type, []) + [str(t)]
            elif t.parent.get('class') == ["product-link"]:  # product asins
                processed_t = f'\n[{t}] '
                if prod_cnt >= 3:
                    processed_t = ''
                prod_cnt += 1
                asins.append(str(t))
                just_prod = 0
            else:  # regular, unclickable text
                processed_t = '\n' + str(t) + ' '
                # Drop boilerplate header text and product descriptions past
                # the third listing.
                if cnt < 2 and page_type != 'init': processed_t = ''
                if just_prod <= 2 and prod_cnt >= 4: processed_t = ''
                option_type = str(t)
                cnt += 1
            just_prod += 1
            observation += processed_t
        info = {}
        if options:
            info['option_types'] = options
        if asins:
            info['asins'] = asins
        if 'Your score (min 0.0, max 1.0)' in visible_texts:
            idx = visible_texts.index('Your score (min 0.0, max 1.0)')
            info['reward'] = float(visible_texts[idx + 1])
            observation = 'Your score (min 0.0, max 1.0): ' + (visible_texts[idx + 1])
        return clean_str(observation), info
14,225 | import os
import sys
import openai
import requests
from bs4 import BeautifulSoup
from bs4.element import Comment
from env_history import EnvironmentHistory
from typing import Any, Dict, List, Tuple
with open("./base_prompt.txt", 'r') as f:
BASE_PROMPT = f.read()
class webshopEnv:
def __init__(self):
def step(self, session, action):
def webshop_run(idx, env, base_prompt, memory: List[str], to_print=True) -> Tuple[EnvironmentHistory, bool]:
def run_trial(
    trial_log_path: str,
    world_log_path: str,
    trial_idx: int,
    env_configs: List[Dict[str, Any]],
    use_memory: bool
) -> List[Dict[str, Any]]:
    """Run every configured WebShop environment once for this trial.

    Environments already marked successful are skipped (but still counted).
    Per-env outcomes go to the trial log; one status line per env plus an
    aggregate summary go to the world log.

    Returns:
        env_configs with 'is_success' updated for newly solved environments.
    """
    env = webshopEnv()

    num_successes: int = 0
    num_additional_successes: int = 0  # solved for the first time this trial
    num_envs: int = len(env_configs)

    for z, env_config in enumerate(env_configs):
        # Previously solved: count and log it, but do not re-run.
        if env_config["is_success"]:
            num_successes += 1

            # log to world log
            with open(world_log_path, 'a') as wf:
                wf.write(f'Environment #{z} Trial #{trial_idx}: SUCCESS\n')
            with open(trial_log_path, 'a') as wf:
                wf.write(f'\n#####\n\nEnvironment #{z}: Success\n\n#####\n')
            continue

        try:
            final_env_history, is_success = webshop_run(f'fixed_{z}', env, BASE_PROMPT, env_config["memory"] if use_memory else [], to_print=True)

            if is_success:
                status_str: str = f'Environment #{z} Trial #{trial_idx}: SUCCESS'
                env_configs[z]["is_success"] = True
                num_successes += 1
                num_additional_successes += 1
            else:
                status_str: str = f'Environment #{z} Trial #{trial_idx}: FAIL'

            # log env results to trial log
            with open(trial_log_path, 'a') as wf:
                wf.write(f'\n#####\n\nEnvironment #{z}:\n{str(final_env_history)}\n\nSTATUS: {"OK" if is_success else "FAIL"}\n\n#####\n')
        except AssertionError:
            # A failed scrape/parse inside webshop_run counts as a failed env.
            status_str: str = f'Environment #{z} Trial #{trial_idx}: FAIL'

            # log env results to trial log
            with open(trial_log_path, 'a') as wf:
                wf.write(f'\n#####\n\nEnvironment #{z}:\nAssertion Error\n\nSTATUS: FAIL\n\n#####\n')

        # log to world log
        with open(world_log_path, 'a') as f:
            f.write(status_str + '\n')

    # log trial results to trial and world logs
    log_str: str = f"""
-----
SUCCESS: {num_successes}
ADDITIONAL SUCCESS: {num_additional_successes}
FAIL: {num_envs - num_successes}
TOTAL: {num_envs}
ACCURACY: {round(num_successes / num_envs, 2)}
-----"""
    with open(trial_log_path, 'a') as wf:
        wf.write(log_str)
    with open(world_log_path, 'a') as wf:
        wf.write(log_str + '\n')

    return env_configs
14,226 | import argparse
import sys
import os
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, utils
from tqdm import tqdm
from vqvae import VQVAE
from scheduler import CycleScheduler
import distributed as dist
def train(epoch, loader, model, optimizer, scheduler, device):
    """Run one epoch of distributed VQ-VAE training over *loader*.

    Loss = MSE reconstruction + 0.25 * latent (quantization) loss. Per-batch
    MSE statistics are all-gathered across workers; only the primary process
    drives the progress bar and periodically writes reconstruction samples.
    """
    if dist.is_primary():
        loader = tqdm(loader)

    criterion = nn.MSELoss()

    latent_loss_weight = 0.25
    sample_size = 25

    mse_sum = 0
    mse_n = 0

    for i, (img, label) in enumerate(loader):
        model.zero_grad()

        img = img.to(device)

        out, latent_loss = model(img)
        recon_loss = criterion(out, img)
        latent_loss = latent_loss.mean()
        loss = recon_loss + latent_loss_weight * latent_loss
        loss.backward()

        if scheduler is not None:
            scheduler.step()
        optimizer.step()

        # Aggregate reconstruction error across all distributed workers so the
        # running average reflects the global batch.
        part_mse_sum = recon_loss.item() * img.shape[0]
        part_mse_n = img.shape[0]
        comm = {"mse_sum": part_mse_sum, "mse_n": part_mse_n}
        comm = dist.all_gather(comm)

        for part in comm:
            mse_sum += part["mse_sum"]
            mse_n += part["mse_n"]

        if dist.is_primary():
            lr = optimizer.param_groups[0]["lr"]

            loader.set_description(
                (
                    f"epoch: {epoch + 1}; mse: {recon_loss.item():.5f}; "
                    f"latent: {latent_loss.item():.3f}; avg mse: {mse_sum / mse_n:.5f}; "
                    f"lr: {lr:.5f}"
                )
            )

            if i % 100 == 0:
                # Periodically dump input/reconstruction pairs for inspection.
                model.eval()

                sample = img[:sample_size]

                with torch.no_grad():
                    out, _ = model(sample)

                utils.save_image(
                    torch.cat([sample, out], 0),
                    f"sample/{str(epoch + 1).zfill(5)}_{str(i).zfill(5)}.png",
                    nrow=sample_size,
                    normalize=True,
                    range=(-1, 1),
                )
                # NOTE(review): `range=` was renamed `value_range=` in newer
                # torchvision — confirm the pinned version before upgrading.

                model.train()
14,227 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets
from tqdm import tqdm
from pixelsnail import PixelSNAIL
def train(epoch, loader, model, optimizer, device):
    """Run one epoch of autoregressive prior training over *loader*.

    Trains with per-position cross-entropy against the input code grid and
    reports loss and token accuracy on the progress bar.
    """
    progress = tqdm(loader)

    ce_loss = nn.CrossEntropyLoss()

    for step, (img, label) in enumerate(progress):
        model.zero_grad()

        img = img.to(device)
        logits = model(img)
        loss = ce_loss(logits, img)
        loss.backward()
        optimizer.step()

        _, pred = logits.max(1)
        accuracy = (pred == img).float().sum() / img.numel()

        progress.set_description(
            (f'epoch: {epoch + 1}; loss: {loss.item():.5f}; ' f'acc: {accuracy:.5f}')
        )
14,228 | import argparse
import pickle
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import lmdb
from tqdm import tqdm
from dataset import ImageFileDataset, CodeRow
from vqvae import VQVAE
# Record stored per image: top/bottom code maps plus the source filename.
CodeRow = namedtuple('CodeRow', ['top', 'bottom', 'filename'])

def extract(lmdb_env, loader, model, device):
    """Encode every image in *loader* with the VQ-VAE and persist the codes.

    Each record is a pickled CodeRow keyed by its running index; a final
    'length' key records the total row count.
    """
    n_rows = 0
    with lmdb_env.begin(write=True) as txn:
        progress = tqdm(loader)

        for img, _, filename in progress:
            _, _, _, id_t, id_b = model.encode(img.to(device))
            top_codes = id_t.detach().cpu().numpy()
            bottom_codes = id_b.detach().cpu().numpy()

            for fname, top, bottom in zip(filename, top_codes, bottom_codes):
                record = CodeRow(top=top, bottom=bottom, filename=fname)
                txn.put(str(n_rows).encode('utf-8'), pickle.dumps(record))
                n_rows += 1
            progress.set_description(f'inserted: {n_rows}')

        txn.put('length'.encode('utf-8'), str(n_rows).encode('utf-8'))
14,229 | import argparse
import os
import torch
from torchvision.utils import save_image
from tqdm import tqdm
from vqvae import VQVAE
from pixelsnail import PixelSNAIL
def sample_model(model, device, batch, size, temperature, condition=None):
    """Autoregressively sample a (batch, *size) grid of discrete codes.

    Positions are filled row-major; at each step the model sees the rows
    generated so far and a sample is drawn from the temperature-scaled
    softmax over classes. *cache* is threaded through to let the model reuse
    work between steps.
    """
    height, width = size[0], size[1]
    canvas = torch.zeros(batch, *size, dtype=torch.int64).to(device)
    cache = {}

    for i in tqdm(range(height)):
        for j in range(width):
            logits, cache = model(canvas[:, : i + 1, :], condition=condition, cache=cache)
            probs = torch.softmax(logits[:, :, i, j] / temperature, 1)
            canvas[:, i, j] = torch.multinomial(probs, 1).squeeze(-1)

    return canvas
14,230 | import argparse
import os
import torch
from torchvision.utils import save_image
from tqdm import tqdm
from vqvae import VQVAE
from pixelsnail import PixelSNAIL
class VQVAE(nn.Module):
    """Two-level vector-quantized autoencoder.

    A bottom encoder (stride 4) and a top encoder (stride 2) each feed their
    own quantizer; the decoder consumes both quantized code maps.
    """

    def __init__(
        self,
        in_channel=3,
        channel=128,
        n_res_block=2,
        n_res_channel=32,
        embed_dim=64,
        n_embed=512,
        decay=0.99,
    ):
        # NOTE(review): `decay` is accepted but never forwarded to Quantize —
        # confirm whether it should be passed through.
        super().__init__()

        self.enc_b = Encoder(in_channel, channel, n_res_block, n_res_channel, stride=4)
        self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, stride=2)
        self.quantize_conv_t = nn.Conv2d(channel, embed_dim, 1)
        self.quantize_t = Quantize(embed_dim, n_embed)
        self.dec_t = Decoder(
            embed_dim, embed_dim, channel, n_res_block, n_res_channel, stride=2
        )
        self.quantize_conv_b = nn.Conv2d(embed_dim + channel, embed_dim, 1)
        self.quantize_b = Quantize(embed_dim, n_embed)
        self.upsample_t = nn.ConvTranspose2d(
            embed_dim, embed_dim, 4, stride=2, padding=1
        )
        self.dec = Decoder(
            embed_dim + embed_dim,
            in_channel,
            channel,
            n_res_block,
            n_res_channel,
            stride=4,
        )

    def forward(self, input):
        """Reconstruct *input*; returns (reconstruction, latent loss term)."""
        quant_t, quant_b, diff, _, _ = self.encode(input)
        dec = self.decode(quant_t, quant_b)

        return dec, diff

    def encode(self, input):
        """Encode *input* to quantized top/bottom code maps.

        Returns (quant_t, quant_b, diff_t + diff_b, id_t, id_b) where the ids
        are the integer codebook indices.
        """
        enc_b = self.enc_b(input)
        enc_t = self.enc_t(enc_b)

        # Quantize operates channel-last, hence the permutes around it.
        quant_t = self.quantize_conv_t(enc_t).permute(0, 2, 3, 1)
        quant_t, diff_t, id_t = self.quantize_t(quant_t)
        quant_t = quant_t.permute(0, 3, 1, 2)
        diff_t = diff_t.unsqueeze(0)

        # Bottom-level encoding is conditioned on the decoded top codes.
        dec_t = self.dec_t(quant_t)
        enc_b = torch.cat([dec_t, enc_b], 1)

        quant_b = self.quantize_conv_b(enc_b).permute(0, 2, 3, 1)
        quant_b, diff_b, id_b = self.quantize_b(quant_b)
        quant_b = quant_b.permute(0, 3, 1, 2)
        diff_b = diff_b.unsqueeze(0)

        return quant_t, quant_b, diff_t + diff_b, id_t, id_b

    def decode(self, quant_t, quant_b):
        """Decode quantized top/bottom maps to an image tensor."""
        upsample_t = self.upsample_t(quant_t)
        quant = torch.cat([upsample_t, quant_b], 1)
        dec = self.dec(quant)

        return dec

    def decode_code(self, code_t, code_b):
        """Decode integer code indices (e.g. sampled by a prior) to an image."""
        quant_t = self.quantize_t.embed_code(code_t)
        quant_t = quant_t.permute(0, 3, 1, 2)
        quant_b = self.quantize_b.embed_code(code_b)
        quant_b = quant_b.permute(0, 3, 1, 2)

        dec = self.decode(quant_t, quant_b)

        return dec
class PixelSNAIL(nn.Module):
    """Autoregressive prior over a 2-D grid of discrete codes.

    Combines causal horizontal/vertical convolution stems, a stack of
    PixelBlocks, fixed coordinate background channels, and an optional
    conditioning ResNet whose upsampled output is cached between calls.
    """

    def __init__(
        self,
        shape,
        n_class,
        channel,
        kernel_size,
        n_block,
        n_res_block,
        res_channel,
        attention=True,
        dropout=0.1,
        n_cond_res_block=0,
        cond_res_channel=0,
        cond_res_kernel=3,
        n_out_res_block=0,
    ):
        super().__init__()

        height, width = shape

        self.n_class = n_class

        # Force an odd kernel size so the causal shifts stay aligned.
        if kernel_size % 2 == 0:
            kernel = kernel_size + 1
        else:
            kernel = kernel_size

        self.horizontal = CausalConv2d(
            n_class, channel, [kernel // 2, kernel], padding='down'
        )
        self.vertical = CausalConv2d(
            n_class, channel, [(kernel + 1) // 2, kernel // 2], padding='downright'
        )

        # Normalized (x, y) coordinate planes, registered as a fixed buffer
        # and fed to every block as background context.
        coord_x = (torch.arange(height).float() - height / 2) / height
        coord_x = coord_x.view(1, 1, height, 1).expand(1, 1, height, width)
        coord_y = (torch.arange(width).float() - width / 2) / width
        coord_y = coord_y.view(1, 1, 1, width).expand(1, 1, height, width)
        self.register_buffer('background', torch.cat([coord_x, coord_y], 1))

        self.blocks = nn.ModuleList()

        for i in range(n_block):
            self.blocks.append(
                PixelBlock(
                    channel,
                    res_channel,
                    kernel_size,
                    n_res_block,
                    attention=attention,
                    dropout=dropout,
                    condition_dim=cond_res_channel,
                )
            )

        if n_cond_res_block > 0:
            self.cond_resnet = CondResNet(
                n_class, cond_res_channel, cond_res_kernel, n_cond_res_block
            )

        out = []

        for i in range(n_out_res_block):
            out.append(GatedResBlock(channel, res_channel, 1))

        out.extend([nn.ELU(inplace=True), WNConv2d(channel, n_class, 1)])

        self.out = nn.Sequential(*out)

    def forward(self, input, condition=None, cache=None):
        """Return (logits, cache) for an integer code grid of shape (B, H, W).

        *cache* lets incremental (row-by-row) sampling reuse the processed
        condition map instead of recomputing it each step.
        """
        if cache is None:
            cache = {}
        batch, height, width = input.shape
        input = (
            F.one_hot(input, self.n_class).permute(0, 3, 1, 2).type_as(self.background)
        )
        # Shift so each position only sees strictly-previous pixels.
        horizontal = shift_down(self.horizontal(input))
        vertical = shift_right(self.vertical(input))
        out = horizontal + vertical

        background = self.background[:, :, :height, :].expand(batch, 2, height, width)

        if condition is not None:
            if 'condition' in cache:
                # Reuse the cached, already-upsampled condition map.
                condition = cache['condition']
                condition = condition[:, :, :height, :]

            else:
                condition = (
                    F.one_hot(condition, self.n_class)
                    .permute(0, 3, 1, 2)
                    .type_as(self.background)
                )
                condition = self.cond_resnet(condition)
                # Upsample the coarser condition grid to this model's resolution.
                condition = F.interpolate(condition, scale_factor=2)
                cache['condition'] = condition.detach().clone()
                condition = condition[:, :, :height, :]

        for block in self.blocks:
            out = block(out, background, condition=condition)

        out = self.out(out)

        return out, cache
def load_model(model, checkpoint, device):
    """Instantiate a model by name and load its weights from checkpoint/<checkpoint>.

    Args:
        model: one of 'vqvae', 'pixelsnail_top', 'pixelsnail_bottom'.
        checkpoint: filename inside the 'checkpoint' directory.
        device: target device for the loaded model.

    Returns:
        The model in eval mode on *device*.

    Raises:
        ValueError: for an unknown model name (the original silently returned
            the name string, failing later at load_state_dict).
    """
    # Fix: map_location lets a GPU-saved checkpoint load on a CPU-only host.
    ckpt = torch.load(os.path.join('checkpoint', checkpoint), map_location=device)

    # PixelSNAIL checkpoints carry their hyper-parameters under 'args'.
    if 'args' in ckpt:
        args = ckpt['args']

    if model == 'vqvae':
        model = VQVAE()

    elif model == 'pixelsnail_top':
        model = PixelSNAIL(
            [32, 32],
            512,
            args.channel,
            5,
            4,
            args.n_res_block,
            args.n_res_channel,
            dropout=args.dropout,
            n_out_res_block=args.n_out_res_block,
        )

    elif model == 'pixelsnail_bottom':
        model = PixelSNAIL(
            [64, 64],
            512,
            args.channel,
            5,
            4,
            args.n_res_block,
            args.n_res_channel,
            attention=False,
            dropout=args.dropout,
            n_cond_res_block=args.n_cond_res_block,
            cond_res_channel=args.n_res_channel,
        )

    else:
        raise ValueError(f'unknown model: {model}')

    # Checkpoints may store the weights under a 'model' key or directly.
    if 'model' in ckpt:
        ckpt = ckpt['model']

    model.load_state_dict(ckpt)
    model = model.to(device)
    model.eval()

    return model
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.