diff --git "a/raw/negative/negative_raw.jsonl" "b/raw/negative/negative_raw.jsonl" new file mode 100644--- /dev/null +++ "b/raw/negative/negative_raw.jsonl" @@ -0,0 +1,1000 @@ +{"function": "def main(hf_ckpt_path, save_path, n_experts, mp):\n \"\"\"\n Converts and saves model checkpoint files into a specified format.\n\n Args:\n hf_ckpt_path (str): Path to the directory containing the input checkpoint files.\n save_path (str): Path to the directory where the converted checkpoint files will be saved.\n n_experts (int): Total number of experts in the model.\n mp (int): Model parallelism factor.\n \n Returns:\n None\n \"\"\"\n torch.set_num_threads(8)\n n_local_experts = n_experts // mp\n state_dicts = [{} for _ in range(mp)]\n\n for file_path in tqdm(glob(os.path.join(hf_ckpt_path, \"*.safetensors\"))):\n with safe_open(file_path, framework=\"pt\", device=\"cpu\") as f:\n for name in f.keys():\n if \"model.layers.61\" in name:\n continue\n param: torch.Tensor = f.get_tensor(name)\n if name.startswith(\"model.\"):\n name = name[len(\"model.\"):]\n name = name.replace(\"self_attn\", \"attn\")\n name = name.replace(\"mlp\", \"ffn\")\n name = name.replace(\"weight_scale_inv\", \"scale\")\n name = name.replace(\"e_score_correction_bias\", \"bias\")\n key = name.split(\".\")[-2]\n assert key in mapping, f\"Key {key} not found in mapping\"\n new_key, dim = mapping[key]\n name = name.replace(key, new_key)\n for i in range(mp):\n new_param = param\n if \"experts\" in name and \"shared_experts\" not in name:\n idx = int(name.split(\".\")[-3])\n if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:\n continue\n elif dim is not None:\n assert param.size(dim) % mp == 0, f\"Dimension {dim} must be divisible by {mp}\"\n shard_size = param.size(dim) // mp\n new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()\n state_dicts[i][name] = new_param\n\n os.makedirs(save_path, exist_ok=True)\n\n for i in trange(mp):\n save_file(state_dicts[i], os.path.join(save_path, f\"model{i}-mp{mp}.safetensors\"))\n\n for file_path in glob(os.path.join(hf_ckpt_path, \"*token*\")):\n new_file_path = os.path.join(save_path, os.path.basename(file_path))\n shutil.copyfile(file_path, new_file_path)", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/convert.py", "stars": 98205, "label": 0} +{"function": "def main(fp8_path, bf16_path):\n \"\"\"\n Converts FP8 weights to BF16 and saves the converted weights.\n\n This function reads FP8 weights from the specified directory, converts them to BF16,\n and saves the converted weights to another specified directory. 
It also updates the\n model index file to reflect the changes.\n\n Args:\n fp8_path (str): The path to the directory containing the FP8 weights and model index file.\n bf16_path (str): The path to the directory where the converted BF16 weights will be saved.\n\n Raises:\n KeyError: If a required scale_inv tensor is missing for a weight.\n\n Notes:\n - The function assumes that the FP8 weights are stored in safetensor files.\n - The function caches loaded safetensor files to optimize memory usage.\n - The function updates the model index file to remove references to scale_inv tensors.\n \"\"\"\n torch.set_default_dtype(torch.bfloat16)\n os.makedirs(bf16_path, exist_ok=True)\n model_index_file = os.path.join(fp8_path, \"model.safetensors.index.json\")\n with open(model_index_file, \"r\") as f:\n model_index = json.load(f)\n weight_map = model_index[\"weight_map\"]\n \n # Cache for loaded safetensor files\n loaded_files = {}\n fp8_weight_names = []\n\n # Helper function to get tensor from the correct file\n def get_tensor(tensor_name):\n \"\"\"\n Retrieves a tensor from the cached safetensor files or loads it from disk if not cached.\n\n Args:\n tensor_name (str): The name of the tensor to retrieve.\n\n Returns:\n torch.Tensor: The retrieved tensor.\n\n Raises:\n KeyError: If the tensor does not exist in the safetensor file.\n \"\"\"\n file_name = weight_map[tensor_name]\n if file_name not in loaded_files:\n file_path = os.path.join(fp8_path, file_name)\n loaded_files[file_name] = load_file(file_path, device=\"cuda\")\n return loaded_files[file_name][tensor_name]\n\n safetensor_files = list(glob(os.path.join(fp8_path, \"*.safetensors\")))\n safetensor_files.sort()\n for safetensor_file in tqdm(safetensor_files):\n file_name = os.path.basename(safetensor_file)\n current_state_dict = load_file(safetensor_file, device=\"cuda\")\n loaded_files[file_name] = current_state_dict\n \n new_state_dict = {}\n for weight_name, weight in current_state_dict.items():\n if weight_name.endswith(\"_scale_inv\"):\n continue\n elif weight.element_size() == 1: # FP8 weight\n scale_inv_name = f\"{weight_name}_scale_inv\"\n try:\n # Get scale_inv from the correct file\n scale_inv = get_tensor(scale_inv_name)\n fp8_weight_names.append(weight_name)\n new_state_dict[weight_name] = weight_dequant(weight, scale_inv)\n except KeyError:\n print(f\"Warning: Missing scale_inv tensor for {weight_name}, skipping conversion\")\n new_state_dict[weight_name] = weight\n else:\n new_state_dict[weight_name] = weight\n \n new_safetensor_file = os.path.join(bf16_path, file_name)\n save_file(new_state_dict, new_safetensor_file)\n \n # Memory management: keep only the 2 most recently used files\n if len(loaded_files) > 2:\n oldest_file = next(iter(loaded_files))\n del loaded_files[oldest_file]\n torch.cuda.empty_cache()\n \n # Update model index\n new_model_index_file = os.path.join(bf16_path, \"model.safetensors.index.json\")\n for weight_name in fp8_weight_names:\n scale_inv_name = f\"{weight_name}_scale_inv\"\n if scale_inv_name in weight_map:\n weight_map.pop(scale_inv_name)\n with open(new_model_index_file, \"w\") as f:\n json.dump({\"metadata\": {}, \"weight_map\": weight_map}, f, indent=2)", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/fp8_cast_bf16.py", "stars": 98205, "label": 0} +{"function": " def get_tensor(tensor_name):\n \"\"\"\n Retrieves a tensor from the cached safetensor files or loads it from disk if not cached.\n\n Args:\n tensor_name (str): The name of the 
tensor to retrieve.\n\n Returns:\n torch.Tensor: The retrieved tensor.\n\n Raises:\n KeyError: If the tensor does not exist in the safetensor file.\n \"\"\"\n file_name = weight_map[tensor_name]\n if file_name not in loaded_files:\n file_path = os.path.join(fp8_path, file_name)\n loaded_files[file_name] = load_file(file_path, device=\"cuda\")\n return loaded_files[file_name][tensor_name]", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/fp8_cast_bf16.py", "stars": 98205, "label": 0} +{"function": "def sample(logits, temperature: float = 1.0):\n \"\"\"\n Samples a token from the logits using temperature scaling.\n\n Args:\n logits (torch.Tensor): The logits tensor for token predictions.\n temperature (float, optional): Temperature for scaling logits. Defaults to 1.0.\n\n Returns:\n torch.Tensor: The sampled token.\n \"\"\"\n logits = logits / max(temperature, 1e-5)\n probs = torch.softmax(logits, dim=-1)\n return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/generate.py", "stars": 98205, "label": 0} +{"function": "def generate(\n model: Transformer,\n prompt_tokens: List[List[int]],\n max_new_tokens: int,\n eos_id: int,\n temperature: float = 1.0\n) -> List[List[int]]:\n \"\"\"\n Generates new tokens based on the given prompt tokens using the specified model.\n\n Args:\n model (Transformer): The transformer model used for token generation.\n prompt_tokens (List[List[int]]): A list of lists containing the prompt tokens for each sequence.\n max_new_tokens (int): The maximum number of new tokens to generate.\n eos_id (int): The end-of-sequence token ID.\n temperature (float, optional): The temperature value for sampling. 
Defaults to 1.0.\n\n Returns:\n List[List[int]]: A list of lists containing the generated tokens for each sequence.\n \"\"\"\n prompt_lens = [len(t) for t in prompt_tokens]\n assert max(prompt_lens) <= model.max_seq_len, f\"Prompt length exceeds model maximum sequence length (max_seq_len={model.max_seq_len})\"\n total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))\n tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device=\"cuda\")\n for i, t in enumerate(prompt_tokens):\n tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device=\"cuda\")\n prev_pos = 0\n finished = torch.tensor([False] * len(prompt_tokens), device=\"cuda\")\n prompt_mask = tokens != -1\n for cur_pos in range(min(prompt_lens), total_len):\n logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)\n if temperature > 0:\n next_token = sample(logits, temperature)\n else:\n next_token = logits.argmax(dim=-1)\n next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)\n tokens[:, cur_pos] = next_token\n finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)\n prev_pos = cur_pos\n if finished.all():\n break\n completion_tokens = []\n for i, toks in enumerate(tokens.tolist()):\n toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]\n if eos_id in toks:\n toks = toks[:toks.index(eos_id)]\n completion_tokens.append(toks)\n return completion_tokens", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/generate.py", "stars": 98205, "label": 0} +{"function": "def main(\n ckpt_path: str,\n config: str,\n input_file: str = \"\",\n interactive: bool = True,\n max_new_tokens: int = 100,\n temperature: float = 1.0,\n) -> None:\n \"\"\"\n Main function to load the model and perform interactive or batch text generation.\n\n Args:\n ckpt_path (str): Path to the model checkpoint directory.\n config (str): Path to the model configuration file.\n input_file (str, optional): Path to a file containing input prompts. Defaults to \"\".\n interactive (bool, optional): Whether to run in interactive mode. Defaults to True.\n max_new_tokens (int, optional): Maximum number of new tokens to generate. Defaults to 100.\n temperature (float, optional): Temperature for sampling. 
Defaults to 1.0.\n \"\"\"\n world_size = int(os.getenv(\"WORLD_SIZE\", \"1\"))\n rank = int(os.getenv(\"RANK\", \"0\"))\n local_rank = int(os.getenv(\"LOCAL_RANK\", \"0\"))\n if world_size > 1:\n dist.init_process_group(\"nccl\")\n global print\n if rank != 0:\n print = lambda *_, **__: None\n torch.cuda.set_device(local_rank)\n torch.set_default_dtype(torch.bfloat16)\n torch.set_num_threads(8)\n torch.manual_seed(965)\n with open(config) as f:\n args = ModelArgs(**json.load(f))\n print(args)\n with torch.device(\"cuda\"):\n model = Transformer(args)\n tokenizer = AutoTokenizer.from_pretrained(ckpt_path)\n tokenizer.decode(generate(model, [tokenizer.encode(\"DeepSeek\")], 2, -1, 1.)[0])\n load_model(model, os.path.join(ckpt_path, f\"model{rank}-mp{world_size}.safetensors\"))\n\n if interactive:\n messages = []\n while True:\n if world_size == 1:\n prompt = input(\">>> \")\n elif rank == 0:\n prompt = input(\">>> \")\n objects = [prompt]\n dist.broadcast_object_list(objects, 0)\n else:\n objects = [None]\n dist.broadcast_object_list(objects, 0)\n prompt = objects[0]\n if prompt == \"/exit\":\n break\n elif prompt == \"/clear\":\n messages.clear()\n continue\n messages.append({\"role\": \"user\", \"content\": prompt})\n prompt_tokens = tokenizer.apply_chat_template(messages, add_generation_prompt=True)\n completion_tokens = generate(model, [prompt_tokens], max_new_tokens, tokenizer.eos_token_id, temperature)\n completion = tokenizer.decode(completion_tokens[0], skip_special_tokens=True)\n print(completion)\n messages.append({\"role\": \"assistant\", \"content\": completion})\n else:\n with open(input_file) as f:\n prompts = [line.strip() for line in f.readlines()]\n assert len(prompts) <= args.max_batch_size, f\"Number of prompts exceeds maximum batch size ({args.max_batch_size})\"\n prompt_tokens = [tokenizer.apply_chat_template([{\"role\": \"user\", \"content\": prompt}], add_generation_prompt=True) for prompt in prompts]\n completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)\n completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)\n for prompt, completion in zip(prompts, completions):\n print(\"Prompt:\", prompt)\n print(\"Completion:\", completion)\n print()\n\n if world_size > 1:\n dist.destroy_process_group()", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/generate.py", "stars": 98205, "label": 0} +{"function": "def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):\n \"\"\"\n Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and the scaling factor in `s_ptr`.\n\n Args:\n x_ptr (triton.Pointer): Pointer to the input tensor.\n y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored.\n s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored.\n BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance.\n\n Returns:\n None\n \"\"\"\n pid = tl.program_id(axis=0)\n offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n x = tl.load(x_ptr + offs).to(tl.float32)\n s = tl.max(tl.abs(x)) / 448.\n y = x / s\n y = y.to(y_ptr.dtype.element_ty)\n tl.store(y_ptr + offs, y)\n tl.store(s_ptr + pid, s)", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/kernel.py", "stars": 98205, "label": 0} +{"function": "def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, 
torch.Tensor]:\n \"\"\"\n Quantizes the input tensor `x` using block-wise quantization.\n\n Args:\n x (torch.Tensor): The input tensor to be quantized. Must be contiguous and its last dimension size must be divisible by `block_size`.\n block_size (int, optional): The size of the blocks to be used for quantization. Default is 128.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple containing:\n - The quantized tensor with dtype `torch.float8_e4m3fn`.\n - A tensor of scaling factors with dtype `torch.float32`.\n \"\"\"\n assert x.is_contiguous(), 'Input tensor must be contiguous'\n assert x.size(-1) % block_size == 0, f'Last dimension size must be divisible by block_size (block_size={block_size})'\n y = torch.empty_like(x, dtype=torch.float8_e4m3fn)\n s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)\n grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )\n act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)\n return y, s", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/kernel.py", "stars": 98205, "label": 0} +{"function": "def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):\n \"\"\"\n Dequantizes weights using the provided scaling factors and stores the result.\n\n Args:\n x_ptr (tl.pointer): Pointer to the quantized weights.\n s_ptr (tl.pointer): Pointer to the scaling factors.\n y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights.\n M (int): Number of rows in the weight matrix.\n N (int): Number of columns in the weight matrix.\n BLOCK_SIZE (tl.constexpr): Size of the block for tiling.\n\n Returns:\n None\n \"\"\"\n pid_m = tl.program_id(axis=0)\n pid_n = tl.program_id(axis=1)\n n = tl.cdiv(N, BLOCK_SIZE)\n offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n offs = offs_m[:, None] * N + offs_n[None, :]\n mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)\n x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)\n s = tl.load(s_ptr + pid_m * n + pid_n)\n y = x * s\n tl.store(y_ptr + offs, y, mask=mask)", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/kernel.py", "stars": 98205, "label": 0} +{"function": "def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:\n \"\"\"\n Dequantizes the given weight tensor using the provided scale tensor.\n\n Args:\n x (torch.Tensor): The quantized weight tensor of shape (M, N).\n s (torch.Tensor): The scale tensor of shape (M//block_size, N//block_size).\n block_size (int, optional): The block size to use for dequantization. 
Defaults to 128.\n\n Returns:\n torch.Tensor: The dequantized weight tensor of the same shape as `x`.\n\n Raises:\n AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2.\n \"\"\"\n assert x.is_contiguous() and s.is_contiguous(), 'Input tensors must be contiguous'\n assert x.dim() == 2 and s.dim() == 2, 'Input tensors must have 2 dimensions'\n M, N = x.size()\n y = torch.empty_like(x, dtype=torch.get_default_dtype())\n grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))\n weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)\n return y", "creation_date": "2024-12-26T11:01:57Z", "repo": "deepseek-ai/DeepSeek-V3", "file_path": "inference/kernel.py", "stars": 98205, "label": 0} +{"function": "async def test_browser_close_doesnt_affect_external_httpx_clients():\n\t\"\"\"\n\tTest that Browser.close() doesn't close HTTPX clients created outside the Browser instance.\n\tThis test demonstrates the issue where Browser.close() is closing all HTTPX clients.\n\t\"\"\"\n\t# Create an external HTTPX client that should remain open\n\texternal_client = httpx.AsyncClient()\n\n\t# Create a BrowserSession instance\n\tbrowser_session = BrowserSession(browser_profile=BrowserProfile(headless=True))\n\tawait browser_session.start()\n\n\t# Close the browser (which should trigger cleanup_httpx_clients)\n\tawait browser_session.stop()\n\n\t# Check if the external client is still usable\n\ttry:\n\t\t# If the client is closed, this will raise RuntimeError\n\t\t# Using a simple HEAD request to a reliable URL\n\t\tawait external_client.head('https://www.example.com', timeout=2.0)\n\t\tclient_is_closed = False\n\texcept RuntimeError as e:\n\t\t# If we get \"Cannot send a request, as the client has been closed\"\n\t\tclient_is_closed = 'client has been closed' in str(e)\n\texcept Exception:\n\t\t# Any other exception means the client is not closed but request failed\n\t\tclient_is_closed = False\n\tfinally:\n\t\t# Always clean up our test client properly\n\t\tawait external_client.aclose()\n\n\t# Our external client should not be closed by browser.close()\n\tassert not client_is_closed, 'External HTTPX client was incorrectly closed by Browser.close()'", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/httpx_client_test.py", "stars": 65344, "label": 0} +{"function": "async def test_take_full_page_screenshot():\n\tbrowser_session = BrowserSession(browser_profile=BrowserProfile(headless=True, disable_security=True))\n\tawait browser_session.start()\n\ttry:\n\t\tpage = await browser_session.get_current_page()\n\t\t# Go to a test page\n\t\tawait page.goto('https://example.com')\n\n\t\tawait asyncio.sleep(3)\n\t\t# Take full page screenshot\n\t\tscreenshot_b64 = await browser_session.take_screenshot(full_page=True)\n\t\tawait asyncio.sleep(3)\n\t\t# Verify screenshot is not empty and is valid base64\n\t\tassert screenshot_b64 is not None\n\t\tassert isinstance(screenshot_b64, str)\n\t\tassert len(screenshot_b64) > 0\n\n\t\t# Test we can decode the base64 string\n\t\ttry:\n\t\t\tbase64.b64decode(screenshot_b64)\n\t\texcept Exception as e:\n\t\t\tpytest.fail(f'Failed to decode base64 screenshot: {str(e)}')\n\tfinally:\n\t\tawait browser_session.stop()", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/screenshot_test.py", "stars": 65344, "label": 0} +{"function": "\tdef test_get_prompt_description_no_filters(self):\n\t\t\"\"\"Test that system prompt 
only includes actions with no filters\"\"\"\n\t\tregistry = ActionRegistry()\n\n\t\t# Add actions with and without filters\n\t\tno_filter_action = RegisteredAction(\n\t\t\tname='no_filter_action',\n\t\t\tdescription='Action with no filters',\n\t\t\tfunction=lambda: None,\n\t\t\tparam_model=EmptyParamModel,\n\t\t\tdomains=None,\n\t\t\tpage_filter=None,\n\t\t)\n\n\t\tpage_filter_action = RegisteredAction(\n\t\t\tname='page_filter_action',\n\t\t\tdescription='Action with page filter',\n\t\t\tfunction=lambda: None,\n\t\t\tparam_model=EmptyParamModel,\n\t\t\tdomains=None,\n\t\t\tpage_filter=lambda page: True,\n\t\t)\n\n\t\tdomain_filter_action = RegisteredAction(\n\t\t\tname='domain_filter_action',\n\t\t\tdescription='Action with domain filter',\n\t\t\tfunction=lambda: None,\n\t\t\tparam_model=EmptyParamModel,\n\t\t\tdomains=['example.com'],\n\t\t\tpage_filter=None,\n\t\t)\n\n\t\tregistry.actions = {\n\t\t\t'no_filter_action': no_filter_action,\n\t\t\t'page_filter_action': page_filter_action,\n\t\t\t'domain_filter_action': domain_filter_action,\n\t\t}\n\n\t\t# System prompt (no page) should only include actions with no filters\n\t\tsystem_description = registry.get_prompt_description()\n\t\tassert 'no_filter_action' in system_description\n\t\tassert 'page_filter_action' not in system_description\n\t\tassert 'domain_filter_action' not in system_description", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\tdef test_page_filter_matching(self):\n\t\t\"\"\"Test that page filters work correctly\"\"\"\n\t\tregistry = ActionRegistry()\n\n\t\t# Create a mock page\n\t\tmock_page = MagicMock(spec=Page)\n\t\tmock_page.url = 'https://example.com/page'\n\n\t\t# Create actions with different page filters\n\t\tmatching_action = RegisteredAction(\n\t\t\tname='matching_action',\n\t\t\tdescription='Action with matching page filter',\n\t\t\tfunction=lambda: None,\n\t\t\tparam_model=EmptyParamModel,\n\t\t\tdomains=None,\n\t\t\tpage_filter=lambda page: 'example.com' in page.url,\n\t\t)\n\n\t\tnon_matching_action = RegisteredAction(\n\t\t\tname='non_matching_action',\n\t\t\tdescription='Action with non-matching page filter',\n\t\t\tfunction=lambda: None,\n\t\t\tparam_model=EmptyParamModel,\n\t\t\tdomains=None,\n\t\t\tpage_filter=lambda page: 'other.com' in page.url,\n\t\t)\n\n\t\tregistry.actions = {'matching_action': matching_action, 'non_matching_action': non_matching_action}\n\n\t\t# Page-specific description should only include matching actions\n\t\tpage_description = registry.get_prompt_description(mock_page)\n\t\tassert 'matching_action' in page_description\n\t\tassert 'non_matching_action' not in page_description", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\tdef test_domain_filter_matching(self):\n\t\t\"\"\"Test that domain filters work correctly with glob patterns\"\"\"\n\t\tregistry = ActionRegistry()\n\n\t\t# Create actions with different domain patterns\n\t\tactions = {\n\t\t\t'exact_match': RegisteredAction(\n\t\t\t\tname='exact_match',\n\t\t\t\tdescription='Exact domain match',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['example.com'],\n\t\t\t\tpage_filter=None,\n\t\t\t),\n\t\t\t'subdomain_match': RegisteredAction(\n\t\t\t\tname='subdomain_match',\n\t\t\t\tdescription='Subdomain wildcard 
match',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['*.example.com'],\n\t\t\t\tpage_filter=None,\n\t\t\t),\n\t\t\t'prefix_match': RegisteredAction(\n\t\t\t\tname='prefix_match',\n\t\t\t\tdescription='Prefix wildcard match',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['example*'],\n\t\t\t\tpage_filter=None,\n\t\t\t),\n\t\t\t'non_matching': RegisteredAction(\n\t\t\t\tname='non_matching',\n\t\t\t\tdescription='Non-matching domain',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['other.com'],\n\t\t\t\tpage_filter=None,\n\t\t\t),\n\t\t}\n\n\t\tregistry.actions = actions\n\n\t\t# Test exact domain match\n\t\tmock_page = MagicMock(spec=Page)\n\t\tmock_page.url = 'https://example.com/page'\n\n\t\texact_match_description = registry.get_prompt_description(mock_page)\n\t\tassert 'exact_match' in exact_match_description\n\t\tassert 'non_matching' not in exact_match_description\n\n\t\t# Test subdomain match\n\t\tmock_page.url = 'https://sub.example.com/page'\n\t\tsubdomain_match_description = registry.get_prompt_description(mock_page)\n\t\tassert 'subdomain_match' in subdomain_match_description\n\t\tassert 'exact_match' not in subdomain_match_description\n\n\t\t# Test prefix match\n\t\tmock_page.url = 'https://example123.org/page'\n\t\tprefix_match_description = registry.get_prompt_description(mock_page)\n\t\tassert 'prefix_match' in prefix_match_description", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\tdef test_domain_and_page_filter_together(self):\n\t\t\"\"\"Test that actions can be filtered by both domain and page filter\"\"\"\n\t\tregistry = ActionRegistry()\n\n\t\t# Create a mock page\n\t\tmock_page = MagicMock(spec=Page)\n\t\tmock_page.url = 'https://example.com/admin'\n\n\t\t# Actions with different combinations of filters\n\t\tactions = {\n\t\t\t'domain_only': RegisteredAction(\n\t\t\t\tname='domain_only',\n\t\t\t\tdescription='Domain filter only',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['example.com'],\n\t\t\t\tpage_filter=None,\n\t\t\t),\n\t\t\t'page_only': RegisteredAction(\n\t\t\t\tname='page_only',\n\t\t\t\tdescription='Page filter only',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=None,\n\t\t\t\tpage_filter=lambda page: 'admin' in page.url,\n\t\t\t),\n\t\t\t'both_matching': RegisteredAction(\n\t\t\t\tname='both_matching',\n\t\t\t\tdescription='Both filters matching',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['example.com'],\n\t\t\t\tpage_filter=lambda page: 'admin' in page.url,\n\t\t\t),\n\t\t\t'both_one_fail': RegisteredAction(\n\t\t\t\tname='both_one_fail',\n\t\t\t\tdescription='One filter fails',\n\t\t\t\tfunction=lambda: None,\n\t\t\t\tparam_model=EmptyParamModel,\n\t\t\t\tdomains=['other.com'],\n\t\t\t\tpage_filter=lambda page: 'admin' in page.url,\n\t\t\t),\n\t\t}\n\n\t\tregistry.actions = actions\n\n\t\t# Check that only actions with matching filters are included\n\t\tdescription = registry.get_prompt_description(mock_page)\n\t\tassert 'domain_only' in description # Domain matches\n\t\tassert 'page_only' in description # Page filter matches\n\t\tassert 'both_matching' in description # Both filters match\n\t\tassert 'both_one_fail' not in description # Domain filter fails\n\n\t\t# Test with 
different URL where page filter fails\n\t\tmock_page.url = 'https://example.com/dashboard'\n\t\tdescription = registry.get_prompt_description(mock_page)\n\t\tassert 'domain_only' in description # Domain matches\n\t\tassert 'page_only' not in description # Page filter fails\n\t\tassert 'both_matching' not in description # Page filter fails\n\t\tassert 'both_one_fail' not in description # Domain filter fails", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\tasync def test_registry_action_decorator(self):\n\t\t\"\"\"Test the action decorator with filters\"\"\"\n\t\tregistry = Registry()\n\n\t\t# Define actions with different filters\n\t\t@registry.action(\n\t\t\tdescription='No filter action',\n\t\t)\n\t\tdef no_filter_action():\n\t\t\tpass\n\n\t\t@registry.action(description='Domain filter action', domains=['example.com'])\n\t\tdef domain_filter_action():\n\t\t\tpass\n\n\t\t@registry.action(description='Page filter action', page_filter=lambda page: 'admin' in page.url)\n\t\tdef page_filter_action():\n\t\t\tpass\n\n\t\t# Check that system prompt only includes the no_filter_action\n\t\tsystem_description = registry.get_prompt_description()\n\t\tassert 'No filter action' in system_description\n\t\tassert 'Domain filter action' not in system_description\n\t\tassert 'Page filter action' not in system_description\n\n\t\t# Check that page-specific prompt includes the right actions\n\t\tmock_page = MagicMock(spec=Page)\n\t\tmock_page.url = 'https://example.com/admin'\n\n\t\tpage_description = registry.get_prompt_description(mock_page)\n\t\tassert 'Domain filter action' in page_description\n\t\tassert 'Page filter action' in page_description", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\tasync def test_action_model_creation(self):\n\t\t\"\"\"Test that action models are created correctly with filters\"\"\"\n\t\tregistry = Registry()\n\n\t\t# Define actions with different filters\n\t\t@registry.action(\n\t\t\tdescription='No filter action',\n\t\t)\n\t\tdef no_filter_action():\n\t\t\tpass\n\n\t\t@registry.action(description='Domain filter action', domains=['example.com'])\n\t\tdef domain_filter_action():\n\t\t\tpass\n\n\t\t@registry.action(description='Page filter action', page_filter=lambda page: 'admin' in page.url)\n\t\tdef page_filter_action():\n\t\t\tpass\n\n\t\t@registry.action(description='Both filters action', domains=['example.com'], page_filter=lambda page: 'admin' in page.url)\n\t\tdef both_filters_action():\n\t\t\tpass\n\n\t\t# Initial action model should only include no_filter_action\n\t\tinitial_model = registry.create_action_model()\n\t\tassert 'no_filter_action' in initial_model.model_fields\n\t\tassert 'domain_filter_action' not in initial_model.model_fields\n\t\tassert 'page_filter_action' not in initial_model.model_fields\n\t\tassert 'both_filters_action' not in initial_model.model_fields\n\n\t\t# Action model with matching page should include all matching actions\n\t\tmock_page = MagicMock(spec=Page)\n\t\tmock_page.url = 'https://example.com/admin'\n\n\t\tpage_model = registry.create_action_model(page=mock_page)\n\t\tassert 'no_filter_action' in page_model.model_fields\n\t\tassert 'domain_filter_action' in page_model.model_fields\n\t\tassert 'page_filter_action' in page_model.model_fields\n\t\tassert 'both_filters_action' in 
page_model.model_fields\n\n\t\t# Action model with non-matching domain should exclude domain-filtered actions\n\t\tmock_page.url = 'https://other.com/admin'\n\t\tnon_matching_domain_model = registry.create_action_model(page=mock_page)\n\t\tassert 'no_filter_action' in non_matching_domain_model.model_fields\n\t\tassert 'domain_filter_action' not in non_matching_domain_model.model_fields\n\t\tassert 'page_filter_action' in non_matching_domain_model.model_fields\n\t\tassert 'both_filters_action' not in non_matching_domain_model.model_fields\n\n\t\t# Action model with non-matching page filter should exclude page-filtered actions\n\t\tmock_page.url = 'https://example.com/dashboard'\n\t\tnon_matching_page_model = registry.create_action_model(page=mock_page)\n\t\tassert 'no_filter_action' in non_matching_page_model.model_fields\n\t\tassert 'domain_filter_action' in non_matching_page_model.model_fields\n\t\tassert 'page_filter_action' not in non_matching_page_model.model_fields\n\t\tassert 'both_filters_action' not in non_matching_page_model.model_fields", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\t\tdef no_filter_action():\n\t\t\tpass", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "\t\tdef domain_filter_action():\n\t\t\tpass", "creation_date": "2025-06-21T11:47:46Z", "repo": "browser-use/browser-use", "file_path": "tests/old/test_action_filters.py", "stars": 65344, "label": 0} +{"function": "def check_node_version():\n \"\"\"Check if Node.js version is sufficient for building the extension.\"\"\"\n try:\n result = subprocess.run(\n ['node', '--version'], capture_output=True, text=True, check=True\n )\n version_str = result.stdout.strip()\n # Extract major version number (e.g., \"v12.22.9\" -> 12)\n major_version = int(version_str.lstrip('v').split('.')[0])\n return major_version >= 18 # Align with frontend actual usage (18.20.1)\n except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n return False", "creation_date": "2025-07-03T20:42:06Z", "repo": "All-Hands-AI/OpenHands", "file_path": "build_vscode.py", "stars": 60337, "label": 0} +{"function": "def build_vscode_extension():\n \"\"\"Builds the VS Code extension.\"\"\"\n vsix_path = VSCODE_EXTENSION_DIR / VSIX_FILENAME\n\n # Check if VSCode extension build is disabled via environment variable\n if os.environ.get('SKIP_VSCODE_BUILD', '').lower() in ('1', 'true', 'yes'):\n print('--- Skipping VS Code extension build (SKIP_VSCODE_BUILD is set) ---')\n if vsix_path.exists():\n print(f'--- Using existing VS Code extension: {vsix_path} ---')\n else:\n print('--- No pre-built VS Code extension found ---')\n return\n\n # Check Node.js version - if insufficient, use pre-built extension as fallback\n if not check_node_version():\n print('--- Warning: Node.js version < 18 detected or Node.js not found ---')\n print('--- Skipping VS Code extension build (requires Node.js >= 18) ---')\n print('--- Using pre-built extension if available ---')\n\n if not vsix_path.exists():\n print('--- Warning: No pre-built VS Code extension found ---')\n print('--- VS Code extension will not be available ---')\n else:\n print(f'--- Using pre-built VS Code extension: {vsix_path} ---')\n return\n\n print(f'--- Building VS Code extension in {VSCODE_EXTENSION_DIR} ---')\n\n try:\n # Ensure npm dependencies are 
installed\n print('--- Running npm install for VS Code extension ---')\n subprocess.run(\n ['npm', 'install'],\n cwd=VSCODE_EXTENSION_DIR,\n check=True,\n shell=os.name == 'nt',\n )\n\n # Package the extension\n print(f'--- Packaging VS Code extension ({VSIX_FILENAME}) ---')\n subprocess.run(\n ['npm', 'run', 'package-vsix'],\n cwd=VSCODE_EXTENSION_DIR,\n check=True,\n shell=os.name == 'nt',\n )\n\n # Verify the generated .vsix file exists\n if not vsix_path.exists():\n raise FileNotFoundError(\n f'VS Code extension package not found after build: {vsix_path}'\n )\n\n print(f'--- VS Code extension built successfully: {vsix_path} ---')\n\n except subprocess.CalledProcessError as e:\n print(f'--- Warning: Failed to build VS Code extension: {e} ---')\n print('--- Continuing without building extension ---')\n if not vsix_path.exists():\n print('--- Warning: No pre-built VS Code extension found ---')\n print('--- VS Code extension will not be available ---')", "creation_date": "2025-07-03T20:42:06Z", "repo": "All-Hands-AI/OpenHands", "file_path": "build_vscode.py", "stars": 60337, "label": 0} +{"function": "def build(setup_kwargs):\n \"\"\"\n This function is called by Poetry during the build process.\n `setup_kwargs` is a dictionary that will be passed to `setuptools.setup()`.\n \"\"\"\n print('--- Running custom Poetry build script (build_vscode.py) ---')\n\n # Build the VS Code extension and place the .vsix file\n build_vscode_extension()\n\n # Poetry will handle including files based on pyproject.toml `include` patterns.\n # Ensure openhands/integrations/vscode/*.vsix is included there.\n\n print('--- Custom Poetry build script (build_vscode.py) finished ---')", "creation_date": "2025-07-03T20:42:06Z", "repo": "All-Hands-AI/OpenHands", "file_path": "build_vscode.py", "stars": 60337, "label": 0} +{"function": " def __init__(\n self,\n config: OpenHandsConfig,\n event_stream: EventStream,\n sid: str = 'default',\n plugins: list[PluginRequirement] | None = None,\n env_vars: dict[str, str] | None = None,\n status_callback: Callable | None = None,\n attach_to_existing: bool = False,\n headless_mode: bool = True,\n user_id: str | None = None,\n git_provider_tokens: PROVIDER_TOKEN_TYPE | None = None,\n ):\n # Read Runloop API key from environment variable\n runloop_api_key = os.getenv('RUNLOOP_API_KEY')\n if not runloop_api_key:\n raise ValueError('RUNLOOP_API_KEY environment variable is required for Runloop runtime')\n \n self.devbox: DevboxView | None = None\n self.config = config\n self.runloop_api_client = Runloop(\n bearer_token=runloop_api_key,\n )\n self.container_name = CONTAINER_NAME_PREFIX + sid\n super().__init__(\n config,\n event_stream,\n sid,\n plugins,\n env_vars,\n status_callback,\n attach_to_existing,\n headless_mode,\n user_id,\n git_provider_tokens,\n )\n # Buffer for container logs\n self._vscode_url: str | None = None", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": " def action_execution_server_url(self):\n return self.api_url", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": " def _wait_for_devbox(self, devbox: DevboxView) -> DevboxView:\n \"\"\"Poll devbox status until it is running\"\"\"\n if devbox == 'running':\n return devbox\n\n devbox = self.runloop_api_client.devboxes.retrieve(id=devbox.id)\n if 
devbox.status != 'running':\n raise ConnectionRefusedError('Devbox is not running')\n\n # Devbox is connected and running\n logging.debug(f'devbox.id={devbox.id} is running')\n return devbox", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": " def _create_new_devbox(self) -> DevboxView:\n # Note: Runloop connect\n start_command = get_action_execution_server_startup_command(\n server_port=self._sandbox_port,\n plugins=self.plugins,\n app_config=self.config,\n )\n\n # Add some additional commands based on our image\n # NB: start off as root, action_execution_server will ultimately choose user but expects all context\n # (ie browser) to be installed as root\n # Convert start_command list to a single command string with additional setup\n start_command_str = (\n 'export MAMBA_ROOT_PREFIX=/openhands/micromamba && '\n 'cd /openhands/code && '\n '/openhands/micromamba/bin/micromamba run -n openhands poetry config virtualenvs.path /openhands/poetry && '\n + ' '.join(start_command)\n )\n entrypoint = f\"sudo bash -c '{start_command_str}'\"\n\n devbox = self.runloop_api_client.devboxes.create(\n entrypoint=entrypoint,\n name=self.sid,\n environment_variables={'DEBUG': 'true'} if self.config.debug else {},\n prebuilt='openhands',\n launch_parameters=LaunchParameters(\n available_ports=[self._sandbox_port, self._vscode_port],\n resource_size_request='LARGE',\n launch_commands=[\n f'mkdir -p {self.config.workspace_mount_path_in_sandbox}'\n ],\n ),\n metadata={'container-name': self.container_name},\n )\n return self._wait_for_devbox(devbox)", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": " async def connect(self):\n self.set_runtime_status(RuntimeStatus.STARTING_RUNTIME)\n\n if self.attach_to_existing:\n active_devboxes = self.runloop_api_client.devboxes.list(\n status='running'\n ).devboxes\n self.devbox = next(\n (devbox for devbox in active_devboxes if devbox.name == self.sid), None\n )\n\n if self.devbox is None:\n self.devbox = self._create_new_devbox()\n\n # Create tunnel - this will return a stable url, so is safe to call if we are attaching to existing\n tunnel = self.runloop_api_client.devboxes.create_tunnel(\n id=self.devbox.id,\n port=self._sandbox_port,\n )\n\n self.api_url = tunnel.url\n logger.info(f'Container started. 
Server url: {self.api_url}')\n\n # End Runloop connect\n # NOTE: Copied from DockerRuntime\n logger.info('Waiting for client to become ready...')\n self.set_runtime_status(RuntimeStatus.STARTING_RUNTIME)\n self._wait_until_alive()\n\n if not self.attach_to_existing:\n self.setup_initial_env()\n\n logger.info(\n f'Container initialized with plugins: {[plugin.name for plugin in self.plugins]}'\n )\n self.set_runtime_status(RuntimeStatus.READY)", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": " def _wait_until_alive(self):\n super().check_if_alive()", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": " def close(self, rm_all_containers: bool | None = True):\n super().close()\n\n if self.attach_to_existing:\n return\n\n if self.devbox:\n self.runloop_api_client.devboxes.shutdown(self.devbox.id)", "creation_date": "2025-06-26T11:39:39Z", "repo": "All-Hands-AI/OpenHands", "file_path": "third_party/runtime/impl/runloop/runloop_runtime.py", "stars": 60337, "label": 0} +{"function": "def test_version() -> None:\n result = subprocess.run(\n [\"python\", \"-m\", \"markitdown\", \"--version\"], capture_output=True, text=True\n )\n\n assert result.returncode == 0, f\"CLI exited with error: {result.stderr}\"\n assert __version__ in result.stdout, f\"Version not found in output: {result.stdout}\"", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_misc.py", "stars": 60239, "label": 0} +{"function": "def test_invalid_flag() -> None:\n result = subprocess.run(\n [\"python\", \"-m\", \"markitdown\", \"--foobar\"], capture_output=True, text=True\n )\n\n assert result.returncode != 0, f\"CLI exited with error: {result.stderr}\"\n assert (\n \"unrecognized arguments\" in result.stderr\n ), \"Expected 'unrecognized arguments' to appear in STDERR\"\n assert \"SYNTAX\" in result.stderr, \"Expected 'SYNTAX' to appear in STDERR\"", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_misc.py", "stars": 60239, "label": 0} +{"function": "def shared_tmp_dir(tmp_path_factory):\n return tmp_path_factory.mktemp(\"pytest_tmp\")", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_vectors.py", "stars": 60239, "label": 0} +{"function": "def test_output_to_stdout(shared_tmp_dir, test_vector) -> None:\n \"\"\"Test that the CLI outputs to stdout correctly.\"\"\"\n\n result = subprocess.run(\n [\n \"python\",\n \"-m\",\n \"markitdown\",\n os.path.join(TEST_FILES_DIR, test_vector.filename),\n ],\n capture_output=True,\n text=True,\n )\n\n assert result.returncode == 0, f\"CLI exited with error: {result.stderr}\"\n for test_string in test_vector.must_include:\n assert test_string in result.stdout\n for test_string in test_vector.must_not_include:\n assert test_string not in result.stdout", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_vectors.py", "stars": 60239, "label": 0} +{"function": "def test_output_to_file(shared_tmp_dir, test_vector) -> None:\n \"\"\"Test that the CLI outputs to a file correctly.\"\"\"\n\n output_file = os.path.join(shared_tmp_dir, test_vector.filename + 
\".output\")\n result = subprocess.run(\n [\n \"python\",\n \"-m\",\n \"markitdown\",\n \"-o\",\n output_file,\n os.path.join(TEST_FILES_DIR, test_vector.filename),\n ],\n capture_output=True,\n text=True,\n )\n\n assert result.returncode == 0, f\"CLI exited with error: {result.stderr}\"\n assert os.path.exists(output_file), f\"Output file not created: {output_file}\"\n\n with open(output_file, \"r\") as f:\n output_data = f.read()\n for test_string in test_vector.must_include:\n assert test_string in output_data\n for test_string in test_vector.must_not_include:\n assert test_string not in output_data\n\n os.remove(output_file)\n assert not os.path.exists(output_file), f\"Output file not deleted: {output_file}\"", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_vectors.py", "stars": 60239, "label": 0} +{"function": "def test_input_from_stdin_without_hints(shared_tmp_dir, test_vector) -> None:\n \"\"\"Test that the CLI readds from stdin correctly.\"\"\"\n\n test_input = b\"\"\n with open(os.path.join(TEST_FILES_DIR, test_vector.filename), \"rb\") as stream:\n test_input = stream.read()\n\n result = subprocess.run(\n [\n \"python\",\n \"-m\",\n \"markitdown\",\n os.path.join(TEST_FILES_DIR, test_vector.filename),\n ],\n input=test_input,\n capture_output=True,\n text=False,\n )\n\n stdout = result.stdout.decode(locale.getpreferredencoding())\n assert (\n result.returncode == 0\n ), f\"CLI exited with error: {result.stderr.decode('utf-8')}\"\n for test_string in test_vector.must_include:\n assert test_string in stdout\n for test_string in test_vector.must_not_include:\n assert test_string not in stdout", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_vectors.py", "stars": 60239, "label": 0} +{"function": "def test_convert_url(shared_tmp_dir, test_vector):\n \"\"\"Test the conversion of a stream with no stream info.\"\"\"\n # Note: tmp_dir is not used here, but is needed to match the signature\n\n time.sleep(1) # Ensure we don't hit rate limits\n result = subprocess.run(\n [\"python\", \"-m\", \"markitdown\", TEST_FILES_URL + \"/\" + test_vector.filename],\n capture_output=True,\n text=False,\n )\n\n stdout = result.stdout.decode(locale.getpreferredencoding())\n assert result.returncode == 0, f\"CLI exited with error: {result.stderr}\"\n for test_string in test_vector.must_include:\n assert test_string in stdout\n for test_string in test_vector.must_not_include:\n assert test_string not in stdout", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_vectors.py", "stars": 60239, "label": 0} +{"function": "def test_output_to_file_with_data_uris(shared_tmp_dir, test_vector) -> None:\n \"\"\"Test CLI functionality when keep_data_uris is enabled\"\"\"\n\n output_file = os.path.join(shared_tmp_dir, test_vector.filename + \".output\")\n result = subprocess.run(\n [\n \"python\",\n \"-m\",\n \"markitdown\",\n \"--keep-data-uris\",\n \"-o\",\n output_file,\n os.path.join(TEST_FILES_DIR, test_vector.filename),\n ],\n capture_output=True,\n text=True,\n )\n\n assert result.returncode == 0, f\"CLI exited with error: {result.stderr}\"\n assert os.path.exists(output_file), f\"Output file not created: {output_file}\"\n\n with open(output_file, \"r\") as f:\n output_data = f.read()\n for test_string in test_vector.must_include:\n assert test_string in output_data\n for test_string in 
test_vector.must_not_include:\n assert test_string not in output_data\n\n os.remove(output_file)\n assert not os.path.exists(output_file), f\"Output file not deleted: {output_file}\"", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_cli_vectors.py", "stars": 60239, "label": 0} +{"function": "def validate_strings(result, expected_strings, exclude_strings=None):\n \"\"\"Validate presence or absence of specific strings.\"\"\"\n text_content = result.text_content.replace(\"\\\\\", \"\")\n for string in expected_strings:\n assert string in text_content\n if exclude_strings:\n for string in exclude_strings:\n assert string not in text_content", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": "packages/markitdown/tests/test_module_misc.py", "stars": 60239, "label": 0} +{"function": "def test_stream_info_operations() -> None:\n \"\"\"Test operations performed on StreamInfo objects.\"\"\"\n\n stream_info_original = StreamInfo(\n mimetype=\"mimetype.1\",\n extension=\"extension.1\",\n charset=\"charset.1\",\n filename=\"filename.1\",\n local_path=\"local_path.1\",\n url=\"url.1\",\n )\n\n # Check updating all attributes by keyword\n keywords = [\"mimetype\", \"extension\", \"charset\", \"filename\", \"local_path\", \"url\"]\n for keyword in keywords:\n updated_stream_info = stream_info_original.copy_and_update(\n **{keyword: f\"{keyword}.2\"}\n )\n\n # Make sure the targted attribute is updated\n assert getattr(updated_stream_info, keyword) == f\"{keyword}.2\"\n\n # Make sure the other attributes are unchanged\n for k in keywords:\n if k != keyword:\n assert getattr(stream_info_original, k) == getattr(\n updated_stream_info, k\n )\n\n # Check updating all attributes by passing a new StreamInfo object\n keywords = [\"mimetype\", \"extension\", \"charset\", \"filename\", \"local_path\", \"url\"]\n for keyword in keywords:\n updated_stream_info = stream_info_original.copy_and_update(\n StreamInfo(**{keyword: f\"{keyword}.2\"})\n )\n\n # Make sure the targted attribute is updated\n assert getattr(updated_stream_info, keyword) == f\"{keyword}.2\"\n\n # Make sure the other attributes are unchanged\n for k in keywords:\n if k != keyword:\n assert getattr(stream_info_original, k) == getattr(\n updated_stream_info, k\n )\n\n # Check mixing and matching\n updated_stream_info = stream_info_original.copy_and_update(\n StreamInfo(extension=\"extension.2\", filename=\"filename.2\"),\n mimetype=\"mimetype.3\",\n charset=\"charset.3\",\n )\n assert updated_stream_info.extension == \"extension.2\"\n assert updated_stream_info.filename == \"filename.2\"\n assert updated_stream_info.mimetype == \"mimetype.3\"\n assert updated_stream_info.charset == \"charset.3\"\n assert updated_stream_info.local_path == \"local_path.1\"\n assert updated_stream_info.url == \"url.1\"\n\n # Check multiple StreamInfo objects\n updated_stream_info = stream_info_original.copy_and_update(\n StreamInfo(extension=\"extension.4\", filename=\"filename.5\"),\n StreamInfo(mimetype=\"mimetype.6\", charset=\"charset.7\"),\n )\n assert updated_stream_info.extension == \"extension.4\"\n assert updated_stream_info.filename == \"filename.5\"\n assert updated_stream_info.mimetype == \"mimetype.6\"\n assert updated_stream_info.charset == \"charset.7\"\n assert updated_stream_info.local_path == \"local_path.1\"\n assert updated_stream_info.url == \"url.1\"", "creation_date": "2025-03-12T18:08:06Z", "repo": "microsoft/markitdown", "file_path": 
"packages/markitdown/tests/test_module_misc.py", "stars": 60239, "label": 0} +{"function": "def copy_to_shm(file: str):\n if file.startswith(\"/dev/shm/\"):\n # Nothing to do, the file is already in shared memory.\n yield file\n return\n\n tmp_dir = \"/dev/shm/\"\n fd, tmp_path = tempfile.mkstemp(dir=tmp_dir)\n try:\n shutil.copyfile(file, tmp_path)\n yield tmp_path\n finally:\n os.remove(tmp_path)\n os.close(fd)", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def copy_from_shm(file: str):\n tmp_dir = \"/dev/shm/\"\n fd, tmp_path = tempfile.mkstemp(dir=tmp_dir)\n try:\n yield tmp_path\n shutil.copyfile(tmp_path, file)\n finally:\n os.remove(tmp_path)\n os.close(fd)", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def fast_unpickle(path: str) -> Any:\n with copy_to_shm(path) as tmp_path:\n with open(tmp_path, \"rb\") as f:\n return pickle.load(f)", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def fast_pickle(obj: Any, path: str) -> None:\n with copy_from_shm(path) as tmp_path:\n with open(tmp_path, \"wb\") as f:\n pickle.dump(obj, f)", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def load_tensors(shaped_arrays, directory, mesh_config, tensor_indices=None):\n \"\"\"Loads a set of arrays.\"\"\"\n pool = ThreadPoolExecutor(max_workers=32)\n fs = list()\n num_tensors = 0\n num_replicas = 1\n data_model_shards = math.prod(mesh_config)\n if tensor_indices is None:\n iterator = enumerate(shaped_arrays)\n else:\n iterator = zip(tensor_indices, shaped_arrays)\n for i, t in iterator:\n if (i % num_replicas) == ((jax.process_index() // data_model_shards) % num_replicas):\n idx = (\n jax.process_index() // (num_replicas * data_model_shards) * data_model_shards\n + jax.process_index() % data_model_shards\n )\n fs.append(\n pool.submit(fast_unpickle, os.path.join(directory, f\"tensor{i:05d}_{idx:03d}\"))\n )\n num_tensors += 1\n else:\n fs.append(pool.submit(np.zeros, t.shape, dtype=t.dtype))\n wait(fs)\n return [f.result() for f in fs]", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def path_tuple_to_string(path: tuple) -> str:\n pieces = []\n for elem in path:\n if isinstance(elem, jax.tree_util.DictKey):\n pieces.append(elem.key)\n elif isinstance(elem, jax.tree_util.GetAttrKey):\n pieces.append(elem.name)\n else:\n assert isinstance(elem, (jax.tree_util.FlattenedIndexKey, jax.tree_util.SequenceKey))\n return \"/\".join(pieces)", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def get_load_path_str(\n init_path_str: str,\n load_rename_rules: Optional[list[tuple[str, str]]] = None,\n load_exclude_rules: Optional[list[str]] = None,\n) -> Optional[str]:\n # Exclusion\n if load_exclude_rules is not None:\n for search_pattern in load_exclude_rules:\n if re.search(search_pattern, init_path_str):\n return None\n\n # Renaming\n load_path_str = init_path_str\n if load_rename_rules is not None:\n for search_pattern, replacement_pattern in load_rename_rules:\n if re.search(search_pattern, load_path_str):\n load_path_str = re.sub(search_pattern, 
replacement_pattern, load_path_str)\n break\n\n return load_path_str", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def replace_with_load_state(\n init_state: Any,\n load_state: Any,\n load_rename_rules: Optional[list[tuple[str, str]]] = None,\n load_exclude_rules: Optional[list[str]] = None,\n mesh_config: tuple = (1, 1),\n) -> Any:\n flatten_load, _ = jax.tree_util.tree_flatten_with_path(load_state)\n flatten_init, structure_init = jax.tree_util.tree_flatten_with_path(init_state)\n load_map = {path_tuple_to_string(path): tensor for path, tensor in flatten_load}\n\n replaced = []\n num_replicas = 1\n data_model_shards = math.prod(mesh_config)\n for i, (init_path, tensor) in enumerate(flatten_init):\n init_path_str = path_tuple_to_string(init_path)\n load_path_str = get_load_path_str(init_path_str, load_rename_rules, load_exclude_rules)\n if load_path_str is None:\n rank_logger.info(f\"Excluded from restore: {init_path_str}.\")\n replaced.append(tensor)\n elif load_path_str in load_map:\n if load_path_str == init_path_str:\n rank_logger.info(f\"Restored from ckpt: {init_path_str}.\")\n else:\n rank_logger.info(f\"Restored from ckpt: {init_path_str} <-- {load_path_str}.\")\n replaced.append(load_map[load_path_str])\n else:\n rank_logger.info(f\"Not found in ckpt: {init_path_str}.\")\n if (i % num_replicas) == ((jax.process_index() // data_model_shards) % num_replicas):\n replaced.append(tensor)\n else:\n replaced.append(np.zeros_like(tensor))\n\n return jax.tree_util.tree_unflatten(structure_init, replaced)", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def restore(\n checkpoint_path: str,\n state_shapes: Any,\n mesh,\n between_hosts_config,\n params_only,\n state_sharding,\n init_state: Optional[Any] = None,\n) -> Any:\n ckpt_path = os.path.join(checkpoint_path, \"ckpt-0\")\n\n rank_logger.info(\"Loading checkpoint at {}\".format(ckpt_path))\n ckpt_shapes = state_shapes\n ckpt_shapes_with_path, structure = jax.tree_util.tree_flatten_with_path(ckpt_shapes)\n\n ckpt_shapes_flat = [elem[1] for elem in ckpt_shapes_with_path]\n loaded_tensors = load_tensors(ckpt_shapes_flat, ckpt_path, between_hosts_config)\n\n state = jax.tree_util.tree_unflatten(structure, loaded_tensors)\n\n # Sanity check to give a better error message.\n ckpt_keys = set(state.params.keys())\n code_keys = set(state_sharding.params.keys())\n\n if ckpt_keys != code_keys and init_state is None:\n missing_in_ckpt = code_keys - ckpt_keys\n missing_locally = ckpt_keys - code_keys\n raise ValueError(\n \"Parameters in the code are not matching checkpoint parameters.\\n\"\n \"Params missing in checkpoint: {}\\nParams missing in code: {}\".format(\n missing_in_ckpt, missing_locally\n )\n )\n state_sharding = jax.tree_util.tree_map(\n lambda x: jax.sharding.PartitionSpec() if x is None else x,\n state_sharding,\n is_leaf=lambda x: x is None,\n )\n state = multihost_utils.host_local_array_to_global_array(state, mesh, state_sharding)\n if params_only:\n state = state.params\n return state", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "checkpoint.py", "stars": 50362, "label": 0} +{"function": "def _match(qs, ks):\n \"\"\"Return True if regexes in qs match any window of strings in tuple ks.\"\"\"\n # compile regexes and force complete match\n qts = tuple(map(lambda x: re.compile(x + \"$\"), qs))\n for i in 
range(len(ks) - len(qs) + 1):\n matches = [x.match(y) for x, y in zip(qts, ks[i:])]\n if matches and all(matches):\n return True\n return False", "creation_date": "2024-03-17T18:11:31Z", "repo": "xai-org/grok-1", "file_path": "model.py", "stars": 50362, "label": 0} +{"function": "def init_session_state() -> None:\n \"\"\"Initialize Streamlit session state with default values.\"\"\"\n defaults = {\n \"initialized\": False,\n \"qdrant_url\": \"\",\n \"qdrant_api_key\": \"\",\n \"openai_api_key\": \"\",\n \"setup_complete\": False,\n \"client\": None,\n \"embedding_model\": None,\n \"processor_agent\": None,\n \"tts_agent\": None,\n \"selected_voice\": \"coral\",\n \"processed_documents\": []\n }\n \n for key, value in defaults.items():\n if key not in st.session_state:\n st.session_state[key] = value", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def setup_sidebar() -> None:\n \"\"\"Configure sidebar with API settings and voice options.\"\"\"\n with st.sidebar:\n st.title(\"\ud83d\udd11 Configuration\")\n st.markdown(\"---\")\n \n st.session_state.qdrant_url = st.text_input(\n \"Qdrant URL\",\n value=st.session_state.qdrant_url,\n type=\"password\"\n )\n st.session_state.qdrant_api_key = st.text_input(\n \"Qdrant API Key\",\n value=st.session_state.qdrant_api_key,\n type=\"password\"\n )\n st.session_state.openai_api_key = st.text_input(\n \"OpenAI API Key\",\n value=st.session_state.openai_api_key,\n type=\"password\"\n )\n \n st.markdown(\"---\")\n st.markdown(\"### \ud83c\udfa4 Voice Settings\")\n voices = [\"alloy\", \"ash\", \"ballad\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\", \"verse\"]\n st.session_state.selected_voice = st.selectbox(\n \"Select Voice\",\n options=voices,\n index=voices.index(st.session_state.selected_voice),\n help=\"Choose the voice for the audio response\"\n )", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def setup_qdrant() -> Tuple[QdrantClient, TextEmbedding]:\n \"\"\"Initialize Qdrant client and embedding model.\"\"\"\n if not all([st.session_state.qdrant_url, st.session_state.qdrant_api_key]):\n raise ValueError(\"Qdrant credentials not provided\")\n \n client = QdrantClient(\n url=st.session_state.qdrant_url,\n api_key=st.session_state.qdrant_api_key\n )\n \n embedding_model = TextEmbedding()\n test_embedding = list(embedding_model.embed([\"test\"]))[0]\n embedding_dim = len(test_embedding)\n \n try:\n client.create_collection(\n collection_name=COLLECTION_NAME,\n vectors_config=VectorParams(\n size=embedding_dim,\n distance=Distance.COSINE\n )\n )\n except Exception as e:\n if \"already exists\" not in str(e):\n raise e\n \n return client, embedding_model", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def process_pdf(file) -> List:\n \"\"\"Process PDF file and split into chunks with metadata.\"\"\"\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:\n tmp_file.write(file.getvalue())\n loader = PyPDFLoader(tmp_file.name)\n documents = loader.load()\n \n # Add source metadata\n for doc in documents:\n doc.metadata.update({\n \"source_type\": \"pdf\",\n \"file_name\": 
file.name,\n \"timestamp\": datetime.now().isoformat()\n })\n \n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size=1000,\n chunk_overlap=200\n )\n return text_splitter.split_documents(documents)\n except Exception as e:\n st.error(f\"\ud83d\udcc4 PDF processing error: {str(e)}\")\n return []", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def store_embeddings(\n client: QdrantClient,\n embedding_model: TextEmbedding,\n documents: List,\n collection_name: str\n) -> None:\n \"\"\"Store document embeddings in Qdrant.\"\"\"\n for doc in documents:\n embedding = list(embedding_model.embed([doc.page_content]))[0]\n client.upsert(\n collection_name=collection_name,\n points=[\n models.PointStruct(\n id=str(uuid.uuid4()),\n vector=embedding.tolist(),\n payload={\n \"content\": doc.page_content,\n **doc.metadata\n }\n )\n ]\n )", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def setup_agents(openai_api_key: str) -> Tuple[Agent, Agent]:\n \"\"\"Initialize the processor and TTS agents.\"\"\"\n os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n \n processor_agent = Agent(\n name=\"Documentation Processor\",\n instructions=\"\"\"You are a helpful documentation assistant. Your task is to:\n 1. Analyze the provided documentation content\n 2. Answer the user's question clearly and concisely\n 3. Include relevant examples when available\n 4. Cite the source files when referencing specific content\n 5. Keep responses natural and conversational\n 6. Format your response in a way that's easy to speak out loud\"\"\",\n model=\"gpt-4o\"\n )\n\n tts_agent = Agent(\n name=\"Text-to-Speech Agent\",\n instructions=\"\"\"You are a text-to-speech agent. Your task is to:\n 1. Convert the processed documentation response into natural speech\n 2. Maintain proper pacing and emphasis\n 3. Handle technical terms clearly\n 4. Keep the tone professional but friendly\n 5. Use appropriate pauses for better comprehension\n 6. 
Ensure the speech is clear and well-articulated\"\"\",\n model=\"gpt-4o\"\n )\n \n return processor_agent, tts_agent", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "async def process_query(\n query: str,\n client: QdrantClient,\n embedding_model: TextEmbedding,\n collection_name: str,\n openai_api_key: str,\n voice: str\n) -> Dict:\n \"\"\"Process user query and generate voice response.\"\"\"\n try:\n st.info(\"\ud83d\udd04 Step 1: Generating query embedding and searching documents...\")\n # Get query embedding and search\n query_embedding = list(embedding_model.embed([query]))[0]\n st.write(f\"Generated embedding of size: {len(query_embedding)}\")\n \n search_response = client.query_points(\n collection_name=collection_name,\n query=query_embedding.tolist(),\n limit=3,\n with_payload=True\n )\n \n search_results = search_response.points if hasattr(search_response, 'points') else []\n st.write(f\"Found {len(search_results)} relevant documents\")\n \n if not search_results:\n raise Exception(\"No relevant documents found in the vector database\")\n \n st.info(\"\ud83d\udd04 Step 2: Preparing context from search results...\")\n # Prepare context from search results\n context = \"Based on the following documentation:\\n\\n\"\n for i, result in enumerate(search_results, 1):\n payload = result.payload\n if not payload:\n continue\n content = payload.get('content', '')\n source = payload.get('file_name', 'Unknown Source')\n context += f\"From {source}:\\n{content}\\n\\n\"\n st.write(f\"Document {i} from: {source}\")\n \n context += f\"\\nUser Question: {query}\\n\\n\"\n context += \"Please provide a clear, concise answer that can be easily spoken out loud.\"\n \n st.info(\"\ud83d\udd04 Step 3: Setting up agents...\")\n # Setup agents if not already done\n if not st.session_state.processor_agent or not st.session_state.tts_agent:\n processor_agent, tts_agent = setup_agents(openai_api_key)\n st.session_state.processor_agent = processor_agent\n st.session_state.tts_agent = tts_agent\n st.write(\"Initialized new processor and TTS agents\")\n else:\n st.write(\"Using existing agents\")\n \n st.info(\"\ud83d\udd04 Step 4: Generating text response...\")\n # Generate text response using processor agent\n processor_result = await Runner.run(st.session_state.processor_agent, context)\n text_response = processor_result.final_output\n st.write(f\"Generated text response of length: {len(text_response)}\")\n \n st.info(\"\ud83d\udd04 Step 5: Generating voice instructions...\")\n # Generate voice instructions using TTS agent\n tts_result = await Runner.run(st.session_state.tts_agent, text_response)\n voice_instructions = tts_result.final_output\n st.write(f\"Generated voice instructions of length: {len(voice_instructions)}\")\n \n st.info(\"\ud83d\udd04 Step 6: Generating and playing audio...\")\n # Generate and play audio with streaming\n async_openai = AsyncOpenAI(api_key=openai_api_key)\n \n # First create streaming response\n async with async_openai.audio.speech.with_streaming_response.create(\n model=\"gpt-4o-mini-tts\",\n voice=voice,\n input=text_response,\n instructions=voice_instructions,\n response_format=\"pcm\",\n ) as stream_response:\n st.write(\"Starting audio playback...\")\n # Play audio directly using LocalAudioPlayer\n await LocalAudioPlayer().play(stream_response)\n st.write(\"Audio playback complete\")\n \n st.write(\"Generating downloadable MP3 
version...\")\n # Also save as MP3 for download\n audio_response = await async_openai.audio.speech.create(\n model=\"gpt-4o-mini-tts\",\n voice=voice,\n input=text_response,\n instructions=voice_instructions,\n response_format=\"mp3\"\n )\n \n temp_dir = tempfile.gettempdir()\n audio_path = os.path.join(temp_dir, f\"response_{uuid.uuid4()}.mp3\")\n \n with open(audio_path, \"wb\") as f:\n f.write(audio_response.content)\n st.write(f\"Saved MP3 file to: {audio_path}\")\n \n st.success(\"\u2705 Query processing complete!\")\n return {\n \"status\": \"success\",\n \"text_response\": text_response,\n \"voice_instructions\": voice_instructions,\n \"audio_path\": audio_path,\n \"sources\": [r.payload.get('file_name', 'Unknown Source') for r in search_results if r.payload]\n }\n \n except Exception as e:\n st.error(f\"\u274c Error during query processing: {str(e)}\")\n return {\n \"status\": \"error\",\n \"error\": str(e),\n \"query\": query\n }", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def main() -> None:\n \"\"\"Main application function.\"\"\"\n st.set_page_config(\n page_title=\"Voice RAG Agent\",\n page_icon=\"\ud83c\udf99\ufe0f\",\n layout=\"wide\"\n )\n \n init_session_state()\n setup_sidebar()\n \n st.title(\"\ud83c\udf99\ufe0f Voice RAG Agent\")\n st.info(\"Get voice-powered answers to your documentation questions by configuring your API keys and uploading PDF documents. Then, simply ask questions to receive both text and voice responses!\")\n \n # File upload section\n uploaded_file = st.file_uploader(\"Upload PDF\", type=[\"pdf\"])\n \n if uploaded_file:\n file_name = uploaded_file.name\n if file_name not in st.session_state.processed_documents:\n with st.spinner('Processing PDF...'):\n try:\n # Setup Qdrant if not already done\n if not st.session_state.client:\n client, embedding_model = setup_qdrant()\n st.session_state.client = client\n st.session_state.embedding_model = embedding_model\n \n # Process and store document\n documents = process_pdf(uploaded_file)\n if documents:\n store_embeddings(\n st.session_state.client,\n st.session_state.embedding_model,\n documents,\n COLLECTION_NAME\n )\n st.session_state.processed_documents.append(file_name)\n st.success(f\"\u2705 Added PDF: {file_name}\")\n st.session_state.setup_complete = True\n except Exception as e:\n st.error(f\"Error processing document: {str(e)}\")\n \n # Display processed documents\n if st.session_state.processed_documents:\n st.sidebar.header(\"\ud83d\udcda Processed Documents\")\n for doc in st.session_state.processed_documents:\n st.sidebar.text(f\"\ud83d\udcc4 {doc}\")\n \n # Query interface\n query = st.text_input(\n \"What would you like to know about the documentation?\",\n placeholder=\"e.g., How do I authenticate API requests?\",\n disabled=not st.session_state.setup_complete\n )\n \n if query and st.session_state.setup_complete:\n with st.status(\"Processing your query...\", expanded=True) as status:\n try:\n result = asyncio.run(process_query(\n query,\n st.session_state.client,\n st.session_state.embedding_model,\n COLLECTION_NAME,\n st.session_state.openai_api_key,\n st.session_state.selected_voice\n ))\n \n if result[\"status\"] == \"success\":\n status.update(label=\"\u2705 Query processed!\", state=\"complete\")\n \n st.markdown(\"### Response:\")\n st.write(result[\"text_response\"])\n \n if \"audio_path\" in result:\n st.markdown(f\"### \ud83d\udd0a 
Audio Response (Voice: {st.session_state.selected_voice})\")\n st.audio(result[\"audio_path\"], format=\"audio/mp3\", start_time=0)\n \n with open(result[\"audio_path\"], \"rb\") as audio_file:\n audio_bytes = audio_file.read()\n st.download_button(\n label=\"\ud83d\udce5 Download Audio Response\",\n data=audio_bytes,\n file_name=f\"voice_response_{st.session_state.selected_voice}.mp3\",\n mime=\"audio/mp3\"\n )\n \n st.markdown(\"### Sources:\")\n for source in result[\"sources\"]:\n st.markdown(f\"- {source}\")\n else:\n status.update(label=\"\u274c Error processing query\", state=\"error\")\n st.error(f\"Error: {result.get('error', 'Unknown error occurred')}\")\n \n except Exception as e:\n status.update(label=\"\u274c Error processing query\", state=\"error\")\n st.error(f\"Error processing query: {str(e)}\")\n \n elif not st.session_state.setup_complete:\n st.info(\"\ud83d\udc48 Please configure the system and upload documents first!\")", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/voice_rag_openaisdk/rag_voice.py", "stars": 49856, "label": 0} +{"function": "def init_session_state():\n defaults = {\n \"initialized\": False,\n \"qdrant_url\": \"\",\n \"qdrant_api_key\": \"\",\n \"firecrawl_api_key\": \"\",\n \"openai_api_key\": \"\",\n \"doc_url\": \"\",\n \"setup_complete\": False,\n \"client\": None,\n \"embedding_model\": None,\n \"processor_agent\": None,\n \"tts_agent\": None,\n \"selected_voice\": \"coral\"\n }\n \n for key, value in defaults.items():\n if key not in st.session_state:\n st.session_state[key] = value", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/customer_support_voice_agent/customer_support_voice_agent.py", "stars": 49856, "label": 0} +{"function": "def sidebar_config():\n with st.sidebar:\n st.title(\"\ud83d\udd11 Configuration\")\n st.markdown(\"---\")\n \n st.session_state.qdrant_url = st.text_input(\n \"Qdrant URL\",\n value=st.session_state.qdrant_url,\n type=\"password\"\n )\n st.session_state.qdrant_api_key = st.text_input(\n \"Qdrant API Key\",\n value=st.session_state.qdrant_api_key,\n type=\"password\"\n )\n st.session_state.firecrawl_api_key = st.text_input(\n \"Firecrawl API Key\",\n value=st.session_state.firecrawl_api_key,\n type=\"password\"\n )\n st.session_state.openai_api_key = st.text_input(\n \"OpenAI API Key\",\n value=st.session_state.openai_api_key,\n type=\"password\"\n )\n \n st.markdown(\"---\")\n st.session_state.doc_url = st.text_input(\n \"Documentation URL\",\n value=st.session_state.doc_url,\n placeholder=\"https://docs.example.com\"\n )\n \n st.markdown(\"---\")\n st.markdown(\"### \ud83c\udfa4 Voice Settings\")\n voices = [\"alloy\", \"ash\", \"ballad\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\", \"verse\"]\n st.session_state.selected_voice = st.selectbox(\n \"Select Voice\",\n options=voices,\n index=voices.index(st.session_state.selected_voice),\n help=\"Choose the voice for the audio response\"\n )\n \n if st.button(\"Initialize System\", type=\"primary\"):\n if all([\n st.session_state.qdrant_url,\n st.session_state.qdrant_api_key,\n st.session_state.firecrawl_api_key,\n st.session_state.openai_api_key,\n st.session_state.doc_url\n ]):\n progress_placeholder = st.empty()\n with progress_placeholder.container():\n try:\n st.markdown(\"\ud83d\udd04 Setting up Qdrant connection...\")\n client, embedding_model = setup_qdrant_collection(\n 
st.session_state.qdrant_url,\n st.session_state.qdrant_api_key\n )\n st.session_state.client = client\n st.session_state.embedding_model = embedding_model\n st.markdown(\"\u2705 Qdrant setup complete!\")\n \n st.markdown(\"\ud83d\udd04 Crawling documentation pages...\")\n pages = crawl_documentation(\n st.session_state.firecrawl_api_key,\n st.session_state.doc_url\n )\n st.markdown(f\"\u2705 Crawled {len(pages)} documentation pages!\")\n \n store_embeddings(\n client,\n embedding_model,\n pages,\n \"docs_embeddings\"\n )\n \n processor_agent, tts_agent = setup_agents(\n st.session_state.openai_api_key\n )\n st.session_state.processor_agent = processor_agent\n st.session_state.tts_agent = tts_agent\n \n st.session_state.setup_complete = True\n st.success(\"\u2705 System initialized successfully!\")\n \n except Exception as e:\n st.error(f\"Error during setup: {str(e)}\")\n else:\n st.error(\"Please fill in all the required fields!\")", "creation_date": "2025-04-17T21:01:23Z", "repo": "Shubhamsaboo/awesome-llm-apps", "file_path": "voice_ai_agents/customer_support_voice_agent/customer_support_voice_agent.py", "stars": 49856, "label": 0} +{"function": "def is_empty(*items): # \u4efb\u610f\u4e00\u9879\u4e0d\u4e3a\u7a7a\u8fd4\u56deFalse\r\n for item in items:\r\n if item is not None and item != \"\":\r\n return False\r\n return True\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def is_full(*items): # \u4efb\u610f\u4e00\u9879\u4e3a\u7a7a\u8fd4\u56deFalse\r\n for item in items:\r\n if item is None or item == \"\":\r\n return False\r\n return True\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def clean_hifigan_model():\r\n global hifigan_model\r\n if hifigan_model:\r\n hifigan_model = hifigan_model.cpu()\r\n hifigan_model = None\r\n try:\r\n torch.cuda.empty_cache()\r\n except:\r\n pass\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def clean_bigvgan_model():\r\n global bigvgan_model\r\n if bigvgan_model:\r\n bigvgan_model = bigvgan_model.cpu()\r\n bigvgan_model = None\r\n try:\r\n torch.cuda.empty_cache()\r\n except:\r\n pass\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def clean_sv_cn_model():\r\n global sv_cn_model\r\n if sv_cn_model:\r\n sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu()\r\n sv_cn_model = None\r\n try:\r\n torch.cuda.empty_cache()\r\n except:\r\n pass\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def init_bigvgan():\r\n global bigvgan_model, hifigan_model, sv_cn_model\r\n from BigVGAN import bigvgan\r\n\r\n bigvgan_model = bigvgan.BigVGAN.from_pretrained(\r\n \"%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x\" % (now_dir,),\r\n use_cuda_kernel=False,\r\n ) # if True, RuntimeError: Ninja is required to load C++ extensions\r\n # remove weight norm in the model and set to eval mode\r\n bigvgan_model.remove_weight_norm()\r\n bigvgan_model = bigvgan_model.eval()\r\n\r\n if is_half == True:\r\n bigvgan_model = bigvgan_model.half().to(device)\r\n else:\r\n bigvgan_model = bigvgan_model.to(device)\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", 
"file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def init_hifigan():\r\n global hifigan_model, bigvgan_model, sv_cn_model\r\n hifigan_model = Generator(\r\n initial_channel=100,\r\n resblock=\"1\",\r\n resblock_kernel_sizes=[3, 7, 11],\r\n resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],\r\n upsample_rates=[10, 6, 2, 2, 2],\r\n upsample_initial_channel=512,\r\n upsample_kernel_sizes=[20, 12, 4, 4, 4],\r\n gin_channels=0,\r\n is_bias=True,\r\n )\r\n hifigan_model.eval()\r\n hifigan_model.remove_weight_norm()\r\n state_dict_g = torch.load(\r\n \"%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth\" % (now_dir,),\r\n map_location=\"cpu\",\r\n weights_only=False,\r\n )\r\n print(\"loading vocoder\", hifigan_model.load_state_dict(state_dict_g))\r\n if is_half == True:\r\n hifigan_model = hifigan_model.half().to(device)\r\n else:\r\n hifigan_model = hifigan_model.to(device)\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def init_sv_cn():\r\n global hifigan_model, bigvgan_model, sv_cn_model\r\n sv_cn_model = SV(device, is_half)\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def resample(audio_tensor, sr0, sr1, device):\r\n global resample_transform_dict\r\n key = \"%s-%s-%s\" % (sr0, sr1, str(device))\r\n if key not in resample_transform_dict:\r\n resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device)\r\n return resample_transform_dict[key](audio_tensor)\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "def norm_spec(x):\r\n return (x - spec_min) / (spec_max - spec_min) * 2 - 1\r", "creation_date": "2024-01-19T06:08:31Z", "repo": "RVC-Boss/GPT-SoVITS", "file_path": "api.py", "stars": 48682, "label": 0} +{"function": "async def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Run Manus agent with a prompt\")\n parser.add_argument(\n \"--prompt\", type=str, required=False, help=\"Input prompt for the agent\"\n )\n args = parser.parse_args()\n\n # Create and initialize Manus agent\n agent = await Manus.create()\n try:\n # Use command line prompt if provided, otherwise ask for input\n prompt = args.prompt if args.prompt else input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await agent.run(prompt)\n logger.info(\"Request processing completed.\")\n except KeyboardInterrupt:\n logger.warning(\"Operation interrupted.\")\n finally:\n # Ensure agent resources are cleaned up before exiting\n await agent.cleanup()", "creation_date": "2025-03-06T14:57:07Z", "repo": "FoundationAgents/OpenManus", "file_path": "main.py", "stars": 47887, "label": 0} +{"function": "async def run_flow():\n agents = {\n \"manus\": Manus(),\n }\n if config.run_flow_config.use_data_analysis_agent:\n agents[\"data_analysis\"] = DataAnalysis()\n try:\n prompt = input(\"Enter your prompt: \")\n\n if prompt.strip().isspace() or not prompt:\n logger.warning(\"Empty prompt provided.\")\n return\n\n flow = FlowFactory.create_flow(\n flow_type=FlowType.PLANNING,\n agents=agents,\n )\n logger.warning(\"Processing your request...\")\n\n try:\n start_time = time.time()\n result = await asyncio.wait_for(\n flow.execute(prompt),\n timeout=3600, # 60 
minute timeout for the entire execution\n )\n elapsed_time = time.time() - start_time\n logger.info(f\"Request processed in {elapsed_time:.2f} seconds\")\n logger.info(result)\n except asyncio.TimeoutError:\n logger.error(\"Request processing timed out after 1 hour\")\n logger.info(\n \"Operation terminated due to timeout. Please try a simpler request.\"\n )\n\n except KeyboardInterrupt:\n logger.info(\"Operation cancelled by user.\")\n except Exception as e:\n logger.error(f\"Error: {str(e)}\")", "creation_date": "2025-03-06T17:11:08Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_flow.py", "stars": 47887, "label": 0} +{"function": "def parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Run the MCP Agent\")\n parser.add_argument(\n \"--connection\",\n \"-c\",\n choices=[\"stdio\", \"sse\"],\n default=\"stdio\",\n help=\"Connection type: stdio or sse\",\n )\n parser.add_argument(\n \"--server-url\",\n default=\"http://127.0.0.1:8000/sse\",\n help=\"URL for SSE connection\",\n )\n parser.add_argument(\n \"--interactive\", \"-i\", action=\"store_true\", help=\"Run in interactive mode\"\n )\n parser.add_argument(\"--prompt\", \"-p\", help=\"Single prompt to execute and exit\")\n return parser.parse_args()", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": "async def run_mcp() -> None:\n \"\"\"Main entry point for the MCP runner.\"\"\"\n args = parse_args()\n runner = MCPRunner()\n\n try:\n await runner.initialize(args.connection, args.server_url)\n\n if args.prompt:\n await runner.run_single_prompt(args.prompt)\n elif args.interactive:\n await runner.run_interactive()\n else:\n await runner.run_default()\n\n except KeyboardInterrupt:\n logger.info(\"Program interrupted by user\")\n except Exception as e:\n logger.error(f\"Error running MCPAgent: {str(e)}\", exc_info=True)\n sys.exit(1)\n finally:\n await runner.cleanup()", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": " def __init__(self):\n self.root_path = config.root_path\n self.server_reference = config.mcp_config.server_reference\n self.agent = MCPAgent()", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": " async def initialize(\n self,\n connection_type: str,\n server_url: str | None = None,\n ) -> None:\n \"\"\"Initialize the MCP agent with the appropriate connection.\"\"\"\n logger.info(f\"Initializing MCPAgent with {connection_type} connection...\")\n\n if connection_type == \"stdio\":\n await self.agent.initialize(\n connection_type=\"stdio\",\n command=sys.executable,\n args=[\"-m\", self.server_reference],\n )\n else: # sse\n await self.agent.initialize(connection_type=\"sse\", server_url=server_url)\n\n logger.info(f\"Connected to MCP server via {connection_type}\")", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": " async def run_interactive(self) -> None:\n \"\"\"Run the agent in interactive mode.\"\"\"\n print(\"\\nMCP Agent Interactive Mode (type 'exit' to quit)\\n\")\n while True:\n user_input = input(\"\\nEnter your request: \")\n if user_input.lower() in [\"exit\", \"quit\", \"q\"]:\n break\n response = await self.agent.run(user_input)\n 
print(f\"\\nAgent: {response}\")", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": " async def run_single_prompt(self, prompt: str) -> None:\n \"\"\"Run the agent with a single prompt.\"\"\"\n await self.agent.run(prompt)", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": " async def run_default(self) -> None:\n \"\"\"Run the agent in default mode.\"\"\"\n prompt = input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await self.agent.run(prompt)\n logger.info(\"Request processing completed.\")", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": " async def cleanup(self) -> None:\n \"\"\"Clean up agent resources.\"\"\"\n await self.agent.cleanup()\n logger.info(\"Session ended\")", "creation_date": "2025-03-19T17:10:04Z", "repo": "FoundationAgents/OpenManus", "file_path": "run_mcp.py", "stars": 47887, "label": 0} +{"function": "def test_docker_deployment(version=\"basic\"):\n tester = Crawl4AiTester(\n # base_url=\"http://localhost:11235\" ,\n base_url=\"https://crawl4ai-sby74.ondigitalocean.app\",\n api_token=\"test\",\n )\n print(f\"Testing Crawl4AI Docker {version} version\")\n\n # Health check with timeout and retry\n max_retries = 5\n for i in range(max_retries):\n try:\n health = requests.get(f\"{tester.base_url}/health\", timeout=10)\n print(\"Health check:\", health.json())\n break\n except requests.exceptions.RequestException:\n if i == max_retries - 1:\n print(f\"Failed to connect after {max_retries} attempts\")\n sys.exit(1)\n print(f\"Waiting for service to start (attempt {i+1}/{max_retries})...\")\n time.sleep(5)\n\n # Test cases based on version\n test_basic_crawl(tester)\n test_basic_crawl(tester)\n test_basic_crawl_sync(tester)", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_basic_crawl(tester: Crawl4AiTester):\n print(\"\\n=== Testing Basic Crawl ===\")\n request = {\n \"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 10,\n \"session_id\": \"test\",\n }\n\n result = tester.submit_and_wait(request)\n print(f\"Basic crawl result length: {len(result['result']['markdown'])}\")\n assert result[\"result\"][\"success\"]\n assert len(result[\"result\"][\"markdown\"]) > 0", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_basic_crawl_sync(tester: Crawl4AiTester):\n print(\"\\n=== Testing Basic Crawl (Sync) ===\")\n request = {\n \"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 10,\n \"session_id\": \"test\",\n }\n\n result = tester.submit_sync(request)\n print(f\"Basic crawl result length: {len(result['result']['markdown'])}\")\n assert result[\"status\"] == \"completed\"\n assert result[\"result\"][\"success\"]\n assert len(result[\"result\"][\"markdown\"]) > 0", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_js_execution(tester: Crawl4AiTester):\n print(\"\\n=== Testing JS Execution ===\")\n request = {\n 
\"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 8,\n \"js_code\": [\n \"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"\n ],\n \"wait_for\": \"article.tease-card:nth-child(10)\",\n \"crawler_params\": {\"headless\": True},\n }\n\n result = tester.submit_and_wait(request)\n print(f\"JS execution result length: {len(result['result']['markdown'])}\")\n assert result[\"result\"][\"success\"]", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_css_selector(tester: Crawl4AiTester):\n print(\"\\n=== Testing CSS Selector ===\")\n request = {\n \"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 7,\n \"css_selector\": \".wide-tease-item__description\",\n \"crawler_params\": {\"headless\": True},\n \"extra\": {\"word_count_threshold\": 10},\n }\n\n result = tester.submit_and_wait(request)\n print(f\"CSS selector result length: {len(result['result']['markdown'])}\")\n assert result[\"result\"][\"success\"]", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_structured_extraction(tester: Crawl4AiTester):\n print(\"\\n=== Testing Structured Extraction ===\")\n schema = {\n \"name\": \"Coinbase Crypto Prices\",\n \"baseSelector\": \".cds-tableRow-t45thuk\",\n \"fields\": [\n {\n \"name\": \"crypto\",\n \"selector\": \"td:nth-child(1) h2\",\n \"type\": \"text\",\n },\n {\n \"name\": \"symbol\",\n \"selector\": \"td:nth-child(1) p\",\n \"type\": \"text\",\n },\n {\n \"name\": \"price\",\n \"selector\": \"td:nth-child(2)\",\n \"type\": \"text\",\n },\n ],\n }\n\n request = {\n \"urls\": [\"https://www.coinbase.com/explore\"],\n \"priority\": 9,\n \"extraction_config\": {\"type\": \"json_css\", \"params\": {\"schema\": schema}},\n }\n\n result = tester.submit_and_wait(request)\n extracted = json.loads(result[\"result\"][\"extracted_content\"])\n print(f\"Extracted {len(extracted)} items\")\n print(\"Sample item:\", json.dumps(extracted[0], indent=2))\n assert result[\"result\"][\"success\"]\n assert len(extracted) > 0", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_llm_extraction(tester: Crawl4AiTester):\n print(\"\\n=== Testing LLM Extraction ===\")\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"model_name\": {\n \"type\": \"string\",\n \"description\": \"Name of the OpenAI model.\",\n },\n \"input_fee\": {\n \"type\": \"string\",\n \"description\": \"Fee for input token for the OpenAI model.\",\n },\n \"output_fee\": {\n \"type\": \"string\",\n \"description\": \"Fee for output token for the OpenAI model.\",\n },\n },\n \"required\": [\"model_name\", \"input_fee\", \"output_fee\"],\n }\n\n request = {\n \"urls\": [\"https://openai.com/api/pricing\"],\n \"priority\": 8,\n \"extraction_config\": {\n \"type\": \"llm\",\n \"params\": {\n \"provider\": \"openai/gpt-4o-mini\",\n \"api_token\": os.getenv(\"OPENAI_API_KEY\"),\n \"schema\": schema,\n \"extraction_type\": \"schema\",\n \"instruction\": \"\"\"From the crawled content, extract all mentioned model names along with their fees for input and output tokens.\"\"\",\n },\n },\n \"crawler_params\": {\"word_count_threshold\": 1},\n }\n\n try:\n result = 
tester.submit_and_wait(request)\n extracted = json.loads(result[\"result\"][\"extracted_content\"])\n print(f\"Extracted {len(extracted)} model pricing entries\")\n print(\"Sample entry:\", json.dumps(extracted[0], indent=2))\n assert result[\"result\"][\"success\"]\n except Exception as e:\n print(f\"LLM extraction test failed (might be due to missing API key): {str(e)}\")", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_llm_with_ollama(tester: Crawl4AiTester):\n print(\"\\n=== Testing LLM with Ollama ===\")\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"article_title\": {\n \"type\": \"string\",\n \"description\": \"The main title of the news article\",\n },\n \"summary\": {\n \"type\": \"string\",\n \"description\": \"A brief summary of the article content\",\n },\n \"main_topics\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"Main topics or themes discussed in the article\",\n },\n },\n }\n\n request = {\n \"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 8,\n \"extraction_config\": {\n \"type\": \"llm\",\n \"params\": {\n \"provider\": \"ollama/llama2\",\n \"schema\": schema,\n \"extraction_type\": \"schema\",\n \"instruction\": \"Extract the main article information including title, summary, and main topics.\",\n },\n },\n \"extra\": {\"word_count_threshold\": 1},\n \"crawler_params\": {\"verbose\": True},\n }\n\n try:\n result = tester.submit_and_wait(request)\n extracted = json.loads(result[\"result\"][\"extracted_content\"])\n print(\"Extracted content:\", json.dumps(extracted, indent=2))\n assert result[\"result\"][\"success\"]\n except Exception as e:\n print(f\"Ollama extraction test failed: {str(e)}\")", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_cosine_extraction(tester: Crawl4AiTester):\n print(\"\\n=== Testing Cosine Extraction ===\")\n request = {\n \"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 8,\n \"extraction_config\": {\n \"type\": \"cosine\",\n \"params\": {\n \"semantic_filter\": \"business finance economy\",\n \"word_count_threshold\": 10,\n \"max_dist\": 0.2,\n \"top_k\": 3,\n },\n },\n }\n\n try:\n result = tester.submit_and_wait(request)\n extracted = json.loads(result[\"result\"][\"extracted_content\"])\n print(f\"Extracted {len(extracted)} text clusters\")\n print(\"First cluster tags:\", extracted[0][\"tags\"])\n assert result[\"result\"][\"success\"]\n except Exception as e:\n print(f\"Cosine extraction test failed: {str(e)}\")", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def test_screenshot(tester: Crawl4AiTester):\n print(\"\\n=== Testing Screenshot ===\")\n request = {\n \"urls\": [\"https://www.nbcnews.com/business\"],\n \"priority\": 5,\n \"screenshot\": True,\n \"crawler_params\": {\"headless\": True},\n }\n\n result = tester.submit_and_wait(request)\n print(\"Screenshot captured:\", bool(result[\"result\"][\"screenshot\"]))\n\n if result[\"result\"][\"screenshot\"]:\n # Save screenshot\n screenshot_data = base64.b64decode(result[\"result\"][\"screenshot\"])\n with open(\"test_screenshot.jpg\", \"wb\") as f:\n f.write(screenshot_data)\n print(\"Screenshot saved as test_screenshot.jpg\")\n\n assert 
result[\"result\"][\"success\"]", "creation_date": "2024-11-17T07:30:56Z", "repo": "unclecode/crawl4ai", "file_path": "tests/docker_example.py", "stars": 47768, "label": 0} +{"function": "def get_version():\n command = [\"git\", \"describe\", \"--tags\"]\n try:\n version = subprocess.check_output(command).decode().strip()\n version_parts = version.split(\"-\")\n if len(version_parts) > 1 and version_parts[0].startswith(\"mineru\"):\n return version_parts[1]\n else:\n raise ValueError(f\"Invalid version tag {version}. Expected format is mineru--released.\")\n except Exception as e:\n print(e)\n return \"0.0.0\"", "creation_date": "2024-06-04T03:33:57Z", "repo": "opendatalab/MinerU", "file_path": "update_version.py", "stars": 38959, "label": 0} +{"function": "def write_version_to_commons(version):\n commons_path = os.path.join(os.path.dirname(__file__), 'mineru', 'version.py')\n with open(commons_path, 'w') as f:\n f.write(f'__version__ = \"{version}\"\\n')", "creation_date": "2024-06-04T03:33:57Z", "repo": "opendatalab/MinerU", "file_path": "update_version.py", "stars": 38959, "label": 0} +{"function": "def delete_file(path):\n \"\"\"delete file.\"\"\"\n if not os.path.exists(path):\n if os.path.isfile(path):\n try:\n os.remove(path)\n print(f\"File '{path}' deleted.\")\n except TypeError as e:\n print(f\"Error deleting file '{path}': {e}\")\n elif os.path.isdir(path):\n try:\n shutil.rmtree(path)\n print(f\"Directory '{path}' and its contents deleted.\")\n except TypeError as e:\n print(f\"Error deleting directory '{path}': {e}\")", "creation_date": "2024-09-12T08:23:33Z", "repo": "opendatalab/MinerU", "file_path": "tests/clean_coverage.py", "stars": 38959, "label": 0} +{"function": "def get_covrage():\n \"\"\"get covrage\"\"\"\n # \u53d1\u9001\u8bf7\u6c42\u83b7\u53d6\u7f51\u9875\u5185\u5bb9\n html_content = open(\"htmlcov/index.html\", \"r\", encoding=\"utf-8\").read()\n soup = BeautifulSoup(html_content, 'html.parser')\n\n # \u67e5\u627e\u5305\u542b\"pc_cov\"\u7684span\u6807\u7b7e\n pc_cov_span = soup.find('span', class_='pc_cov')\n\n # \u63d0\u53d6\u767e\u5206\u6bd4\u503c\n percentage_value = pc_cov_span.text.strip()\n percentage_float = float(percentage_value.rstrip('%'))\n print (\"percentage_float:\", percentage_float)\n assert percentage_float >= 0.2", "creation_date": "2024-08-28T06:34:21Z", "repo": "opendatalab/MinerU", "file_path": "tests/get_coverage.py", "stars": 38959, "label": 0} +{"function": "def test_list_max(list_input: list, target_num) -> None:\n \"\"\"\n list_input: \u8f93\u5165\u5217\u8868\u5143\u7d20\uff0c\u5143\u7d20\u5747\u4e3a\u6570\u5b57\u7c7b\u578b\n \"\"\"\n assert target_num == mymax(list_input)", "creation_date": "2024-09-12T07:58:27Z", "repo": "opendatalab/MinerU", "file_path": "tests/unittest/test_unit.py", "stars": 38959, "label": 0} +{"function": "def test_join_path(path_input: list, target_path: str) -> None:\n \"\"\"\n path_input: \u8f93\u5165path\u7684\u5217\u8868\uff0c\u5217\u8868\u5143\u7d20\u5747\u4e3a\u5b57\u7b26\u4e32\n \"\"\"\n assert target_path == join_path(*path_input)", "creation_date": "2024-09-12T07:58:27Z", "repo": "opendatalab/MinerU", "file_path": "tests/unittest/test_unit.py", "stars": 38959, "label": 0} +{"function": "def test_get_top_percent_list(num_list: list, percent: float, target_num_list: list) -> None:\n \"\"\"\n num_list: \u6570\u5b57\u5217\u8868\uff0c\u5217\u8868\u5143\u7d20\u4e3a\u6570\u5b57\n percent: \u5360\u6bd4\uff0cfloat, \u5411\u4e0b\u53d6\u8bc1\n \"\"\"\n assert target_num_list == get_top_percent_list(num_list, 
percent)", "creation_date": "2024-09-12T07:58:27Z", "repo": "opendatalab/MinerU", "file_path": "tests/unittest/test_unit.py", "stars": 38959, "label": 0} +{"function": "def test_parse_s3path(s3_path: str, target_data: str):\n \"\"\"\n s3_path: s3\u8def\u5f84\n \u5982\u679c\u4e3a\u65e0\u6548\u8def\u5f84\uff0c\u5219\u8fd4\u56de\u5bf9\u5e94\u7684bucket\u540d\u5b57\u548c\u5176\u4f59\u90e8\u5206\n \u5982\u679c\u4e3a\u5f02\u5e38\u8def\u5f84 \u4f8b\u5982\uff1afile2.txt\uff0c\u5219\u62a5\u5f02\u5e38\n \"\"\"\n bucket_name, key = parse_s3path(s3_path)\n assert target_data == bucket_name", "creation_date": "2024-09-12T07:58:27Z", "repo": "opendatalab/MinerU", "file_path": "tests/unittest/test_unit.py", "stars": 38959, "label": 0} +{"function": "def test_is_in_or_part_overlap(box1: tuple, box2: tuple, target_bool: bool) -> None:\n \"\"\"\n box1: \u5750\u6807\u6570\u7ec4\n box2: \u5750\u6807\u6570\u7ec4\n \"\"\"\n assert target_bool == _is_in_or_part_overlap(box1, box2)", "creation_date": "2024-09-12T07:58:27Z", "repo": "opendatalab/MinerU", "file_path": "tests/unittest/test_unit.py", "stars": 38959, "label": 0} +{"function": "def test_is_in_or_part_overlap_with_area_ratio(box1: tuple, box2: tuple, target_bool: bool) -> None:\n out_bool = _is_in_or_part_overlap_with_area_ratio(box1, box2)\n assert target_bool == out_bool", "creation_date": "2024-09-12T07:58:27Z", "repo": "opendatalab/MinerU", "file_path": "tests/unittest/test_unit.py", "stars": 38959, "label": 0} +{"function": " def test_handles_single_rate_limit(self, mock_get, mock_sleep):\n \"\"\"Test that API retries once after a 429 and succeeds.\"\"\"\n # Setup mock responses: first 429, then 200\n mock_429_response = Mock()\n mock_429_response.status_code = 429\n \n mock_200_response = Mock()\n mock_200_response.status_code = 200\n mock_200_response.text = \"Success\"\n \n mock_get.side_effect = [mock_429_response, mock_200_response]\n \n # Call the function\n headers = {\"X-API-KEY\": \"test-key\"}\n url = \"https://api.financialdatasets.ai/test\"\n \n result = _make_api_request(url, headers)\n \n # Verify behavior\n assert result.status_code == 200\n assert result.text == \"Success\"\n \n # Verify requests.get was called twice\n assert mock_get.call_count == 2\n mock_get.assert_has_calls([\n call(url, headers=headers),\n call(url, headers=headers)\n ])\n \n # Verify sleep was called once with 60 seconds (first retry)\n mock_sleep.assert_called_once_with(60)", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def test_handles_multiple_rate_limits(self, mock_get, mock_sleep):\n \"\"\"Test that API retries multiple times after 429s.\"\"\"\n # Setup mock responses: three 429s, then 200\n mock_429_response = Mock()\n mock_429_response.status_code = 429\n \n mock_200_response = Mock()\n mock_200_response.status_code = 200\n mock_200_response.text = \"Success\"\n \n mock_get.side_effect = [\n mock_429_response, \n mock_429_response, \n mock_429_response, \n mock_200_response\n ]\n \n # Call the function\n headers = {\"X-API-KEY\": \"test-key\"}\n url = \"https://api.financialdatasets.ai/test\"\n \n result = _make_api_request(url, headers)\n \n # Verify behavior\n assert result.status_code == 200\n assert result.text == \"Success\"\n \n # Verify requests.get was called 4 times\n assert mock_get.call_count == 4\n \n # Verify sleep was called 3 times with linear backoff: 60s, 90s, 120s\n assert mock_sleep.call_count == 3\n expected_calls 
= [call(60), call(90), call(120)]\n mock_sleep.assert_has_calls(expected_calls)", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def test_handles_post_rate_limiting(self, mock_post, mock_sleep):\n \"\"\"Test that POST requests handle rate limiting.\"\"\"\n # Setup mock responses: first 429, then 200\n mock_429_response = Mock()\n mock_429_response.status_code = 429\n \n mock_200_response = Mock()\n mock_200_response.status_code = 200\n mock_200_response.text = \"Success\"\n \n mock_post.side_effect = [mock_429_response, mock_200_response]\n \n # Call the function with POST method\n headers = {\"X-API-KEY\": \"test-key\"}\n url = \"https://api.financialdatasets.ai/test\"\n json_data = {\"test\": \"data\"}\n \n result = _make_api_request(url, headers, method=\"POST\", json_data=json_data)\n \n # Verify behavior\n assert result.status_code == 200\n assert result.text == \"Success\"\n \n # Verify requests.post was called twice\n assert mock_post.call_count == 2\n mock_post.assert_has_calls([\n call(url, headers=headers, json=json_data),\n call(url, headers=headers, json=json_data)\n ])\n \n # Verify sleep was called once with 60 seconds (first retry)\n mock_sleep.assert_called_once_with(60)", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def test_ignores_other_errors(self, mock_get, mock_sleep):\n \"\"\"Test that non-429 errors are returned without retrying.\"\"\"\n # Setup mock response: 500 error\n mock_500_response = Mock()\n mock_500_response.status_code = 500\n mock_500_response.text = \"Internal Server Error\"\n \n mock_get.return_value = mock_500_response\n \n # Call the function\n headers = {\"X-API-KEY\": \"test-key\"}\n url = \"https://api.financialdatasets.ai/test\"\n \n result = _make_api_request(url, headers)\n \n # Verify behavior\n assert result.status_code == 500\n assert result.text == \"Internal Server Error\"\n \n # Verify requests.get was called only once\n assert mock_get.call_count == 1\n \n # Verify sleep was never called\n mock_sleep.assert_not_called()", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def test_normal_success_requests(self, mock_get, mock_sleep):\n \"\"\"Test that successful requests return immediately without retry.\"\"\"\n # Setup mock response: 200 success\n mock_200_response = Mock()\n mock_200_response.status_code = 200\n mock_200_response.text = \"Success\"\n \n mock_get.return_value = mock_200_response\n \n # Call the function\n headers = {\"X-API-KEY\": \"test-key\"}\n url = \"https://api.financialdatasets.ai/test\"\n \n result = _make_api_request(url, headers)\n \n # Verify behavior\n assert result.status_code == 200\n assert result.text == \"Success\"\n \n # Verify requests.get was called only once\n assert mock_get.call_count == 1\n \n # Verify sleep was never called\n mock_sleep.assert_not_called()", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def test_full_integration(self, mock_get, mock_sleep, mock_cache):\n \"\"\"Test that get_prices function properly handles rate limiting.\"\"\"\n # Mock cache to return None (cache miss)\n mock_cache.get_prices.return_value = None\n 
\n # Setup mock responses: first 429, then 200 with valid data\n mock_429_response = Mock()\n mock_429_response.status_code = 429\n \n mock_200_response = Mock()\n mock_200_response.status_code = 200\n mock_200_response.json.return_value = {\n \"ticker\": \"AAPL\",\n \"prices\": [\n {\n \"time\": \"2024-01-01T00:00:00Z\",\n \"open\": 100.0,\n \"close\": 101.0,\n \"high\": 102.0,\n \"low\": 99.0,\n \"volume\": 1000\n }\n ]\n }\n \n mock_get.side_effect = [mock_429_response, mock_200_response]\n \n # Set environment variable for API key\n with patch.dict(os.environ, {\"FINANCIAL_DATASETS_API_KEY\": \"test-key\"}):\n # Call get_prices\n result = get_prices(\"AAPL\", \"2024-01-01\", \"2024-01-02\")\n \n # Verify the function succeeded and returned data\n assert len(result) == 1\n assert result[0].open == 100.0\n assert result[0].close == 101.0\n \n # Verify rate limiting behavior\n assert mock_get.call_count == 2\n mock_sleep.assert_called_once_with(60)\n \n # Verify cache operations\n mock_cache.get_prices.assert_called_once()\n mock_cache.set_prices.assert_called_once()", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def test_max_retries_exceeded(self, mock_get, mock_sleep):\n \"\"\"Test that function stops retrying after max_retries and returns final 429.\"\"\"\n # Setup mock responses: all 429s (exceeds max retries)\n mock_429_response = Mock()\n mock_429_response.status_code = 429\n mock_429_response.text = \"Too Many Requests\"\n \n mock_get.return_value = mock_429_response\n \n # Call the function with max_retries=2\n headers = {\"X-API-KEY\": \"test-key\"}\n url = \"https://api.financialdatasets.ai/test\"\n \n result = _make_api_request(url, headers, max_retries=2)\n \n # Verify final 429 is returned\n assert result.status_code == 429\n assert result.text == \"Too Many Requests\"\n \n # Verify requests.get was called 3 times (1 initial + 2 retries)\n assert mock_get.call_count == 3\n \n # Verify sleep was called 2 times with linear backoff: 60s, 90s\n assert mock_sleep.call_count == 2\n expected_calls = [call(60), call(90)]\n mock_sleep.assert_has_calls(expected_calls)", "creation_date": "2025-06-16T21:41:05Z", "repo": "virattt/ai-hedge-fund", "file_path": "tests/test_api_rate_limiting.py", "stars": 38051, "label": 0} +{"function": " def __init__(\n self,\n agent: Callable,\n tickers: list[str],\n start_date: str,\n end_date: str,\n initial_capital: float,\n model_name: str = \"gpt-4.1\",\n model_provider: str = \"OpenAI\",\n selected_analysts: list[str] = [],\n initial_margin_requirement: float = 0.0,\n ):\n \"\"\"\n :param agent: The trading agent (Callable).\n :param tickers: List of tickers to backtest.\n :param start_date: Start date string (YYYY-MM-DD).\n :param end_date: End date string (YYYY-MM-DD).\n :param initial_capital: Starting portfolio cash.\n :param model_name: Which LLM model name to use (gpt-4, etc).\n :param model_provider: Which LLM provider (OpenAI, etc).\n :param selected_analysts: List of analyst names or IDs to incorporate.\n :param initial_margin_requirement: The margin ratio (e.g. 
0.5 = 50%).\n \"\"\"\n self.agent = agent\n self.tickers = tickers\n self.start_date = start_date\n self.end_date = end_date\n self.initial_capital = initial_capital\n self.model_name = model_name\n self.model_provider = model_provider\n self.selected_analysts = selected_analysts\n\n # Initialize portfolio with support for long/short positions\n self.portfolio_values = []\n self.portfolio = {\n \"cash\": initial_capital,\n \"margin_used\": 0.0, # total margin usage across all short positions\n \"margin_requirement\": initial_margin_requirement, # The margin ratio required for shorts\n \"positions\": {ticker: {\"long\": 0, \"short\": 0, \"long_cost_basis\": 0.0, \"short_cost_basis\": 0.0, \"short_margin_used\": 0.0} for ticker in tickers}, # Number of shares held long # Number of shares held short # Average cost basis per share (long) # Average cost basis per share (short) # Dollars of margin used for this ticker's short\n \"realized_gains\": {\n ticker: {\n \"long\": 0.0, # Realized gains from long positions\n \"short\": 0.0, # Realized gains from short positions\n }\n for ticker in tickers\n },\n }", "creation_date": "2024-11-29T17:44:07Z", "repo": "virattt/ai-hedge-fund", "file_path": "src/backtester.py", "stars": 38051, "label": 0} +{"function": " def execute_trade(self, ticker: str, action: str, quantity: float, current_price: float):\n \"\"\"\n Execute trades with support for both long and short positions.\n `quantity` is the number of shares the agent wants to buy/sell/short/cover.\n We will only trade integer shares to keep it simple.\n \"\"\"\n if quantity <= 0:\n return 0\n\n quantity = int(quantity) # force integer shares\n position = self.portfolio[\"positions\"][ticker]\n\n if action == \"buy\":\n cost = quantity * current_price\n if cost <= self.portfolio[\"cash\"]:\n # Weighted average cost basis for the new total\n old_shares = position[\"long\"]\n old_cost_basis = position[\"long_cost_basis\"]\n new_shares = quantity\n total_shares = old_shares + new_shares\n\n if total_shares > 0:\n total_old_cost = old_cost_basis * old_shares\n total_new_cost = cost\n position[\"long_cost_basis\"] = (total_old_cost + total_new_cost) / total_shares\n\n position[\"long\"] += quantity\n self.portfolio[\"cash\"] -= cost\n return quantity\n else:\n # Calculate maximum affordable quantity\n max_quantity = int(self.portfolio[\"cash\"] / current_price)\n if max_quantity > 0:\n cost = max_quantity * current_price\n old_shares = position[\"long\"]\n old_cost_basis = position[\"long_cost_basis\"]\n total_shares = old_shares + max_quantity\n\n if total_shares > 0:\n total_old_cost = old_cost_basis * old_shares\n total_new_cost = cost\n position[\"long_cost_basis\"] = (total_old_cost + total_new_cost) / total_shares\n\n position[\"long\"] += max_quantity\n self.portfolio[\"cash\"] -= cost\n return max_quantity\n return 0\n\n elif action == \"sell\":\n # You can only sell as many as you own\n quantity = min(quantity, position[\"long\"])\n if quantity > 0:\n # Realized gain/loss using average cost basis\n avg_cost_per_share = position[\"long_cost_basis\"] if position[\"long\"] > 0 else 0\n realized_gain = (current_price - avg_cost_per_share) * quantity\n self.portfolio[\"realized_gains\"][ticker][\"long\"] += realized_gain\n\n position[\"long\"] -= quantity\n self.portfolio[\"cash\"] += quantity * current_price\n\n if position[\"long\"] == 0:\n position[\"long_cost_basis\"] = 0.0\n\n return quantity\n\n elif action == \"short\":\n \"\"\"\n Typical short sale flow:\n 1) Receive proceeds = current_price * 
quantity\n 2) Post margin_required = proceeds * margin_ratio\n 3) Net effect on cash = +proceeds - margin_required\n \"\"\"\n proceeds = current_price * quantity\n margin_required = proceeds * self.portfolio[\"margin_requirement\"]\n if margin_required <= self.portfolio[\"cash\"]:\n # Weighted average short cost basis\n old_short_shares = position[\"short\"]\n old_cost_basis = position[\"short_cost_basis\"]\n new_shares = quantity\n total_shares = old_short_shares + new_shares\n\n if total_shares > 0:\n total_old_cost = old_cost_basis * old_short_shares\n total_new_cost = current_price * new_shares\n position[\"short_cost_basis\"] = (total_old_cost + total_new_cost) / total_shares\n\n position[\"short\"] += quantity\n\n # Update margin usage\n position[\"short_margin_used\"] += margin_required\n self.portfolio[\"margin_used\"] += margin_required\n\n # Increase cash by proceeds, then subtract the required margin\n self.portfolio[\"cash\"] += proceeds\n self.portfolio[\"cash\"] -= margin_required\n return quantity\n else:\n # Calculate maximum shortable quantity\n margin_ratio = self.portfolio[\"margin_requirement\"]\n if margin_ratio > 0:\n max_quantity = int(self.portfolio[\"cash\"] / (current_price * margin_ratio))\n else:\n max_quantity = 0\n\n if max_quantity > 0:\n proceeds = current_price * max_quantity\n margin_required = proceeds * margin_ratio\n\n old_short_shares = position[\"short\"]\n old_cost_basis = position[\"short_cost_basis\"]\n total_shares = old_short_shares + max_quantity\n\n if total_shares > 0:\n total_old_cost = old_cost_basis * old_short_shares\n total_new_cost = current_price * max_quantity\n position[\"short_cost_basis\"] = (total_old_cost + total_new_cost) / total_shares\n\n position[\"short\"] += max_quantity\n position[\"short_margin_used\"] += margin_required\n self.portfolio[\"margin_used\"] += margin_required\n\n self.portfolio[\"cash\"] += proceeds\n self.portfolio[\"cash\"] -= margin_required\n return max_quantity\n return 0\n\n elif action == \"cover\":\n \"\"\"\n When covering shares:\n 1) Pay cover cost = current_price * quantity\n 2) Release a proportional share of the margin\n 3) Net effect on cash = -cover_cost + released_margin\n \"\"\"\n quantity = min(quantity, position[\"short\"])\n if quantity > 0:\n cover_cost = quantity * current_price\n avg_short_price = position[\"short_cost_basis\"] if position[\"short\"] > 0 else 0\n realized_gain = (avg_short_price - current_price) * quantity\n\n if position[\"short\"] > 0:\n portion = quantity / position[\"short\"]\n else:\n portion = 1.0\n\n margin_to_release = portion * position[\"short_margin_used\"]\n\n position[\"short\"] -= quantity\n position[\"short_margin_used\"] -= margin_to_release\n self.portfolio[\"margin_used\"] -= margin_to_release\n\n # Pay the cost to cover, but get back the released margin\n self.portfolio[\"cash\"] += margin_to_release\n self.portfolio[\"cash\"] -= cover_cost\n\n self.portfolio[\"realized_gains\"][ticker][\"short\"] += realized_gain\n\n if position[\"short\"] == 0:\n position[\"short_cost_basis\"] = 0.0\n position[\"short_margin_used\"] = 0.0\n\n return quantity\n\n return 0", "creation_date": "2024-11-29T17:44:07Z", "repo": "virattt/ai-hedge-fund", "file_path": "src/backtester.py", "stars": 38051, "label": 0} +{"function": " def calculate_portfolio_value(self, current_prices):\n \"\"\"\n Calculate total portfolio value, including:\n - cash\n - market value of long positions\n - unrealized gains/losses for short positions\n \"\"\"\n total_value = 
self.portfolio[\"cash\"]\n\n for ticker in self.tickers:\n position = self.portfolio[\"positions\"][ticker]\n price = current_prices[ticker]\n\n # Long position value\n long_value = position[\"long\"] * price\n total_value += long_value\n\n # Short position unrealized PnL = short_shares * (short_cost_basis - current_price)\n if position[\"short\"] > 0:\n total_value -= position[\"short\"] * price\n\n return total_value", "creation_date": "2024-11-29T17:44:07Z", "repo": "virattt/ai-hedge-fund", "file_path": "src/backtester.py", "stars": 38051, "label": 0} +{"function": "def get_all_fonts():\n fonts = []\n for root, dirs, files in os.walk(font_dir):\n for file in files:\n if file.endswith(\".ttf\") or file.endswith(\".ttc\"):\n fonts.append(file)\n fonts.sort()\n return fonts", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": "def get_all_songs():\n songs = []\n for root, dirs, files in os.walk(song_dir):\n for file in files:\n if file.endswith(\".mp3\"):\n songs.append(file)\n return songs", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": "def open_task_folder(task_id):\n try:\n sys = platform.system()\n path = os.path.join(root_dir, \"storage\", \"tasks\", task_id)\n if os.path.exists(path):\n if sys == \"Windows\":\n os.system(f\"start {path}\")\n if sys == \"Darwin\":\n os.system(f\"open {path}\")\n except Exception as e:\n logger.error(e)", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": "def scroll_to_bottom():\n js = \"\"\"\n \n \"\"\"\n st.components.v1.html(js, height=0, width=0)", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": "def init_log():\n logger.remove()\n _lvl = \"DEBUG\"\n\n def format_record(record):\n # \u83b7\u53d6\u65e5\u5fd7\u8bb0\u5f55\u4e2d\u7684\u6587\u4ef6\u5168\u8def\u5f84\n file_path = record[\"file\"].path\n # \u5c06\u7edd\u5bf9\u8def\u5f84\u8f6c\u6362\u4e3a\u76f8\u5bf9\u4e8e\u9879\u76ee\u6839\u76ee\u5f55\u7684\u8def\u5f84\n relative_path = os.path.relpath(file_path, root_dir)\n # \u66f4\u65b0\u8bb0\u5f55\u4e2d\u7684\u6587\u4ef6\u8def\u5f84\n record[\"file\"].path = f\"./{relative_path}\"\n # \u8fd4\u56de\u4fee\u6539\u540e\u7684\u683c\u5f0f\u5b57\u7b26\u4e32\n # \u60a8\u53ef\u4ee5\u6839\u636e\u9700\u8981\u8c03\u6574\u8fd9\u91cc\u7684\u683c\u5f0f\n record[\"message\"] = record[\"message\"].replace(root_dir, \".\")\n\n _format = (\n \"{time:%Y-%m-%d %H:%M:%S} | \"\n + \"{level} | \"\n + '\"{file.path}:{line}\": {function} '\n + \"- {message}\"\n + \"\\n\"\n )\n return _format\n\n logger.add(\n sys.stdout,\n level=_lvl,\n format=format_record,\n colorize=True,\n )", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": "def tr(key):\n loc = locales.get(st.session_state[\"ui_language\"], {})\n return loc.get(\"Translation\", {}).get(key, key)", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": " def format_record(record):\n # \u83b7\u53d6\u65e5\u5fd7\u8bb0\u5f55\u4e2d\u7684\u6587\u4ef6\u5168\u8def\u5f84\n file_path = record[\"file\"].path\n # 
\u5c06\u7edd\u5bf9\u8def\u5f84\u8f6c\u6362\u4e3a\u76f8\u5bf9\u4e8e\u9879\u76ee\u6839\u76ee\u5f55\u7684\u8def\u5f84\n relative_path = os.path.relpath(file_path, root_dir)\n # \u66f4\u65b0\u8bb0\u5f55\u4e2d\u7684\u6587\u4ef6\u8def\u5f84\n record[\"file\"].path = f\"./{relative_path}\"\n # \u8fd4\u56de\u4fee\u6539\u540e\u7684\u683c\u5f0f\u5b57\u7b26\u4e32\n # \u60a8\u53ef\u4ee5\u6839\u636e\u9700\u8981\u8c03\u6574\u8fd9\u91cc\u7684\u683c\u5f0f\n record[\"message\"] = record[\"message\"].replace(root_dir, \".\")\n\n _format = (\n \"{time:%Y-%m-%d %H:%M:%S} | \"\n + \"{level} | \"\n + '\"{file.path}:{line}\": {function} '\n + \"- {message}\"\n + \"\\n\"\n )\n return _format", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": " def log_received(msg):\n if config.ui[\"hide_log\"]:\n return\n with log_container:\n log_records.append(msg)\n st.code(\"\\n\".join(log_records))", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": " def get_keys_from_config(cfg_key):\n api_keys = config.app.get(cfg_key, [])\n if isinstance(api_keys, str):\n api_keys = [api_keys]\n api_key = \", \".join(api_keys)\n return api_key", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": " def save_keys_to_config(cfg_key, value):\n value = value.replace(\" \", \"\")\n if value:\n config.app[cfg_key] = value.split(\",\")", "creation_date": "2024-03-11T08:37:49Z", "repo": "harry0703/MoneyPrinterTurbo", "file_path": "webui/Main.py", "stars": 37967, "label": 0} +{"function": " def __init__(self, seed):\n self.seed = seed\n self.state = None", "creation_date": "2024-06-23T12:22:23Z", "repo": "2noise/ChatTTS", "file_path": "tools/seeder/ctx.py", "stars": 37121, "label": 0} +{"function": " def __enter__(self):\n self.state = torch.random.get_rng_state()\n torch.manual_seed(self.seed)", "creation_date": "2024-06-23T12:22:23Z", "repo": "2noise/ChatTTS", "file_path": "tools/seeder/ctx.py", "stars": 37121, "label": 0} +{"function": " def __exit__(self, type, value, traceback):\n torch.random.set_rng_state(self.state)", "creation_date": "2024-06-23T12:22:23Z", "repo": "2noise/ChatTTS", "file_path": "tools/seeder/ctx.py", "stars": 37121, "label": 0} +{"function": "def normalizer_en_nemo_text() -> Callable[[str], str]:\n from nemo_text_processing.text_normalization.normalize import Normalizer\n\n return partial(\n Normalizer(input_case=\"cased\", lang=\"en\").normalize,\n verbose=False,\n punct_post_process=True,\n )", "creation_date": "2024-06-24T08:28:14Z", "repo": "2noise/ChatTTS", "file_path": "tools/normalizer/en.py", "stars": 37121, "label": 0} +{"function": "def normalizer_zh_tn() -> Callable[[str], str]:\n from tn.chinese.normalizer import Normalizer\n\n return Normalizer(remove_interjections=False).normalize", "creation_date": "2024-06-24T08:28:14Z", "repo": "2noise/ChatTTS", "file_path": "tools/normalizer/zh.py", "stars": 37121, "label": 0} +{"function": "def get_logger(name: str, lv=logging.INFO, remove_exist=False, format_root=False):\n logger = logging.getLogger(name)\n logger.setLevel(lv)\n if remove_exist and logger.hasHandlers():\n logger.handlers.clear()\n if not logger.hasHandlers():\n syslog = logging.StreamHandler()\n syslog.setFormatter(Formatter())\n logger.addHandler(syslog)\n else:\n for h in logger.handlers:\n 
h.setFormatter(Formatter())\n if format_root:\n for h in logger.root.handlers:\n h.setFormatter(Formatter())\n return logger", "creation_date": "2024-06-21T16:47:56Z", "repo": "2noise/ChatTTS", "file_path": "tools/logger/log.py", "stars": 37121, "label": 0} +{"function": " def __init__(self, color=platform.system().lower() != \"windows\"):\n # https://stackoverflow.com/questions/2720319/python-figure-out-local-timezone\n self.tz = datetime.now(timezone.utc).astimezone().tzinfo\n self.color = color", "creation_date": "2024-06-21T16:47:56Z", "repo": "2noise/ChatTTS", "file_path": "tools/logger/log.py", "stars": 37121, "label": 0} +{"function": " def format(self, record: logging.LogRecord):\n logstr = \"[\" + datetime.now(self.tz).strftime(\"%z %Y%m%d %H:%M:%S\") + \"] [\"\n if self.color:\n logstr += log_level_color_code.get(record.levelno, colorCodeInfo)\n logstr += log_level_msg_str.get(record.levelno, record.levelname)\n if self.color:\n logstr += colorReset\n if sys.version_info >= (3, 9):\n fn = record.filename.removesuffix(\".py\")\n elif record.filename.endswith(\".py\"):\n fn = record.filename[:-3]\n logstr += f\"] {str(record.name)} | {fn} | {str(record.msg)%record.args}\"\n return logstr", "creation_date": "2024-06-21T16:47:56Z", "repo": "2noise/ChatTTS", "file_path": "tools/logger/log.py", "stars": 37121, "label": 0} +{"function": " def __init__(self, api_key, base_url, model):\n self.client = OpenAI(\n api_key=api_key,\n base_url=base_url,\n )\n self.model = model", "creation_date": "2024-06-24T13:11:56Z", "repo": "2noise/ChatTTS", "file_path": "tools/llm/llm.py", "stars": 37121, "label": 0} +{"function": " def call(self, user_question, temperature=0.3, prompt_version=\"kimi\", **kwargs):\n\n completion = self.client.chat.completions.create(\n model=self.model,\n messages=prompt_dict[prompt_version]\n + [\n {\"role\": \"user\", \"content\": user_question},\n ],\n temperature=temperature,\n **kwargs\n )\n return completion.choices[0].message.content", "creation_date": "2024-06-24T13:11:56Z", "repo": "2noise/ChatTTS", "file_path": "tools/llm/llm.py", "stars": 37121, "label": 0} +{"function": "def test_audio_path():\n return Path(\"./tests/data/audio/sample_10s.mp3\")", "creation_date": "2025-06-23T12:47:26Z", "repo": "docling-project/docling", "file_path": "tests/test_asr_pipeline.py", "stars": 34154, "label": 0} +{"function": "def get_asr_converter():\n \"\"\"Create a DocumentConverter configured for ASR with whisper_turbo model.\"\"\"\n pipeline_options = AsrPipelineOptions()\n pipeline_options.asr_options = asr_model_specs.WHISPER_TINY\n\n converter = DocumentConverter(\n format_options={\n InputFormat.AUDIO: AudioFormatOption(\n pipeline_cls=AsrPipeline,\n pipeline_options=pipeline_options,\n )\n }\n )\n return converter", "creation_date": "2025-06-23T12:47:26Z", "repo": "docling-project/docling", "file_path": "tests/test_asr_pipeline.py", "stars": 34154, "label": 0} +{"function": "def test_asr_pipeline_conversion(test_audio_path):\n \"\"\"Test ASR pipeline conversion using whisper_turbo model on sample_10s.mp3.\"\"\"\n # Check if the test audio file exists\n assert test_audio_path.exists(), f\"Test audio file not found: {test_audio_path}\"\n\n converter = get_asr_converter()\n\n # Convert the audio file\n doc_result: ConversionResult = converter.convert(test_audio_path)\n\n # Verify conversion was successful\n assert doc_result.status == ConversionStatus.SUCCESS, (\n f\"Conversion failed with status: {doc_result.status}\"\n )\n\n # Verify we have a document\n assert 
doc_result.document is not None, \"No document was created\"\n\n # Verify we have text content (transcribed audio)\n texts = doc_result.document.texts\n assert len(texts) > 0, \"No text content found in transcribed audio\"\n\n # Print transcribed text for verification (optional, for debugging)\n print(f\"Transcribed text from {test_audio_path.name}:\")\n for i, text_item in enumerate(texts):\n print(f\" {i + 1}: {text_item.text}\")", "creation_date": "2025-06-23T12:47:26Z", "repo": "docling-project/docling", "file_path": "tests/test_asr_pipeline.py", "stars": 34154, "label": 0} +{"function": "def _get_backend(fname):\n in_doc = InputDocument(\n path_or_stream=fname,\n format=InputFormat.ASCIIDOC,\n backend=AsciiDocBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend", "creation_date": "2024-10-23T14:14:26Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_asciidoc.py", "stars": 34154, "label": 0} +{"function": "def test_parse_picture():\n line = (\n \"image::images/example1.png[Example Image, width=200, height=150, align=center]\"\n )\n res = AsciiDocBackend._parse_picture(line)\n assert res\n assert res.get(\"width\", 0) == \"200\"\n assert res.get(\"height\", 0) == \"150\"\n assert res.get(\"uri\", \"\") == \"images/example1.png\"\n\n line = \"image::renamed-bookmark.png[Renamed bookmark]\"\n res = AsciiDocBackend._parse_picture(line)\n assert res\n assert \"width\" not in res\n assert \"height\" not in res\n assert res.get(\"uri\", \"\") == \"renamed-bookmark.png\"", "creation_date": "2024-10-23T14:14:26Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_asciidoc.py", "stars": 34154, "label": 0} +{"function": "def test_asciidocs_examples():\n fnames = sorted(glob.glob(\"./tests/data/asciidoc/*.asciidoc\"))\n\n for fname in fnames:\n print(f\"reading {fname}\")\n\n bname = os.path.basename(fname)\n gname = os.path.join(\"./tests/data/groundtruth/docling_v2/\", bname + \".md\")\n\n doc_backend = _get_backend(Path(fname))\n doc = doc_backend.convert()\n\n pred_itdoc = doc._export_to_indented_text(max_text_len=16)\n print(\"\\n\\n\", pred_itdoc)\n\n pred_mddoc = doc.export_to_markdown()\n print(\"\\n\\n\", pred_mddoc)\n\n if os.path.exists(gname):\n with open(gname) as fr:\n fr.read()\n\n # assert pred_mddoc == true_mddoc, \"pred_mddoc!=true_mddoc for asciidoc\"\n else:\n with open(gname, \"w\") as fw:\n fw.write(pred_mddoc)\n\n # print(\"\\n\\n\", doc.export_to_markdown())\n\n assert True", "creation_date": "2024-10-23T14:14:26Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_asciidoc.py", "stars": 34154, "label": 0} +{"function": "def get_csv_paths():\n # Define the directory you want to search\n directory = Path(\"./tests/data/csv/\")\n\n # List all CSV files in the directory and its subdirectories\n return sorted(directory.rglob(\"*.csv\"))", "creation_date": "2025-02-14T07:55:09Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_csv.py", "stars": 34154, "label": 0} +{"function": "def get_csv_path(name: str):\n # Return the matching CSV file path\n return Path(f\"./tests/data/csv/{name}.csv\")", "creation_date": "2025-02-14T07:55:09Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_csv.py", "stars": 34154, "label": 0} +{"function": "def get_converter():\n converter = DocumentConverter(allowed_formats=[InputFormat.CSV])\n\n return converter", "creation_date": "2025-02-14T07:55:09Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_csv.py", "stars": 
34154, "label": 0} +{"function": "def test_e2e_valid_csv_conversions():\n valid_csv_paths = get_csv_paths()\n converter = get_converter()\n\n for csv_path in valid_csv_paths:\n print(f\"converting {csv_path}\")\n\n gt_path = csv_path.parent.parent / \"groundtruth\" / \"docling_v2\" / csv_path.name\n if csv_path.stem in (\n \"csv-too-few-columns\",\n \"csv-too-many-columns\",\n \"csv-inconsistent-header\",\n ):\n with warns(UserWarning, match=\"Inconsistent column lengths\"):\n conv_result: ConversionResult = converter.convert(csv_path)\n else:\n conv_result: ConversionResult = converter.convert(csv_path)\n\n doc: DoclingDocument = conv_result.document\n\n pred_md: str = doc.export_to_markdown()\n assert verify_export(pred_md, str(gt_path) + \".md\"), \"export to md\"\n\n pred_itxt: str = doc._export_to_indented_text(\n max_text_len=70, explicit_tables=False\n )\n assert verify_export(pred_itxt, str(gt_path) + \".itxt\"), (\n \"export to indented-text\"\n )\n\n assert verify_document(\n pred_doc=doc,\n gtfile=str(gt_path) + \".json\",\n generate=GENERATE,\n ), \"export to json\"", "creation_date": "2025-02-14T07:55:09Z", "repo": "docling-project/docling", "file_path": "tests/test_backend_csv.py", "stars": 34154, "label": 0} +{"function": " def __init__(self, translator=None):\n self.translator = translator\n self.accounts_file = 'cursor_accounts.txt'", "creation_date": "2025-05-01T05:25:16Z", "repo": "yeongpin/cursor-free-vip", "file_path": "account_manager.py", "stars": 32150, "label": 0} +{"function": " def save_account_info(self, email, password, token, total_usage):\n \"\"\"Save account information to file\"\"\"\n try:\n with open(self.accounts_file, 'a', encoding='utf-8') as f:\n f.write(f\"\\n{'='*50}\\n\")\n f.write(f\"Email: {email}\\n\")\n f.write(f\"Password: {password}\\n\")\n f.write(f\"Token: {token}\\n\")\n f.write(f\"Usage Limit: {total_usage}\\n\")\n f.write(f\"{'='*50}\\n\")\n \n print(f\"{Fore.GREEN}{EMOJI['SUCCESS']} {self.translator.get('register.account_info_saved') if self.translator else 'Account information saved'}...{Style.RESET_ALL}\")\n return True\n \n except Exception as e:\n error_msg = self.translator.get('register.save_account_info_failed', error=str(e)) if self.translator else f'Failed to save account information: {str(e)}'\n print(f\"{Fore.RED}{EMOJI['ERROR']} {error_msg}{Style.RESET_ALL}\")\n return False", "creation_date": "2025-05-01T05:25:16Z", "repo": "yeongpin/cursor-free-vip", "file_path": "account_manager.py", "stars": 32150, "label": 0} +{"function": " def get_last_email_domain(self):\n \"\"\"Get the domain from the last used email\"\"\"\n try:\n if not os.path.exists(self.accounts_file):\n return None\n \n # Only read the last 1KB of data from the file\n with open(self.accounts_file, 'rb') as f:\n # Get file size\n f.seek(0, os.SEEK_END)\n file_size = f.tell()\n \n if file_size == 0:\n return None\n \n # Determine the number of bytes to read, maximum 1KB\n read_size = min(1024, file_size)\n \n # Move to the appropriate position to start reading\n f.seek(file_size - read_size)\n \n # Read the end data\n data = f.read(read_size).decode('utf-8', errors='ignore')\n \n # Split by lines and search in reverse\n lines = data.split('\\n')\n for line in reversed(lines):\n if line.strip().startswith('Email:'):\n email = line.split('Email:')[1].strip()\n # Extract domain part (after @)\n if '@' in email:\n return email.split('@')[1]\n return None\n \n # If no email is found in the last 1KB\n return None\n \n except Exception as e:\n error_msg = 
self.translator.get('account.get_last_email_domain_failed', error=str(e)) if self.translator else f'Failed to get the last used email domain: {str(e)}'\n print(f\"{Fore.RED}{EMOJI['ERROR']} {error_msg}{Style.RESET_ALL}\")\n return None", "creation_date": "2025-05-01T05:25:16Z", "repo": "yeongpin/cursor-free-vip", "file_path": "account_manager.py", "stars": 32150, "label": 0} +{"function": " def suggest_email(self, first_name, last_name):\n \"\"\"Generate a suggested email based on first and last name with the last used domain\"\"\"\n try:\n # Get the last used email domain\n domain = self.get_last_email_domain()\n if not domain:\n return None\n \n # Generate email prefix from first and last name (lowercase)\n email_prefix = f\"{first_name.lower()}.{last_name.lower()}\"\n \n # Combine prefix and domain\n suggested_email = f\"{email_prefix}@{domain}\"\n \n return suggested_email\n \n except Exception as e:\n error_msg = self.translator.get('account.suggest_email_failed', error=str(e)) if self.translator else f'Failed to suggest email: {str(e)}'\n print(f\"{Fore.RED}{EMOJI['ERROR']} {error_msg}{Style.RESET_ALL}\")\n return None", "creation_date": "2025-05-01T05:25:16Z", "repo": "yeongpin/cursor-free-vip", "file_path": "account_manager.py", "stars": 32150, "label": 0} +{"function": "def progress_bar(progress, total, prefix=\"\", length=50):\n filled = int(length * progress // total)\n bar = \"\u2588\" * filled + \"\u2591\" * (length - filled)\n percent = f\"{100 * progress / total:.1f}\"\n print(f\"\\r{prefix} |{bar}| {percent}% Complete\", end=\"\", flush=True)\n if progress == total:\n print()", "creation_date": "2025-01-14T06:47:41Z", "repo": "yeongpin/cursor-free-vip", "file_path": "build.py", "stars": 32150, "label": 0} +{"function": "def simulate_progress(message, duration=1.0, steps=20):\n print(f\"\\033[94m{message}\\033[0m\")\n for i in range(steps + 1):\n time.sleep(duration / steps)\n progress_bar(i, steps, prefix=\"Progress:\", length=40)", "creation_date": "2025-01-14T06:47:41Z", "repo": "yeongpin/cursor-free-vip", "file_path": "build.py", "stars": 32150, "label": 0} +{"function": "def build():\n # Clean screen\n os.system(\"cls\" if platform.system().lower() == \"windows\" else \"clear\")\n \n # Display logo\n print_logo()\n \n # Clean PyInstaller cache\n print(\"\\033[93m\ud83e\uddf9 Cleaning build cache...\\033[0m\")\n if os.path.exists('build'):\n shutil.rmtree('build')\n \n # Reload environment variables to ensure getting the latest version\n load_dotenv(override=True)\n version = os.getenv('VERSION', '1.0.0')\n print(f\"\\033[93m\ud83d\udce6 Building version: v{version}\\033[0m\")\n\n try:\n simulate_progress(\"Preparing build environment...\", 0.5)\n \n loading = LoadingAnimation()\n loading.start(\"Building in progress\")\n \n # Set output name based on system type\n system = platform.system().lower()\n if system == \"windows\":\n os_type = \"windows\"\n ext = \".exe\"\n elif system == \"linux\":\n os_type = \"linux\"\n ext = \"\"\n else: # Darwin\n os_type = \"mac\"\n ext = \"\"\n \n output_name = f\"CursorFreeVIP_{version}_{os_type}\"\n \n # Build command\n build_command = f'pyinstaller --clean --noconfirm build.spec'\n output_path = os.path.join('dist', f'{output_name}{ext}')\n \n os.system(build_command)\n \n loading.stop()\n\n if os.path.exists(output_path):\n print(f\"\\n\\033[92m\u2705 Build completed!\")\n print(f\"\ud83d\udce6 Executable file located: {output_path}\\033[0m\")\n else:\n print(\"\\n\\033[91m\u274c Build failed: Output file not found\\033[0m\")\n 
return False\n\n except Exception as e:\n if loading:\n loading.stop()\n print(f\"\\n\\033[91m\u274c Build process error: {str(e)}\\033[0m\")\n return False\n\n return True", "creation_date": "2025-01-14T06:47:41Z", "repo": "yeongpin/cursor-free-vip", "file_path": "build.py", "stars": 32150, "label": 0} +{"function": " def __init__(self):\n self.is_running = False\n self.animation_thread = None", "creation_date": "2025-01-14T06:47:41Z", "repo": "yeongpin/cursor-free-vip", "file_path": "build.py", "stars": 32150, "label": 0} +{"function": " def start(self, message=\"Building\"):\n self.is_running = True\n self.animation_thread = threading.Thread(target=self._animate, args=(message,))\n self.animation_thread.start()", "creation_date": "2025-01-14T06:47:41Z", "repo": "yeongpin/cursor-free-vip", "file_path": "build.py", "stars": 32150, "label": 0} +{"function": " def stop(self):\n self.is_running = False\n if self.animation_thread:\n self.animation_thread.join()\n print(\"\\r\" + \" \" * 70 + \"\\r\", end=\"\", flush=True)", "creation_date": "2025-01-14T06:47:41Z", "repo": "yeongpin/cursor-free-vip", "file_path": "build.py", "stars": 32150, "label": 0} +{"function": "def run_yapf(target):\n if os.path.isfile(target):\n files = [target]\n else:\n files = [os.path.join(root, file) for root, _, files in os.walk(target) for file in files if file.endswith('.py')]\n\n for file in files:\n try:\n command = [\"yapf\", \"-i\", file]\n subprocess.run(command, check=True, capture_output=True, text=True)\n print(f\"Formatted: {file}\")\n except subprocess.CalledProcessError as e:\n print(f\"Error formatting {file}: {e.stderr}\")", "creation_date": "2024-07-28T00:01:37Z", "repo": "exo-explore/exo", "file_path": "format.py", "stars": 28956, "label": 0} +{"function": "def main():\n if len(sys.argv) < 2:\n print(\"Usage: python3 format.py e.g. 
python3 format.py ./exo\")\n sys.exit(1)\n\n target = sys.argv[1]\n run_yapf(target)\n print(\"Formatting completed.\")", "creation_date": "2024-07-28T00:01:37Z", "repo": "exo-explore/exo", "file_path": "format.py", "stars": 28956, "label": 0} +{"function": "def _add_gpu_requires():\n global install_requires\n # Add Nvidia-GPU\n try:\n out = subprocess.run(['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'], shell=True, text=True, capture_output=True, check=False)\n if out.returncode == 0:\n install_requires.extend(extras_require[\"nvidia-gpu\"])\n except subprocess.CalledProcessError:\n pass\n\n # Add AMD-GPU\n # This will mostly work only on Linux, amd/rocm-smi is not yet supported on Windows\n try:\n out = subprocess.run(['amd-smi', 'list', '--csv'], shell=True, text=True, capture_output=True, check=False)\n if out.returncode == 0:\n install_requires.extend(extras_require[\"amd-gpu\"])\n except:\n out = subprocess.run(['rocm-smi', 'list', '--csv'], shell=True, text=True, capture_output=True, check=False)\n if out.returncode == 0:\n install_requires.extend(extras_require[\"amd-gpu\"])\n finally:\n pass", "creation_date": "2024-07-17T11:21:46Z", "repo": "exo-explore/exo", "file_path": "setup.py", "stars": 28956, "label": 0} +{"function": "def expand_engine_lists(engine_lists):\n def map_engine(engine):\n return inference_engine_classes.get(engine, engine) # Return original name if not found\n\n return [[map_engine(engine) for engine in sublist]\n for sublist in engine_lists]", "creation_date": "2024-11-18T16:52:06Z", "repo": "exo-explore/exo", "file_path": "test/test_model_helpers.py", "stars": 28956, "label": 0} +{"function": " def map_engine(engine):\n return inference_engine_classes.get(engine, engine) # Return original name if not found", "creation_date": "2024-11-18T16:52:06Z", "repo": "exo-explore/exo", "file_path": "test/test_model_helpers.py", "stars": 28956, "label": 0} +{"function": " def test_get_supported_models(self):\n for case in test_cases:\n with self.subTest(f\"{case.name}_short_names\"):\n result = get_supported_models(case.engine_lists)\n self._verify_results(case, result)\n\n with self.subTest(f\"{case.name}_class_names\"):\n class_name_lists = expand_engine_lists(case.engine_lists)\n result = get_supported_models(class_name_lists)\n self._verify_results(case, result)", "creation_date": "2024-11-18T16:52:06Z", "repo": "exo-explore/exo", "file_path": "test/test_model_helpers.py", "stars": 28956, "label": 0} +{"function": " def _verify_results(self, case, result):\n if case.expected_models_contains:\n for model in case.expected_models_contains:\n self.assertIn(model, result)\n\n if case.min_count:\n self.assertGreater(len(result), case.min_count)\n\n if case.exact_count is not None:\n self.assertEqual(len(result), case.exact_count)\n\n # Special case for distinct lists test\n if case.name == \"distinct_engine_lists\":\n self.assertLess(len(result), 15)\n self.assertNotIn(\"mistral-nemo\", result)\n\n if case.max_count:\n self.assertLess(len(result), case.max_count)", "creation_date": "2024-11-18T16:52:06Z", "repo": "exo-explore/exo", "file_path": "test/test_model_helpers.py", "stars": 28956, "label": 0} +{"function": "def test_tokenizer(name, tokenizer, verbose=False):\n print(f\"--- {name} ({tokenizer.__class__.__name__}) ---\")\n text = \"Hello! How can I assist you today? 
Let me know if you need help with something or just want to chat.\"\n encoded = tokenizer.encode(text)\n decoded = tokenizer.decode(encoded)\n\n print(f\"{encoded=}\")\n print(f\"{decoded=}\")\n\n reconstructed = \"\"\n for token in encoded:\n if verbose:\n print(f\"{token=}\")\n print(f\"{tokenizer.decode([token])=}\")\n reconstructed += tokenizer.decode([token])\n print(f\"{reconstructed=}\")\n\n strip_tokens = lambda s: s.lstrip(tokenizer.decode([tokenizer.bos_token_id])).rstrip(tokenizer.decode([tokenizer.eos_token_id]))\n assert text == strip_tokens(decoded) == strip_tokens(reconstructed)", "creation_date": "2024-08-23T15:35:33Z", "repo": "exo-explore/exo", "file_path": "test/test_tokenizers.py", "stars": 28956, "label": 0} +{"function": "def run():\n site_packages = site.getsitepackages()[0]\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n baseimages_dir = os.path.join(base_dir, \"exo\", \"apputil\", \"baseimages\")\n \n command = [\n f\"{sys.executable}\", \"-m\", \"nuitka\", \"exo/main.py\",\n \"--company-name=exolabs\",\n \"--product-name=exo\",\n \"--output-dir=dist\",\n \"--follow-imports\",\n \"--standalone\",\n \"--output-filename=exo\",\n \"--python-flag=no_site\",\n \"--onefile\",\n f\"--include-data-dir={baseimages_dir}=exo/apputil/baseimages\"\n ]\n\n if sys.platform == \"darwin\": \n command.extend([\n \"--macos-app-name=exo\",\n \"--macos-app-mode=gui\",\n \"--macos-app-version=0.0.1\",\n \"--macos-signed-app-name=net.exolabs.exo\",\n \"--include-distribution-meta=mlx\",\n \"--include-module=mlx._reprlib_fix\",\n \"--include-module=mlx._os_warning\",\n \"--include-distribution-meta=huggingface_hub\",\n \"--include-module=huggingface_hub.repocard\",\n f\"--include-data-files={site_packages}/mlx/lib/mlx.metallib=mlx/lib/mlx.metallib\",\n f\"--include-data-files={site_packages}/mlx/lib/mlx.metallib=./mlx.metallib\",\n \"--include-distribution-meta=pygments\",\n \"--nofollow-import-to=tinygrad\"\n ])\n inference_modules = [\n name for _, name, _ in pkgutil.iter_modules(['exo/inference/mlx/models'])\n ]\n for module in inference_modules:\n command.append(f\"--include-module=exo.inference.mlx.models.{module}\")\n elif sys.platform == \"win32\": \n command.extend([\n \"--windows-icon-from-ico=docs/exo-logo-win.ico\",\n \"--file-version=0.0.1\",\n \"--product-version=0.0.1\"\n ])\n elif sys.platform.startswith(\"linux\"): \n command.extend([\n \"--include-distribution-metadata=pygments\",\n \"--linux-icon=docs/exo-rounded.png\"\n ])\n try:\n subprocess.run(command, check=True)\n print(\"Build completed!\")\n except subprocess.CalledProcessError as e:\n print(f\"An error occurred: {e}\")", "creation_date": "2024-11-18T16:47:17Z", "repo": "exo-explore/exo", "file_path": "scripts/build_exo.py", "stars": 28956, "label": 0} +{"function": "def is_docstring(t):\n return t.type == token.STRING and t.string.startswith('\"\"\"') and t.line.strip().startswith('\"\"\"')", "creation_date": "2024-11-25T09:24:28Z", "repo": "exo-explore/exo", "file_path": "extra/line_counter.py", "stars": 28956, "label": 0} +{"function": "def main(\n ckpt_dir: str,\n tokenizer_path: str,\n temperature: float = 0.6,\n top_p: float = 0.9,\n max_seq_len: int = 512,\n max_batch_size: int = 4,\n max_gen_len: Optional[int] = None,\n):\n \"\"\"\n Examples to run with the models finetuned for chat. 
Prompts correspond to chat\n    turns between the user and assistant with the final one always being the user.\n\n    An optional system prompt at the beginning to control how the model should respond\n    is also supported.\n\n    The context window of llama3 models is 8192 tokens, so `max_seq_len` needs to be <= 8192.\n\n    `max_gen_len` is optional because finetuned models are able to stop generations naturally.\n    \"\"\"\n    generator = Llama.build(\n        ckpt_dir=ckpt_dir,\n        tokenizer_path=tokenizer_path,\n        max_seq_len=max_seq_len,\n        max_batch_size=max_batch_size,\n    )\n\n    dialogs: List[Dialog] = [\n        [{\"role\": \"user\", \"content\": \"what is the recipe of mayonnaise?\"}],\n        [\n            {\"role\": \"user\", \"content\": \"I am going to Paris, what should I see?\"},\n            {\n                \"role\": \"assistant\",\n                \"content\": \"\"\"\\\nParis, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.\"\"\",\n            },\n            {\"role\": \"user\", \"content\": \"What is so great about #1?\"},\n        ],\n        [\n            {\"role\": \"system\", \"content\": \"Always answer with Haiku\"},\n            {\"role\": \"user\", \"content\": \"I am going to Paris, what should I see?\"},\n        ],\n        [\n            {\n                \"role\": \"system\",\n                \"content\": \"Always answer with emojis\",\n            },\n            {\"role\": \"user\", \"content\": \"How to go from Beijing to NY?\"},\n        ],\n    ]\n    results = generator.chat_completion(\n        dialogs,\n        max_gen_len=max_gen_len,\n        temperature=temperature,\n        top_p=top_p,\n    )\n\n    for dialog, result in zip(dialogs, results):\n        for msg in dialog:\n            print(f\"{msg['role'].capitalize()}: {msg['content']}\\n\")\n        print(\n            f\"> {result['generation']['role'].capitalize()}: {result['generation']['content']}\"\n        )\n        print(\"\\n==================================\\n\")", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "example_chat_completion.py", "stars": 28838, "label": 0} +{"function": "def main(\n    ckpt_dir: str,\n    tokenizer_path: str,\n    temperature: float = 0.6,\n    top_p: float = 0.9,\n    max_seq_len: int = 128,\n    max_gen_len: int = 64,\n    max_batch_size: int = 4,\n):\n    \"\"\"\n    Examples to run with the pre-trained models (no fine-tuning). 
Prompts are\n usually in the form of an incomplete text prefix that the model can then try to complete.\n\n The context window of llama3 models is 8192 tokens, so `max_seq_len` needs to be <= 8192.\n `max_gen_len` is needed because pre-trained models usually do not stop completions naturally.\n \"\"\"\n generator = Llama.build(\n ckpt_dir=ckpt_dir,\n tokenizer_path=tokenizer_path,\n max_seq_len=max_seq_len,\n max_batch_size=max_batch_size,\n )\n\n prompts: List[str] = [\n # For these prompts, the expected answer is the natural continuation of the prompt\n \"I believe the meaning of life is\",\n \"Simply put, the theory of relativity states that \",\n \"\"\"A brief message congratulating the team on the launch:\n\n Hi everyone,\n\n I just \"\"\",\n # Few shot prompt (providing a few examples before asking model to complete more);\n \"\"\"Translate English to French:\n\n sea otter => loutre de mer\n peppermint => menthe poivr\u00e9e\n plush girafe => girafe peluche\n cheese =>\"\"\",\n ]\n results = generator.text_completion(\n prompts,\n max_gen_len=max_gen_len,\n temperature=temperature,\n top_p=top_p,\n )\n for prompt, result in zip(prompts, results):\n print(prompt)\n print(f\"> {result['generation']}\")\n print(\"\\n==================================\\n\")", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "example_text_completion.py", "stars": 28838, "label": 0} +{"function": "def get_requirements(path: str):\n return [l.strip() for l in open(path)]", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "setup.py", "stars": 28838, "label": 0} +{"function": "def sample_top_p(probs, p):\n \"\"\"\n Perform top-p (nucleus) sampling on a probability distribution.\n\n Args:\n probs (torch.Tensor): Probability distribution tensor.\n p (float): Probability threshold for top-p sampling.\n\n Returns:\n torch.Tensor: Sampled token indices.\n\n Note:\n Top-p sampling selects the smallest set of tokens whose cumulative probability mass\n exceeds the threshold p. The distribution is renormalized based on the selected tokens.\n \"\"\"\n probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)\n probs_sum = torch.cumsum(probs_sort, dim=-1)\n mask = probs_sum - probs_sort > p\n probs_sort[mask] = 0.0\n probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))\n next_token = torch.multinomial(probs_sort, num_samples=1)\n next_token = torch.gather(probs_idx, -1, next_token)\n return next_token", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/generation.py", "stars": 28838, "label": 0} +{"function": " def build(\n ckpt_dir: str,\n tokenizer_path: str,\n max_seq_len: int,\n max_batch_size: int,\n model_parallel_size: Optional[int] = None,\n seed: int = 1,\n ) -> \"Llama\":\n \"\"\"\n Build a Llama instance by initializing and loading a model checkpoint.\n\n Args:\n ckpt_dir (str): Path to the directory containing checkpoint files.\n tokenizer_path (str): Path to the tokenizer file.\n max_seq_len (int): Maximum sequence length for input text.\n max_batch_size (int): Maximum batch size for inference.\n model_parallel_size (Optional[int], optional): Number of model parallel processes.\n If not provided, it's determined from the environment. 
Defaults to None.\n\n Returns:\n Llama: An instance of the Llama class with the loaded model and tokenizer.\n\n Raises:\n AssertionError: If there are no checkpoint files in the specified directory,\n or if the model parallel size does not match the number of checkpoint files.\n\n Note:\n This method initializes the distributed process group, sets the device to CUDA,\n and loads the pre-trained model and tokenizer.\n \"\"\"\n assert 1 <= max_seq_len <= 8192, f\"max_seq_len must be between 1 and 8192, got {max_seq_len}.\"\n assert os.path.isdir(ckpt_dir), f\"Checkpoint directory '{ckpt_dir}' does not exist.\"\n assert os.path.isfile(tokenizer_path), f\"Tokenizer file '{tokenizer_path}' does not exist.\"\n \n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(\"nccl\")\n if not model_parallel_is_initialized():\n if model_parallel_size is None:\n model_parallel_size = int(os.environ.get(\"WORLD_SIZE\", 1))\n initialize_model_parallel(model_parallel_size)\n\n local_rank = int(os.environ.get(\"LOCAL_RANK\", 0))\n torch.cuda.set_device(local_rank)\n\n # seed must be the same in all processes\n torch.manual_seed(seed)\n\n if local_rank > 0:\n sys.stdout = open(os.devnull, \"w\")\n\n start_time = time.time()\n checkpoints = sorted(Path(ckpt_dir).glob(\"*.pth\"))\n assert len(checkpoints) > 0, f\"no checkpoint files found in {ckpt_dir}\"\n assert model_parallel_size == len(\n checkpoints\n ), f\"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}\"\n ckpt_path = checkpoints[get_model_parallel_rank()]\n checkpoint = torch.load(ckpt_path, map_location=\"cpu\")\n with open(Path(ckpt_dir) / \"params.json\", \"r\") as f:\n params = json.loads(f.read())\n\n model_args: ModelArgs = ModelArgs(\n max_seq_len=max_seq_len,\n max_batch_size=max_batch_size,\n **params,\n )\n tokenizer = Tokenizer(model_path=tokenizer_path)\n assert model_args.vocab_size == tokenizer.n_words\n if torch.cuda.is_bf16_supported():\n torch.set_default_tensor_type(torch.cuda.BFloat16Tensor)\n else:\n torch.set_default_tensor_type(torch.cuda.HalfTensor)\n model = Transformer(model_args)\n model.load_state_dict(checkpoint, strict=False)\n print(f\"Loaded in {time.time() - start_time:.2f} seconds\")\n\n return Llama(model, tokenizer)", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/generation.py", "stars": 28838, "label": 0} +{"function": " def __init__(self, model: Transformer, tokenizer: Tokenizer):\n self.model = model\n self.tokenizer = tokenizer\n self.formatter = ChatFormat(tokenizer)", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/generation.py", "stars": 28838, "label": 0} +{"function": " def generate(\n self,\n prompt_tokens: List[List[int]],\n max_gen_len: int,\n temperature: float = 0.6,\n top_p: float = 0.9,\n logprobs: bool = False,\n echo: bool = False,\n ) -> Tuple[List[List[int]], Optional[List[List[float]]]]:\n \"\"\"\n Generate text sequences based on provided prompts using the language generation model.\n\n Args:\n prompt_tokens (List[List[int]]): List of tokenized prompts, where each prompt is represented as a list of integers.\n max_gen_len (int): Maximum length of the generated text sequence.\n temperature (float, optional): Temperature value for controlling randomness in sampling. Defaults to 0.6.\n top_p (float, optional): Top-p probability threshold for nucleus sampling. 
Defaults to 0.9.\n logprobs (bool, optional): Flag indicating whether to compute token log probabilities. Defaults to False.\n echo (bool, optional): Flag indicating whether to include prompt tokens in the generated output. Defaults to False.\n\n Returns:\n Tuple[List[List[int]], Optional[List[List[float]]]]: A tuple containing generated token sequences and, if logprobs is True, corresponding token log probabilities.\n\n Note:\n This method uses the provided prompts as a basis for generating text. It employs nucleus sampling to produce text with controlled randomness.\n If logprobs is True, token log probabilities are computed for each generated token.\n\n \"\"\"\n params = self.model.params\n bsz = len(prompt_tokens)\n assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)\n\n min_prompt_len = min(len(t) for t in prompt_tokens)\n max_prompt_len = max(len(t) for t in prompt_tokens)\n assert max_prompt_len <= params.max_seq_len\n total_len = min(params.max_seq_len, max_gen_len + max_prompt_len)\n\n pad_id = self.tokenizer.pad_id\n tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long, device=\"cuda\")\n for k, t in enumerate(prompt_tokens):\n tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long, device=\"cuda\")\n if logprobs:\n token_logprobs = torch.zeros_like(tokens, dtype=torch.float)\n\n prev_pos = 0\n eos_reached = torch.tensor([False] * bsz, device=\"cuda\")\n input_text_mask = tokens != pad_id\n if min_prompt_len == total_len:\n logits = self.model.forward(tokens, prev_pos)\n token_logprobs = -F.cross_entropy(\n input=logits.transpose(1, 2),\n target=tokens,\n reduction=\"none\",\n ignore_index=pad_id,\n )\n\n stop_tokens = torch.tensor(list(self.tokenizer.stop_tokens))\n\n for cur_pos in range(min_prompt_len, total_len):\n logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos)\n if temperature > 0:\n probs = torch.softmax(logits[:, -1] / temperature, dim=-1)\n next_token = sample_top_p(probs, top_p)\n else:\n next_token = torch.argmax(logits[:, -1], dim=-1)\n\n next_token = next_token.reshape(-1)\n # only replace token if prompt has already been generated\n next_token = torch.where(\n input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token\n )\n tokens[:, cur_pos] = next_token\n if logprobs:\n token_logprobs[:, prev_pos + 1 : cur_pos + 1] = -F.cross_entropy(\n input=logits.transpose(1, 2),\n target=tokens[:, prev_pos + 1 : cur_pos + 1],\n reduction=\"none\",\n ignore_index=pad_id,\n )\n eos_reached |= (~input_text_mask[:, cur_pos]) & (\n torch.isin(next_token, stop_tokens)\n )\n prev_pos = cur_pos\n if all(eos_reached):\n break\n\n if logprobs:\n token_logprobs = token_logprobs.tolist()\n out_tokens, out_logprobs = [], []\n for i, toks in enumerate(tokens.tolist()):\n # cut to max gen len\n start = 0 if echo else len(prompt_tokens[i])\n toks = toks[start : len(prompt_tokens[i]) + max_gen_len]\n probs = None\n if logprobs:\n probs = token_logprobs[i][start : len(prompt_tokens[i]) + max_gen_len]\n # cut to after eos tok if any\n for stop_token in self.tokenizer.stop_tokens:\n try:\n eos_idx = toks.index(stop_token)\n toks = toks[:eos_idx]\n probs = probs[:eos_idx] if logprobs else None\n except ValueError:\n pass\n out_tokens.append(toks)\n out_logprobs.append(probs)\n return (out_tokens, out_logprobs if logprobs else None)", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/generation.py", "stars": 28838, "label": 0} +{"function": " def text_completion(\n self,\n prompts: List[str],\n temperature: float = 
0.6,\n top_p: float = 0.9,\n max_gen_len: Optional[int] = None,\n logprobs: bool = False,\n echo: bool = False,\n ) -> List[CompletionPrediction]:\n \"\"\"\n Perform text completion for a list of prompts using the language generation model.\n\n Args:\n prompts (List[str]): List of text prompts for completion.\n temperature (float, optional): Temperature value for controlling randomness in sampling. Defaults to 0.6.\n top_p (float, optional): Top-p probability threshold for nucleus sampling. Defaults to 0.9.\n max_gen_len (Optional[int], optional): Maximum length of the generated completion sequence.\n If not provided, it's set to the model's maximum sequence length minus 1.\n logprobs (bool, optional): Flag indicating whether to compute token log probabilities. Defaults to False.\n echo (bool, optional): Flag indicating whether to include prompt tokens in the generated output. Defaults to False.\n\n Returns:\n List[CompletionPrediction]: List of completion predictions, each containing the generated text completion.\n\n Note:\n This method generates text completions for the provided prompts, employing nucleus sampling to introduce controlled randomness.\n If logprobs is True, token log probabilities are computed for each generated token.\n\n \"\"\"\n if max_gen_len is None:\n max_gen_len = self.model.params.max_seq_len - 1\n prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]\n generation_tokens, generation_logprobs = self.generate(\n prompt_tokens=prompt_tokens,\n max_gen_len=max_gen_len,\n temperature=temperature,\n top_p=top_p,\n logprobs=logprobs,\n echo=echo,\n )\n if logprobs:\n return [\n {\n \"generation\": self.tokenizer.decode(t),\n \"tokens\": [self.tokenizer.decode([x]) for x in t],\n \"logprobs\": logprobs_i,\n }\n for t, logprobs_i in zip(generation_tokens, generation_logprobs)\n ]\n return [{\"generation\": self.tokenizer.decode(t)} for t in generation_tokens]", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/generation.py", "stars": 28838, "label": 0} +{"function": " def chat_completion(\n self,\n dialogs: List[Dialog],\n temperature: float = 0.6,\n top_p: float = 0.9,\n max_gen_len: Optional[int] = None,\n logprobs: bool = False,\n ) -> List[ChatPrediction]:\n \"\"\"\n Generate assistant responses for a list of conversational dialogs using the language generation model.\n\n Args:\n dialogs (List[Dialog]): List of conversational dialogs, where each dialog is a list of messages.\n temperature (float, optional): Temperature value for controlling randomness in sampling. Defaults to 0.6.\n top_p (float, optional): Top-p probability threshold for nucleus sampling. Defaults to 0.9.\n max_gen_len (Optional[int], optional): Maximum length of the generated response sequence.\n If not provided, it's set to the model's maximum sequence length minus 1.\n logprobs (bool, optional): Flag indicating whether to compute token log probabilities. 
Defaults to False.\n\n        Returns:\n            List[ChatPrediction]: List of chat predictions, each containing the assistant's generated response.\n\n        Note:\n            This method generates assistant responses for the provided conversational dialogs.\n            It employs nucleus sampling to introduce controlled randomness in text generation.\n            If logprobs is True, token log probabilities are computed for each generated token.\n        \"\"\"\n        if max_gen_len is None:\n            max_gen_len = self.model.params.max_seq_len - 1\n\n        prompt_tokens = [\n            self.formatter.encode_dialog_prompt(dialog) for dialog in dialogs\n        ]\n        generation_tokens, generation_logprobs = self.generate(\n            prompt_tokens=prompt_tokens,\n            max_gen_len=max_gen_len,\n            temperature=temperature,\n            top_p=top_p,\n            logprobs=logprobs,\n        )\n        if logprobs:\n            return [\n                {\n                    \"generation\": {\n                        \"role\": \"assistant\",\n                        \"content\": self.tokenizer.decode(t),\n                    },\n                    \"tokens\": [self.tokenizer.decode([x]) for x in t],\n                    \"logprobs\": logprobs_i,\n                }\n                for t, logprobs_i in zip(generation_tokens, generation_logprobs)\n            ]\n        return [\n            {\n                \"generation\": {\n                    \"role\": \"assistant\",\n                    \"content\": self.tokenizer.decode(t),\n                },\n            }\n            for t in generation_tokens\n        ]", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/generation.py", "stars": 28838, "label": 0} +{"function": "def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):\n    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))\n    t = torch.arange(end, device=freqs.device, dtype=torch.float32)\n    freqs = torch.outer(t, freqs)\n    freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64\n    return freqs_cis", "creation_date": "2024-04-03T04:18:18Z", "repo": "meta-llama/llama3", "file_path": "llama/model.py", "stars": 28838, "label": 0} +{"function": "def create_cover_letter(parameters: dict, llm_api_key: str):\n    \"\"\"\n    Logic to create a cover letter.\n    \"\"\"\n    try:\n        logger.info(\"Generating a cover letter based on provided parameters.\")\n\n        # Load the plain text resume\n        with open(parameters[\"uploads\"][\"plainTextResume\"], \"r\", encoding=\"utf-8\") as file:\n            plain_text_resume = file.read()\n\n        style_manager = StyleManager()\n        available_styles = style_manager.get_styles()\n\n        if not available_styles:\n            logger.warning(\"No styles available. Proceeding without style selection.\")\n        else:\n            # Present style choices to the user\n            choices = style_manager.format_choices(available_styles)\n            questions = [\n                inquirer.List(\n                    \"style\",\n                    message=\"Select a style for the resume:\",\n                    choices=choices,\n                )\n            ]\n            style_answer = inquirer.prompt(questions)\n            if style_answer and \"style\" in style_answer:\n                selected_choice = style_answer[\"style\"]\n                for style_name, (file_name, author_link) in available_styles.items():\n                    if selected_choice.startswith(style_name):\n                        style_manager.set_selected_style(style_name)\n                        logger.info(f\"Selected style: {style_name}\")\n                        break\n            else:\n                logger.warning(\"No style selected. 
Proceeding with default style.\")\n        questions = [\n            inquirer.Text('job_url', message=\"Please enter the URL of the job description:\")\n        ]\n        answers = inquirer.prompt(questions)\n        job_url = answers.get('job_url')\n        resume_generator = ResumeGenerator()\n        resume_object = Resume(plain_text_resume)\n        driver = init_browser()\n        resume_generator.set_resume_object(resume_object)\n        resume_facade = ResumeFacade( \n            api_key=llm_api_key,\n            style_manager=style_manager,\n            resume_generator=resume_generator,\n            resume_object=resume_object,\n            output_path=Path(\"data_folder/output\"),\n        )\n        resume_facade.set_driver(driver)\n        resume_facade.link_to_job(job_url)\n        result_base64, suggested_name = resume_facade.create_cover_letter() \n\n        # Decode Base64 into binary data\n        try:\n            pdf_data = base64.b64decode(result_base64)\n        except base64.binascii.Error as e:\n            logger.error(\"Error decoding Base64: %s\", e)\n            raise\n\n        # Define the output directory path using `suggested_name`\n        output_dir = Path(parameters[\"outputFileDirectory\"]) / suggested_name\n\n        # Create the directory if it does not exist\n        try:\n            output_dir.mkdir(parents=True, exist_ok=True)\n            logger.info(f\"Output directory created or already exists: {output_dir}\")\n        except IOError as e:\n            logger.error(\"Error creating output directory: %s\", e)\n            raise\n        \n        output_path = output_dir / \"cover_letter_tailored.pdf\"\n        try:\n            with open(output_path, \"wb\") as file:\n                file.write(pdf_data)\n            logger.info(f\"Cover letter saved at: {output_path}\")\n        except IOError as e:\n            logger.error(\"Error writing file: %s\", e)\n            raise\n    except Exception as e:\n        logger.exception(f\"An error occurred while creating the cover letter: {e}\")\n        raise", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": "def create_resume_pdf_job_tailored(parameters: dict, llm_api_key: str):\n    \"\"\"\n    Logic to create a CV tailored to a job description.\n    \"\"\"\n    try:\n        logger.info(\"Generating a CV based on provided parameters.\")\n\n        # Load the plain text resume\n        with open(parameters[\"uploads\"][\"plainTextResume\"], \"r\", encoding=\"utf-8\") as file:\n            plain_text_resume = file.read()\n\n        style_manager = StyleManager()\n        available_styles = style_manager.get_styles()\n\n        if not available_styles:\n            logger.warning(\"No styles available. Proceeding without style selection.\")\n        else:\n            # Present style choices to the user\n            choices = style_manager.format_choices(available_styles)\n            questions = [\n                inquirer.List(\n                    \"style\",\n                    message=\"Select a style for the resume:\",\n                    choices=choices,\n                )\n            ]\n            style_answer = inquirer.prompt(questions)\n            if style_answer and \"style\" in style_answer:\n                selected_choice = style_answer[\"style\"]\n                for style_name, (file_name, author_link) in available_styles.items():\n                    if selected_choice.startswith(style_name):\n                        style_manager.set_selected_style(style_name)\n                        logger.info(f\"Selected style: {style_name}\")\n                        break\n            else:\n                logger.warning(\"No style selected. 
Proceeding with default style.\")\n        questions = [inquirer.Text('job_url', message=\"Please enter the URL of the job description:\")]\n        answers = inquirer.prompt(questions)\n        job_url = answers.get('job_url')\n        resume_generator = ResumeGenerator()\n        resume_object = Resume(plain_text_resume)\n        driver = init_browser()\n        resume_generator.set_resume_object(resume_object)\n        resume_facade = ResumeFacade( \n            api_key=llm_api_key,\n            style_manager=style_manager,\n            resume_generator=resume_generator,\n            resume_object=resume_object,\n            output_path=Path(\"data_folder/output\"),\n        )\n        resume_facade.set_driver(driver)\n        resume_facade.link_to_job(job_url)\n        result_base64, suggested_name = resume_facade.create_resume_pdf_job_tailored() \n\n        # Decode Base64 into binary data\n        try:\n            pdf_data = base64.b64decode(result_base64)\n        except base64.binascii.Error as e:\n            logger.error(\"Error decoding Base64: %s\", e)\n            raise\n\n        # Define the output directory path using `suggested_name`\n        output_dir = Path(parameters[\"outputFileDirectory\"]) / suggested_name\n\n        # Create the directory if it does not exist\n        try:\n            output_dir.mkdir(parents=True, exist_ok=True)\n            logger.info(f\"Output directory created or already exists: {output_dir}\")\n        except IOError as e:\n            logger.error(\"Error creating output directory: %s\", e)\n            raise\n        \n        output_path = output_dir / \"resume_tailored.pdf\"\n        try:\n            with open(output_path, \"wb\") as file:\n                file.write(pdf_data)\n            logger.info(f\"Resume saved at: {output_path}\")\n        except IOError as e:\n            logger.error(\"Error writing file: %s\", e)\n            raise\n    except Exception as e:\n        logger.exception(f\"An error occurred while creating the CV: {e}\")\n        raise", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": "def create_resume_pdf(parameters: dict, llm_api_key: str):\n    \"\"\"\n    Logic to create a CV.\n    \"\"\"\n    try:\n        logger.info(\"Generating a CV based on provided parameters.\")\n\n        # Load the plain text resume\n        with open(parameters[\"uploads\"][\"plainTextResume\"], \"r\", encoding=\"utf-8\") as file:\n            plain_text_resume = file.read()\n\n        # Initialize StyleManager\n        style_manager = StyleManager()\n        available_styles = style_manager.get_styles()\n\n        if not available_styles:\n            logger.warning(\"No styles available. Proceeding without style selection.\")\n        else:\n            # Present style choices to the user\n            choices = style_manager.format_choices(available_styles)\n            questions = [\n                inquirer.List(\n                    \"style\",\n                    message=\"Select a style for the resume:\",\n                    choices=choices,\n                )\n            ]\n            style_answer = inquirer.prompt(questions)\n            if style_answer and \"style\" in style_answer:\n                selected_choice = style_answer[\"style\"]\n                for style_name, (file_name, author_link) in available_styles.items():\n                    if selected_choice.startswith(style_name):\n                        style_manager.set_selected_style(style_name)\n                        logger.info(f\"Selected style: {style_name}\")\n                        break\n            else:\n                logger.warning(\"No style selected. 
Proceeding with default style.\")\n\n        # Initialize the Resume Generator\n        resume_generator = ResumeGenerator()\n        resume_object = Resume(plain_text_resume)\n        driver = init_browser()\n        resume_generator.set_resume_object(resume_object)\n\n        # Create the ResumeFacade\n        resume_facade = ResumeFacade(\n            api_key=llm_api_key,\n            style_manager=style_manager,\n            resume_generator=resume_generator,\n            resume_object=resume_object,\n            output_path=Path(\"data_folder/output\"),\n        )\n        resume_facade.set_driver(driver)\n        result_base64 = resume_facade.create_resume_pdf()\n\n        # Decode Base64 to binary data\n        try:\n            pdf_data = base64.b64decode(result_base64)\n        except base64.binascii.Error as e:\n            logger.error(\"Error decoding Base64: %s\", e)\n            raise\n\n        # Define the output directory\n        output_dir = Path(parameters[\"outputFileDirectory\"])\n\n        # Write the PDF file\n        output_path = output_dir / \"resume_base.pdf\"\n        try:\n            with open(output_path, \"wb\") as file:\n                file.write(pdf_data)\n            logger.info(f\"Resume saved at: {output_path}\")\n        except IOError as e:\n            logger.error(\"Error writing file: %s\", e)\n            raise\n    except Exception as e:\n        logger.exception(f\"An error occurred while creating the CV: {e}\")\n        raise", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": "def handle_inquiries(selected_actions: str, parameters: dict, llm_api_key: str):\n    \"\"\"\n    Decide which function to call based on the selected user action.\n\n    :param selected_actions: The action selected by the user.\n    :param parameters: Configuration parameters dictionary.\n    :param llm_api_key: API key for the language model.\n    \"\"\"\n    try:\n        if selected_actions:\n            if \"Generate Resume\" == selected_actions:\n                logger.info(\"Crafting a standout professional resume...\")\n                create_resume_pdf(parameters, llm_api_key)\n            \n            if \"Generate Resume Tailored for Job Description\" == selected_actions:\n                logger.info(\"Customizing your resume to enhance your job application...\")\n                create_resume_pdf_job_tailored(parameters, llm_api_key)\n            \n            if \"Generate Tailored Cover Letter for Job Description\" == selected_actions:\n                logger.info(\"Designing a personalized cover letter to enhance your job application...\")\n                create_cover_letter(parameters, llm_api_key)\n\n        else:\n            logger.warning(\"No actions selected. Nothing to execute.\")\n    except Exception as e:\n        logger.exception(f\"An error occurred while handling inquiries: {e}\")\n        raise", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": "def prompt_user_action() -> str:\n    \"\"\"\n    Use inquirer to ask the user which action they want to perform.\n\n    :return: Selected action.\n    \"\"\"\n    try:\n        questions = [\n            inquirer.List(\n                'action',\n                message=\"Select the action you want to perform:\",\n                choices=[\n                    \"Generate Resume\",\n                    \"Generate Resume Tailored for Job Description\",\n                    \"Generate Tailored Cover Letter for Job Description\",\n                ],\n            ),\n        ]\n        answer = inquirer.prompt(questions)\n        if answer is None:\n            print(\"No answer provided. 
The user may have interrupted.\")\n return \"\"\n return answer.get('action', \"\")\n except Exception as e:\n print(f\"An error occurred: {e}\")\n return \"\"", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": "def main():\n \"\"\"Main entry point for the AIHawk Job Application Bot.\"\"\"\n try:\n # Define and validate the data folder\n data_folder = Path(\"data_folder\")\n secrets_file, config_file, plain_text_resume_file, output_folder = FileManager.validate_data_folder(data_folder)\n\n # Validate configuration and secrets\n config = ConfigValidator.validate_config(config_file)\n llm_api_key = ConfigValidator.validate_secrets(secrets_file)\n\n # Prepare parameters\n config[\"uploads\"] = FileManager.get_uploads(plain_text_resume_file)\n config[\"outputFileDirectory\"] = output_folder\n\n # Interactive prompt for user to select actions\n selected_actions = prompt_user_action()\n\n # Handle selected actions and execute them\n handle_inquiries(selected_actions, config, llm_api_key)\n\n except ConfigError as ce:\n logger.error(f\"Configuration error: {ce}\")\n logger.error(\n \"Refer to the configuration guide for troubleshooting: \"\n \"https://github.com/feder-cr/Auto_Jobs_Applier_AIHawk?tab=readme-ov-file#configuration\"\n )\n except FileNotFoundError as fnf:\n logger.error(f\"File not found: {fnf}\")\n logger.error(\"Ensure all required files are present in the data folder.\")\n except RuntimeError as re:\n logger.error(f\"Runtime error: {re}\")\n logger.debug(traceback.format_exc())\n except Exception as e:\n logger.exception(f\"An unexpected error occurred: {e}\")", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": " def validate_email(email: str) -> bool:\n \"\"\"Validate the format of an email address.\"\"\"\n return bool(ConfigValidator.EMAIL_REGEX.match(email))", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": " def load_yaml(yaml_path: Path) -> dict:\n \"\"\"Load and parse a YAML file.\"\"\"\n try:\n with open(yaml_path, \"r\") as stream:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ConfigError(f\"Error reading YAML file {yaml_path}: {exc}\")\n except FileNotFoundError:\n raise ConfigError(f\"YAML file not found: {yaml_path}\")", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": " def validate_config(cls, config_yaml_path: Path) -> dict:\n \"\"\"Validate the main configuration YAML file.\"\"\"\n parameters = cls.load_yaml(config_yaml_path)\n # Check for required keys and their types\n for key, expected_type in cls.REQUIRED_CONFIG_KEYS.items():\n if key not in parameters:\n if key in [\"company_blacklist\", \"title_blacklist\", \"location_blacklist\"]:\n parameters[key] = []\n else:\n raise ConfigError(f\"Missing required key '{key}' in {config_yaml_path}\")\n elif not isinstance(parameters[key], expected_type):\n if key in [\"company_blacklist\", \"title_blacklist\", \"location_blacklist\"] and parameters[key] is None:\n parameters[key] = []\n else:\n raise ConfigError(\n f\"Invalid type for key '{key}' in {config_yaml_path}. 
Expected {expected_type.__name__}.\"\n )\n cls._validate_experience_levels(parameters[\"experience_level\"], config_yaml_path)\n cls._validate_job_types(parameters[\"job_types\"], config_yaml_path)\n cls._validate_date_filters(parameters[\"date\"], config_yaml_path)\n cls._validate_list_of_strings(parameters, [\"positions\", \"locations\"], config_yaml_path)\n cls._validate_distance(parameters[\"distance\"], config_yaml_path)\n cls._validate_blacklists(parameters, config_yaml_path)\n return parameters", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": " def _validate_experience_levels(cls, experience_levels: dict, config_path: Path):\n \"\"\"Ensure experience levels are booleans.\"\"\"\n for level in cls.EXPERIENCE_LEVELS:\n if not isinstance(experience_levels.get(level), bool):\n raise ConfigError(\n f\"Experience level '{level}' must be a boolean in {config_path}\"\n )", "creation_date": "2024-12-05T00:01:47Z", "repo": "feder-cr/Jobs_Applier_AI_Agent_AIHawk", "file_path": "main.py", "stars": 28437, "label": 0} +{"function": "def fetch_requirements(paths) -> List[str]:\n \"\"\"\n This function reads the requirements file.\n\n Args:\n path (str): the path to the requirements file.\n\n Returns:\n The lines in the requirements file.\n \"\"\"\n if not isinstance(paths, list):\n paths = [paths]\n requirements = []\n for path in paths:\n with open(path, \"r\") as fd:\n requirements += [r.strip() for r in fd.readlines()]\n return requirements", "creation_date": "2024-02-27T03:58:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "setup.py", "stars": 26861, "label": 0} +{"function": "def fetch_readme() -> str:\n \"\"\"\n This function reads the README.md file in the current directory.\n\n Returns:\n The lines in the README file.\n \"\"\"\n with open(\"README.md\", encoding=\"utf-8\") as f:\n return f.read()", "creation_date": "2024-02-27T03:58:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "setup.py", "stars": 26861, "label": 0} +{"function": "def main():\n torch.set_grad_enabled(False)\n # ======================================================\n # configs & runtime variables\n # ======================================================\n # == parse configs ==\n cfg = parse_configs()\n\n # == get dtype & device ==\n dtype = to_torch_dtype(cfg.get(\"dtype\", \"fp32\"))\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if is_distributed():\n colossalai.launch_from_torch({})\n device = get_current_device()\n set_seed(cfg.get(\"seed\", 1024))\n\n # == init logger ==\n logger = create_logger()\n logger.info(\"Inference configuration:\\n %s\", pformat(cfg.to_dict()))\n verbose = cfg.get(\"verbose\", 1)\n\n # ======================================================\n # build model & loss\n # ======================================================\n if cfg.get(\"ckpt_path\", None) is not None:\n cfg.model.from_pretrained = cfg.ckpt_path\n logger.info(\"Building models...\")\n model = build_module(cfg.model, MODELS, device_map=device, torch_dtype=dtype).eval()\n log_model_params(model)\n\n # ======================================================\n # build dataset and dataloader\n # ======================================================\n logger.info(\"Building dataset...\")\n # == build dataset ==\n dataset = build_module(cfg.dataset, DATASETS)\n logger.info(\"Dataset contains %s samples.\", len(dataset))\n # == build dataloader ==\n dataloader_args = dict(\n dataset=dataset,\n 
batch_size=cfg.get(\"batch_size\", None),\n num_workers=cfg.get(\"num_workers\", 4),\n seed=cfg.get(\"seed\", 1024),\n shuffle=False,\n drop_last=False,\n pin_memory=True,\n process_group=get_data_parallel_group(),\n prefetch_factor=cfg.get(\"prefetch_factor\", None),\n )\n\n if cfg.get(\"eval_setting\", None) is not None:\n # e.g. 32x256, 1x1024\n num_frames = int(cfg.eval_setting.split(\"x\")[0])\n resolution = str(cfg.eval_setting.split(\"x\")[-1])\n bucket_config = {\n resolution + \"px\" + \"_ar1:1\": {num_frames: (1.0, 1)},\n }\n print(\"eval setting:\\n\", bucket_config)\n else:\n bucket_config = cfg.get(\"bucket_config\", None)\n\n dataloader, sampler = prepare_dataloader(\n bucket_config=bucket_config,\n num_bucket_build_workers=cfg.get(\"num_bucket_build_workers\", 1),\n **dataloader_args,\n )\n dataiter = iter(dataloader)\n num_steps_per_epoch = len(dataloader)\n\n # ======================================================\n # inference\n # ======================================================\n # prepare arguments\n save_fps = cfg.get(\"fps\", 16) // cfg.get(\"frame_interval\", 1)\n save_dir = cfg.get(\"save_dir\", None)\n save_dir_orig = os.path.join(save_dir, \"orig\")\n save_dir_recn = os.path.join(save_dir, \"recn\")\n os.makedirs(save_dir_orig, exist_ok=True)\n os.makedirs(save_dir_recn, exist_ok=True)\n\n running_sum = running_var = 0.0\n num_samples = 0\n\n # Iter over the dataset\n with tqdm(\n enumerate(dataiter),\n disable=not is_main_process() or verbose < 1,\n total=num_steps_per_epoch,\n initial=0,\n ) as pbar:\n for _, batch in pbar:\n # == load data ==\n x = batch[\"video\"].to(device, dtype) # [B, C, T, H, W]\n path = batch[\"path\"]\n\n # == vae encoding & decoding ===\n x_rec, posterior, z = model(x)\n\n num_samples += 1\n running_sum += z.mean()\n running_var += (z - running_sum / num_samples).pow(2).mean()\n if num_samples % 10 == 0:\n logger.info(\n \"VAE feature per channel stats: mean %s, var %s\",\n (running_sum / num_samples).item(),\n (running_var / num_samples).sqrt().item(),\n )\n\n # == save samples ==\n if is_main_process() and save_dir is not None:\n for idx, x_orig in enumerate(x):\n fname = os.path.splitext(os.path.basename(path[idx]))[0]\n save_path_orig = os.path.join(save_dir_orig, f\"{fname}_orig\")\n save_sample(x_orig, save_path=save_path_orig, fps=save_fps)\n\n save_path_rec = os.path.join(save_dir_recn, f\"{fname}_recn\")\n save_sample(x_rec[idx], save_path=save_path_rec, fps=save_fps)\n\n logger.info(\"Inference finished.\")\n log_cuda_max_memory(\"inference\")", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/vae/inference.py", "stars": 26861, "label": 0} +{"function": "def main():\n torch.set_grad_enabled(False)\n # ======================================================\n # configs & runtime variables\n # ======================================================\n # == parse configs ==\n cfg = parse_configs()\n\n # == get dtype & device ==\n dtype = to_torch_dtype(cfg.get(\"dtype\", \"bf16\"))\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if is_distributed():\n colossalai.launch_from_torch({})\n device = get_current_device()\n set_seed(cfg.get(\"seed\", 1024))\n\n # == init logger ==\n logger = create_logger()\n logger.info(\"Inference configuration:\\n %s\", pformat(cfg.to_dict()))\n verbose = cfg.get(\"verbose\", 1)\n\n # ======================================================\n # build model & loss\n # ======================================================\n if 
cfg.get(\"ckpt_path\", None) is not None:\n cfg.model.from_pretrained = cfg.ckpt_path\n logger.info(\"Building models...\")\n model = build_module(cfg.model, MODELS, device_map=device, torch_dtype=dtype).eval()\n log_model_params(model)\n\n # ======================================================\n # build dataset and dataloader\n # ======================================================\n logger.info(\"Building dataset...\")\n # == build dataset ==\n dataset = build_module(cfg.dataset, DATASETS)\n logger.info(\"Dataset contains %s samples.\", len(dataset))\n # == build dataloader ==\n dataloader_args = dict(\n dataset=dataset,\n batch_size=cfg.get(\"batch_size\", None),\n num_workers=cfg.get(\"num_workers\", 4),\n seed=cfg.get(\"seed\", 1024),\n shuffle=False,\n drop_last=False,\n pin_memory=True,\n process_group=get_data_parallel_group(),\n prefetch_factor=cfg.get(\"prefetch_factor\", None),\n )\n\n if cfg.get(\"eval_setting\", None) is not None:\n # e.g. 32x256x256, 1x1024x1024\n num_frames = int(cfg.eval_setting.split(\"x\")[0])\n resolution = str(cfg.eval_setting.split(\"x\")[-1])\n bucket_config = {\n resolution + \"px_ar1:1\": {num_frames: (1.0, 1)},\n }\n print(\"eval setting:\\n\", bucket_config)\n else:\n bucket_config = cfg.get(\"bucket_config\", None)\n\n dataloader, _ = prepare_dataloader(\n bucket_config=bucket_config,\n num_bucket_build_workers=cfg.get(\"num_bucket_build_workers\", 1),\n **dataloader_args,\n )\n dataiter = iter(dataloader)\n num_steps_per_epoch = len(dataloader)\n\n # ======================================================\n # inference\n # ======================================================\n num_samples = 0\n running_sum = running_var = 0.0\n\n # Iter over the dataset\n with tqdm(\n enumerate(dataiter),\n disable=not is_main_process() or verbose < 1,\n total=num_steps_per_epoch,\n initial=0,\n ) as pbar:\n for _, batch in pbar:\n # == load data ==\n x = batch[\"video\"].to(device, dtype) # [B, C, T, H, W]\n\n # == vae encoding & decoding ===\n z = model.encode(x)\n\n num_samples += 1\n running_sum += z.mean().item()\n running_var += (z - running_sum / num_samples).pow(2).mean().item()\n shift = running_sum / num_samples\n scale = (running_var / num_samples) ** 0.5\n pbar.set_postfix({\"mean\": shift, \"std\": scale})\n\n logger.info(\"Mean: %.4f, std: %.4f\", shift, scale)\n log_cuda_max_memory(\"inference\")", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/vae/stats.py", "stars": 26861, "label": 0} +{"function": "def main():\n # ======================================================\n # 1. 
configs & runtime variables\n # ======================================================\n # == parse configs ==\n cfg = parse_configs()\n\n # == get dtype & device ==\n dtype = to_torch_dtype(cfg.get(\"dtype\", \"bf16\"))\n device, coordinator = setup_device()\n checkpoint_io = CheckpointIO()\n set_seed(cfg.get(\"seed\", 1024))\n PinMemoryCache.force_dtype = dtype\n pin_memory_cache_pre_alloc_numels = cfg.get(\"pin_memory_cache_pre_alloc_numels\", None)\n PinMemoryCache.pre_alloc_numels = pin_memory_cache_pre_alloc_numels\n\n # == init ColossalAI booster ==\n plugin_type = cfg.get(\"plugin\", \"zero2\")\n plugin_config = cfg.get(\"plugin_config\", {})\n plugin = (\n create_colossalai_plugin(\n plugin=plugin_type,\n dtype=cfg.get(\"dtype\", \"bf16\"),\n grad_clip=cfg.get(\"grad_clip\", 0),\n **plugin_config,\n )\n if plugin_type != \"none\"\n else None\n )\n booster = Booster(plugin=plugin)\n\n # == init exp_dir ==\n exp_name, exp_dir = create_experiment_workspace(\n cfg.get(\"outputs\", \"./outputs\"),\n model_name=config_to_name(cfg),\n config=cfg.to_dict(),\n )\n if is_log_process(plugin_type, plugin_config):\n print(f\"changing {exp_dir} to share\")\n os.system(f\"chgrp -R share {exp_dir}\")\n\n # == init logger, tensorboard & wandb ==\n logger = create_logger(exp_dir)\n logger.info(\"Training configuration:\\n %s\", pformat(cfg.to_dict()))\n tb_writer = None\n if coordinator.is_master():\n tb_writer = create_tensorboard_writer(exp_dir)\n if cfg.get(\"wandb\", False):\n wandb.init(\n project=cfg.get(\"wandb_project\", \"Open-Sora\"),\n name=cfg.get(\"wandb_expr_name\", exp_name),\n config=cfg.to_dict(),\n dir=exp_dir,\n )\n\n # ======================================================\n # 2. build dataset and dataloader\n # ======================================================\n logger.info(\"Building dataset...\")\n # == build dataset ==\n dataset = build_module(cfg.dataset, DATASETS)\n logger.info(\"Dataset contains %s samples.\", len(dataset))\n\n # == build dataloader ==\n cache_pin_memory = pin_memory_cache_pre_alloc_numels is not None\n dataloader_args = dict(\n dataset=dataset,\n batch_size=cfg.get(\"batch_size\", None),\n num_workers=cfg.get(\"num_workers\", 4),\n seed=cfg.get(\"seed\", 1024),\n shuffle=True,\n drop_last=True,\n pin_memory=True,\n process_group=get_data_parallel_group(),\n prefetch_factor=cfg.get(\"prefetch_factor\", None),\n cache_pin_memory=cache_pin_memory,\n )\n dataloader, sampler = prepare_dataloader(\n bucket_config=cfg.get(\"bucket_config\", None),\n num_bucket_build_workers=cfg.get(\"num_bucket_build_workers\", 1),\n **dataloader_args,\n )\n num_steps_per_epoch = len(dataloader)\n\n # ======================================================\n # 3. 
build model\n # ======================================================\n logger.info(\"Building models...\")\n\n # == build vae model ==\n model = build_module(cfg.model, MODELS, device_map=device, torch_dtype=dtype).train()\n log_model_params(model)\n\n if cfg.get(\"grad_checkpoint\", False):\n set_grad_checkpoint(model)\n vae_loss_fn = VAELoss(**cfg.vae_loss_config, device=device, dtype=dtype)\n\n # == build EMA model ==\n if cfg.get(\"ema_decay\", None) is not None:\n ema = deepcopy(model).cpu().eval().requires_grad_(False)\n ema_shape_dict = record_model_param_shape(ema)\n logger.info(\"EMA model created.\")\n else:\n ema = ema_shape_dict = None\n logger.info(\"No EMA model created.\")\n\n # == build discriminator model ==\n use_discriminator = cfg.get(\"discriminator\", None) is not None\n if use_discriminator:\n discriminator = build_module(cfg.discriminator, MODELS).to(device, dtype).train()\n log_model_params(discriminator)\n generator_loss_fn = GeneratorLoss(**cfg.gen_loss_config)\n discriminator_loss_fn = DiscriminatorLoss(**cfg.disc_loss_config)\n\n # == setup optimizer ==\n optimizer = create_optimizer(model, cfg.optim)\n\n # == setup lr scheduler ==\n lr_scheduler = create_lr_scheduler(\n optimizer=optimizer, num_steps_per_epoch=num_steps_per_epoch, epochs=cfg.get(\"epochs\", 1000), **cfg.lr_scheduler\n )\n\n # == setup discriminator optimizer ==\n if use_discriminator:\n disc_optimizer = create_optimizer(discriminator, cfg.optim_discriminator)\n disc_lr_scheduler = create_lr_scheduler(\n optimizer=disc_optimizer,\n num_steps_per_epoch=num_steps_per_epoch,\n epochs=cfg.get(\"epochs\", 1000),\n **cfg.disc_lr_scheduler,\n )\n\n # =======================================================\n # 4. distributed training preparation with colossalai\n # =======================================================\n logger.info(\"Preparing for distributed training...\")\n # == boosting ==\n torch.set_default_dtype(dtype)\n model, optimizer, _, dataloader, lr_scheduler = booster.boost(\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n dataloader=dataloader,\n )\n\n if use_discriminator:\n discriminator, disc_optimizer, _, _, disc_lr_scheduler = booster.boost(\n model=discriminator,\n optimizer=disc_optimizer,\n lr_scheduler=disc_lr_scheduler,\n )\n torch.set_default_dtype(torch.float)\n logger.info(\"Boosted model for distributed training\")\n\n # == global variables ==\n cfg_epochs = cfg.get(\"epochs\", 1000)\n mixed_strategy = cfg.get(\"mixed_strategy\", None)\n mixed_image_ratio = cfg.get(\"mixed_image_ratio\", 0.0)\n # modulate mixed image ratio since we force rank 0 to be video\n num_ranks = dist.get_world_size()\n modulated_mixed_image_ratio = (\n num_ranks * mixed_image_ratio / (num_ranks - 1) if num_ranks > 1 else mixed_image_ratio\n )\n if is_log_process(plugin_type, plugin_config):\n print(\"modulated mixed image ratio:\", modulated_mixed_image_ratio)\n\n start_epoch = start_step = log_step = acc_step = 0\n running_loss = dict( # loss accumulated over config.log_every steps\n all=0.0,\n nll=0.0,\n nll_rec=0.0,\n nll_per=0.0,\n kl=0.0,\n gen=0.0,\n gen_w=0.0,\n disc=0.0,\n debug=0.0,\n )\n\n def log_loss(name, loss, loss_dict, use_video):\n # only calculate loss for video\n if use_video == 0:\n loss.data = torch.tensor(0.0, device=device, dtype=dtype)\n all_reduce_sum(loss.data)\n num_video = torch.tensor(use_video, device=device, dtype=dtype)\n all_reduce_sum(num_video)\n loss_item = loss.item() / num_video.item()\n loss_dict[name] = loss_item\n running_loss[name] += 
loss_item\n\n logger.info(\"Training for %s epochs with %s steps per epoch\", cfg_epochs, num_steps_per_epoch)\n\n # == resume ==\n if cfg.get(\"load\", None) is not None:\n logger.info(\"Loading checkpoint from %s\", cfg.load)\n start_epoch = cfg.get(\"start_epoch\", None)\n start_step = cfg.get(\"start_step\", None)\n ret = checkpoint_io.load(\n booster,\n cfg.load,\n model=model,\n ema=ema,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n sampler=(\n None if start_step is not None else sampler\n ), # if specify start step, set last_micro_batch_access_index of a new sampler instead\n )\n if start_step is not None:\n # if start step exceeds data length, go to next epoch\n if start_step > num_steps_per_epoch:\n start_epoch = (\n start_epoch + start_step // num_steps_per_epoch\n if start_epoch is not None\n else start_step // num_steps_per_epoch\n )\n start_step = start_step % num_steps_per_epoch\n sampler.set_step(start_step)\n\n start_epoch = start_epoch if start_epoch is not None else ret[0]\n start_step = start_step if start_step is not None else ret[1]\n\n if (\n use_discriminator\n and os.path.exists(os.path.join(cfg.load, \"discriminator\"))\n and not cfg.get(\"restart_disc\", False)\n ):\n booster.load_model(discriminator, os.path.join(cfg.load, \"discriminator\"))\n if cfg.get(\"load_optimizer\", True):\n booster.load_optimizer(disc_optimizer, os.path.join(cfg.load, \"disc_optimizer\"))\n if disc_lr_scheduler is not None:\n booster.load_lr_scheduler(disc_lr_scheduler, os.path.join(cfg.load, \"disc_lr_scheduler\"))\n if cfg.get(\"disc_lr\", None) is not None:\n set_lr(disc_optimizer, disc_lr_scheduler, cfg.disc_lr)\n\n logger.info(\"Loaded checkpoint %s at epoch %s step %s\", cfg.load, start_epoch, start_step)\n\n if cfg.get(\"lr\", None) is not None:\n set_lr(optimizer, lr_scheduler, cfg.lr, cfg.get(\"initial_lr\", None))\n\n if cfg.get(\"update_warmup_steps\", False):\n assert (\n cfg.lr_scheduler.get(\"warmup_steps\", None) is not None\n ), \"you need to set lr_scheduler.warmup_steps in order to pass --update-warmup-steps True\"\n set_warmup_steps(lr_scheduler, cfg.lr_scheduler.warmup_steps)\n if use_discriminator:\n assert (\n cfg.disc_lr_scheduler.get(\"warmup_steps\", None) is not None\n ), \"you need to set disc_lr_scheduler.warmup_steps in order to pass --update-warmup-steps True\"\n set_warmup_steps(disc_lr_scheduler, cfg.disc_lr_scheduler.warmup_steps)\n\n # == sharding EMA model ==\n if ema is not None:\n model_sharding(ema)\n ema = ema.to(device)\n\n if cfg.get(\"freeze_layers\", None) == \"all\":\n for param in model.module.parameters():\n param.requires_grad = False\n print(\"all layers frozen\")\n\n # model.module.requires_grad_(False)\n # =======================================================\n # 5. 
training loop\n # =======================================================\n dist.barrier()\n accumulation_steps = int(cfg.get(\"accumulation_steps\", 1))\n for epoch in range(start_epoch, cfg_epochs):\n # == set dataloader to new epoch ==\n sampler.set_epoch(epoch)\n dataiter = iter(dataloader)\n logger.info(\"Beginning epoch %s...\", epoch)\n random.seed(1024 + dist.get_rank()) # load vid/img for each rank\n\n # == training loop in an epoch ==\n with tqdm(\n enumerate(dataiter, start=start_step),\n desc=f\"Epoch {epoch}\",\n disable=not coordinator.is_master(),\n total=num_steps_per_epoch,\n initial=start_step,\n ) as pbar:\n pbar_iter = iter(pbar)\n\n def fetch_data():\n step, batch = next(pbar_iter)\n pinned_video = batch[\"video\"]\n batch[\"video\"] = pinned_video.to(device, dtype, non_blocking=True)\n return batch, step, pinned_video\n\n batch_, step_, pinned_video_ = fetch_data()\n\n profiler_ctxt = (\n profile(\n activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],\n schedule=my_schedule,\n on_trace_ready=torch.profiler.tensorboard_trace_handler(\"./log/profile\"),\n record_shapes=True,\n profile_memory=True,\n with_stack=True,\n )\n if cfg.get(\"profile\", False)\n else nullcontext()\n )\n\n with profiler_ctxt:\n for _ in range(start_step, num_steps_per_epoch):\n if cfg.get(\"profile\", False) and _ == WARMUP + ACTIVE + WAIT + 3:\n break\n\n # == load data ===\n batch, step, pinned_video = batch_, step_, pinned_video_\n if step + 1 < num_steps_per_epoch:\n batch_, step_, pinned_video_ = fetch_data()\n\n # == log config ==\n global_step = epoch * num_steps_per_epoch + step\n actual_update_step = (global_step + 1) // accumulation_steps\n log_step += 1\n acc_step += 1\n\n # == mixed strategy ==\n x = batch[\"video\"]\n t_length = x.size(2)\n use_video = 1\n if mixed_strategy == \"mixed_video_image\":\n if random.random() < modulated_mixed_image_ratio and dist.get_rank() != 0:\n # NOTE: enable the first rank to use video\n t_length = 1\n use_video = 0\n elif mixed_strategy == \"mixed_video_random\":\n t_length = random.randint(1, x.size(2))\n x = x[:, :, :t_length, :, :]\n\n with Timer(\"model\", log=True) if cfg.get(\"profile\", False) else nullcontext():\n # == forward pass ==\n x_rec, posterior, z = model(x)\n\n if cfg.get(\"profile\", False):\n profiler_ctxt.step()\n\n if cache_pin_memory:\n dataiter.remove_cache(pinned_video)\n\n # == loss initialization ==\n vae_loss = torch.tensor(0.0, device=device, dtype=dtype)\n loss_dict = {} # loss at every step\n\n # == reconstruction loss ==\n ret = vae_loss_fn(x, x_rec, posterior)\n nll_loss = ret[\"nll_loss\"]\n kl_loss = ret[\"kl_loss\"]\n recon_loss = ret[\"recon_loss\"]\n perceptual_loss = ret[\"perceptual_loss\"]\n vae_loss += nll_loss + kl_loss\n\n # == generator loss ==\n if use_discriminator:\n # turn off grad update for disc\n discriminator.requires_grad_(False)\n fake_logits = discriminator(x_rec.contiguous())\n\n generator_loss, g_loss = generator_loss_fn(\n fake_logits,\n nll_loss,\n model.module.get_last_layer(),\n actual_update_step,\n is_training=model.training,\n )\n # print(f\"generator_loss: {generator_loss}, recon_loss: {recon_loss}, perceptual_loss: {perceptual_loss}\")\n\n vae_loss += generator_loss\n # turn on disc training\n discriminator.requires_grad_(True)\n\n # == generator backward & update ==\n ctx = (\n booster.no_sync(model, optimizer)\n if cfg.get(\"plugin\", \"zero2\") in (\"zero1\", \"zero1-seq\")\n and (step + 1) % accumulation_steps != 0\n else nullcontext()\n )\n with Timer(\"backward\", 
log=True) if cfg.get(\"profile\", False) else nullcontext():\n with ctx:\n booster.backward(loss=vae_loss / accumulation_steps, optimizer=optimizer)\n\n with Timer(\"optimizer\", log=True) if cfg.get(\"profile\", False) else nullcontext():\n if (step + 1) % accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n if lr_scheduler is not None:\n lr_scheduler.step(\n actual_update_step,\n )\n # == update EMA ==\n if ema is not None:\n update_ema(\n ema,\n model.unwrap(),\n optimizer=optimizer,\n decay=cfg.get(\"ema_decay\", 0.9999),\n )\n\n # == logging ==\n log_loss(\"all\", vae_loss, loss_dict, use_video)\n log_loss(\"nll\", nll_loss, loss_dict, use_video)\n log_loss(\"nll_rec\", recon_loss, loss_dict, use_video)\n log_loss(\"nll_per\", perceptual_loss, loss_dict, use_video)\n log_loss(\"kl\", kl_loss, loss_dict, use_video)\n if use_discriminator:\n log_loss(\"gen_w\", generator_loss, loss_dict, use_video)\n log_loss(\"gen\", g_loss, loss_dict, use_video)\n\n # == loss: discriminator adversarial ==\n if use_discriminator:\n real_logits = discriminator(x.detach().contiguous())\n fake_logits = discriminator(x_rec.detach().contiguous())\n disc_loss = discriminator_loss_fn(\n real_logits,\n fake_logits,\n actual_update_step,\n )\n\n # == discriminator backward & update ==\n ctx = (\n booster.no_sync(discriminator, disc_optimizer)\n if cfg.get(\"plugin\", \"zero2\") in (\"zero1\", \"zero1-seq\")\n and (step + 1) % accumulation_steps != 0\n else nullcontext()\n )\n with ctx:\n booster.backward(loss=disc_loss / accumulation_steps, optimizer=disc_optimizer)\n if (step + 1) % accumulation_steps == 0:\n disc_optimizer.step()\n disc_optimizer.zero_grad()\n if disc_lr_scheduler is not None:\n disc_lr_scheduler.step(actual_update_step)\n\n # log\n log_loss(\"disc\", disc_loss, loss_dict, use_video)\n\n # == logging ==\n if (global_step + 1) % accumulation_steps == 0:\n if coordinator.is_master() and actual_update_step % cfg.get(\"log_every\", 1) == 0:\n avg_loss = {k: v / log_step for k, v in running_loss.items()}\n # progress bar\n pbar.set_postfix(\n {\n # \"step\": step,\n # \"global_step\": global_step,\n # \"actual_update_step\": actual_update_step,\n # \"lr\": optimizer.param_groups[0][\"lr\"],\n **{k: f\"{v:.2f}\" for k, v in avg_loss.items()},\n }\n )\n # tensorboard\n tb_writer.add_scalar(\"loss\", vae_loss.item(), actual_update_step)\n # wandb\n if cfg.get(\"wandb\", False):\n wandb.log(\n {\n \"iter\": global_step,\n \"epoch\": epoch,\n \"lr\": optimizer.param_groups[0][\"lr\"],\n \"avg_loss_\": avg_loss,\n \"avg_loss\": avg_loss[\"all\"],\n \"loss_\": loss_dict,\n \"loss\": vae_loss.item(),\n \"global_grad_norm\": optimizer.get_grad_norm(),\n },\n step=actual_update_step,\n )\n\n running_loss = {k: 0.0 for k in running_loss}\n log_step = 0\n\n # == checkpoint saving ==\n ckpt_every = cfg.get(\"ckpt_every\", 0)\n if ckpt_every > 0 and actual_update_step % ckpt_every == 0 and coordinator.is_master():\n subprocess.run(\"sudo drop_cache\", shell=True)\n\n if ckpt_every > 0 and actual_update_step % ckpt_every == 0:\n # mannually garbage collection\n gc.collect()\n\n save_dir = checkpoint_io.save(\n booster,\n exp_dir,\n model=model,\n ema=ema,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n sampler=sampler,\n epoch=epoch,\n step=step + 1,\n global_step=global_step + 1,\n batch_size=cfg.get(\"batch_size\", None),\n actual_update_step=actual_update_step,\n ema_shape_dict=ema_shape_dict,\n async_io=True,\n )\n\n if is_log_process(plugin_type, plugin_config):\n os.system(f\"chgrp 
-R share {save_dir}\")\n\n if use_discriminator:\n booster.save_model(discriminator, os.path.join(save_dir, \"discriminator\"), shard=True)\n booster.save_optimizer(\n disc_optimizer,\n os.path.join(save_dir, \"disc_optimizer\"),\n shard=True,\n size_per_shard=4096,\n )\n if disc_lr_scheduler is not None:\n booster.save_lr_scheduler(\n disc_lr_scheduler, os.path.join(save_dir, \"disc_lr_scheduler\")\n )\n dist.barrier()\n\n logger.info(\n \"Saved checkpoint at epoch %s, step %s, global_step %s to %s\",\n epoch,\n step + 1,\n actual_update_step,\n save_dir,\n )\n\n # remove old checkpoints\n rm_checkpoints(exp_dir, keep_n_latest=cfg.get(\"keep_n_latest\", -1))\n logger.info(\n \"Removed old checkpoints and kept %s latest ones.\", cfg.get(\"keep_n_latest\", -1)\n )\n\n if cfg.get(\"profile\", False):\n profiler_ctxt.export_chrome_trace(\"./log/profile/trace.json\")\n\n sampler.reset()\n start_step = 0", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/vae/train.py", "stars": 26861, "label": 0} +{"function": " def log_loss(name, loss, loss_dict, use_video):\n # only calculate loss for video\n if use_video == 0:\n loss.data = torch.tensor(0.0, device=device, dtype=dtype)\n all_reduce_sum(loss.data)\n num_video = torch.tensor(use_video, device=device, dtype=dtype)\n all_reduce_sum(num_video)\n loss_item = loss.item() / num_video.item()\n loss_dict[name] = loss_item\n running_loss[name] += loss_item", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/vae/train.py", "stars": 26861, "label": 0} +{"function": " def fetch_data():\n step, batch = next(pbar_iter)\n pinned_video = batch[\"video\"]\n batch[\"video\"] = pinned_video.to(device, dtype, non_blocking=True)\n return batch, step, pinned_video", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/vae/train.py", "stars": 26861, "label": 0} +{"function": "def main():\n # ======================================================\n # 1. configs & runtime variables\n # ======================================================\n torch.set_grad_enabled(False)\n\n # == parse configs ==\n cfg = parse_configs()\n cfg = parse_alias(cfg)\n\n # == device and dtype ==\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n dtype = to_torch_dtype(cfg.get(\"dtype\", \"bf16\"))\n seed = cfg.get(\"seed\", 1024)\n if seed is not None:\n set_seed(seed)\n\n # == init distributed env ==\n init_inference_environment()\n logger = create_logger()\n logger.info(\"Inference configuration:\\n %s\", pformat(cfg.to_dict()))\n is_saving_process = get_is_saving_process(cfg)\n booster = get_booster(cfg)\n booster_ae = get_booster(cfg, ae=True)\n\n # ======================================================\n # 2. 
build dataset and dataloader\n # ======================================================\n logger.info(\"Building dataset...\")\n\n # save directory\n save_dir = cfg.save_dir\n os.makedirs(save_dir, exist_ok=True)\n\n # == build dataset ==\n if cfg.get(\"prompt\"):\n cfg.dataset.data_path = create_tmp_csv(save_dir, cfg.prompt, cfg.get(\"ref\", None), create=is_main_process())\n dist.barrier()\n dataset = build_module(cfg.dataset, DATASETS)\n\n # range selection\n start_index = cfg.get(\"start_index\", 0)\n end_index = cfg.get(\"end_index\", None)\n if end_index is None:\n end_index = start_index + cfg.get(\"num_samples\", len(dataset.data) + 1)\n dataset.data = dataset.data[start_index:end_index]\n logger.info(\"Dataset contains %s samples.\", len(dataset))\n\n # == build dataloader ==\n dataloader_args = dict(\n dataset=dataset,\n batch_size=cfg.get(\"batch_size\", 1),\n num_workers=cfg.get(\"num_workers\", 4),\n seed=cfg.get(\"seed\", 1024),\n shuffle=False,\n drop_last=False,\n pin_memory=True,\n process_group=get_data_parallel_group(),\n prefetch_factor=cfg.get(\"prefetch_factor\", None),\n )\n dataloader, _ = prepare_dataloader(**dataloader_args)\n\n # == prepare default params ==\n sampling_option = SamplingOption(**cfg.sampling_option)\n sampling_option = sanitize_sampling_option(sampling_option)\n\n cond_type = cfg.get(\"cond_type\", \"t2v\")\n prompt_refine = cfg.get(\"prompt_refine\", False)\n fps_save = cfg.get(\"fps_save\", 16)\n num_sample = cfg.get(\"num_sample\", 1)\n\n type_name = \"image\" if cfg.sampling_option.num_frames == 1 else \"video\"\n sub_dir = f\"{type_name}_{cfg.sampling_option.resolution}\"\n os.makedirs(os.path.join(save_dir, sub_dir), exist_ok=True)\n use_t2i2v = cfg.get(\"use_t2i2v\", False)\n img_sub_dir = os.path.join(sub_dir, \"generated_condition\")\n if use_t2i2v:\n os.makedirs(os.path.join(save_dir, sub_dir, \"generated_condition\"), exist_ok=True)\n\n # ======================================================\n # 3. build model\n # ======================================================\n logger.info(\"Building models...\")\n\n # == build flux model ==\n model, model_ae, model_t5, model_clip, optional_models = prepare_models(\n cfg, device, dtype, offload_model=cfg.get(\"offload_model\", False)\n )\n log_cuda_max_memory(\"build model\")\n\n if booster:\n model, _, _, _, _ = booster.boost(model=model)\n model = model.unwrap()\n if booster_ae:\n model_ae, _, _, _, _ = booster_ae.boost(model=model_ae)\n model_ae = model_ae.unwrap()\n\n api_fn = prepare_api(model, model_ae, model_t5, model_clip, optional_models)\n\n # prepare image flux model if t2i2v\n if use_t2i2v:\n api_fn_img = prepare_api(\n optional_models[\"img_flux\"], optional_models[\"img_flux_ae\"], model_t5, model_clip, optional_models\n )\n\n # ======================================================\n # 4. 
inference\n # ======================================================\n for epoch in range(num_sample): # generate multiple samples with different seeds\n dataloader_iter = iter(dataloader)\n with tqdm(\n enumerate(dataloader_iter, start=0),\n desc=\"Inference progress\",\n disable=not is_main_process(),\n initial=0,\n total=len(dataloader),\n ) as pbar:\n for _, batch in pbar:\n original_text = batch.pop(\"text\")\n if use_t2i2v:\n batch[\"text\"] = original_text if not prompt_refine else refine_prompts(original_text, type=\"t2i\")\n sampling_option_t2i = modify_option_to_t2i(\n sampling_option,\n distilled=True,\n img_resolution=cfg.get(\"img_resolution\", \"768px\"),\n )\n if cfg.get(\"offload_model\", False):\n model_move_start = time.time()\n model = model.to(\"cpu\", dtype)\n model_ae = model_ae.to(\"cpu\", dtype)\n optional_models[\"img_flux\"].to(device, dtype)\n optional_models[\"img_flux_ae\"].to(device, dtype)\n logger.info(\n \"offload video diffusion model to cpu, load image flux model to gpu: %s s\",\n time.time() - model_move_start,\n )\n\n logger.info(\"Generating image condition by flux...\")\n x_cond = api_fn_img(\n sampling_option_t2i,\n \"t2v\",\n seed=sampling_option.seed + epoch if sampling_option.seed else None,\n channel=cfg[\"img_flux\"][\"in_channels\"],\n **batch,\n ).cpu()\n\n # save image to disk\n batch[\"name\"] = process_and_save(\n x_cond,\n batch,\n cfg,\n img_sub_dir,\n sampling_option_t2i,\n epoch,\n start_index,\n saving=is_saving_process,\n )\n dist.barrier()\n\n if cfg.get(\"offload_model\", False):\n model_move_start = time.time()\n model = model.to(device, dtype)\n model_ae = model_ae.to(device, dtype)\n optional_models[\"img_flux\"].to(\"cpu\", dtype)\n optional_models[\"img_flux_ae\"].to(\"cpu\", dtype)\n logger.info(\n \"load video diffusion model to gpu, offload image flux model to cpu: %s s\",\n time.time() - model_move_start,\n )\n\n ref_dir = os.path.join(save_dir, os.path.join(sub_dir, \"generated_condition\"))\n batch[\"ref\"] = [os.path.join(ref_dir, f\"{x}.png\") for x in batch[\"name\"]]\n cond_type = \"i2v_head\"\n\n batch[\"text\"] = original_text\n if prompt_refine:\n batch[\"text\"] = refine_prompts(\n original_text, type=\"t2v\" if cond_type == \"t2v\" else \"t2i\", image_paths=batch.get(\"ref\", None)\n )\n batch[\"text\"] = add_fps_info_to_text(batch.pop(\"text\"), fps=fps_save)\n if \"motion_score\" in cfg:\n batch[\"text\"] = add_motion_score_to_text(batch.pop(\"text\"), cfg.get(\"motion_score\", 5))\n\n logger.info(\"Generating video...\")\n x = api_fn(\n sampling_option,\n cond_type,\n seed=sampling_option.seed + epoch if sampling_option.seed else None,\n patch_size=cfg.get(\"patch_size\", 2),\n save_prefix=cfg.get(\"save_prefix\", \"\"),\n channel=cfg[\"model\"][\"in_channels\"],\n **batch,\n ).cpu()\n\n if is_saving_process:\n process_and_save(x, batch, cfg, sub_dir, sampling_option, epoch, start_index)\n dist.barrier()\n\n logger.info(\"Inference finished.\")\n log_cuda_max_memory(\"inference\")", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/diffusion/inference.py", "stars": 26861, "label": 0} +{"function": "def main():\n # ======================================================\n # 1. 
configs & runtime variables\n # ======================================================\n # == parse configs ==\n cfg = parse_configs()\n\n # == get dtype & device ==\n dtype = to_torch_dtype(cfg.get(\"dtype\", \"bf16\"))\n device, coordinator = setup_device()\n grad_ckpt_buffer_size = cfg.get(\"grad_ckpt_buffer_size\", 0)\n if grad_ckpt_buffer_size > 0:\n GLOBAL_ACTIVATION_MANAGER.setup_buffer(grad_ckpt_buffer_size, dtype)\n checkpoint_io = CheckpointIO()\n set_seed(cfg.get(\"seed\", 1024))\n PinMemoryCache.force_dtype = dtype\n pin_memory_cache_pre_alloc_numels = cfg.get(\"pin_memory_cache_pre_alloc_numels\", None)\n PinMemoryCache.pre_alloc_numels = pin_memory_cache_pre_alloc_numels\n\n # == init ColossalAI booster ==\n plugin_type = cfg.get(\"plugin\", \"zero2\")\n plugin_config = cfg.get(\"plugin_config\", {})\n plugin_kwargs = {}\n if plugin_type == \"hybrid\":\n plugin_kwargs[\"custom_policy\"] = MMDiTPolicy\n plugin = create_colossalai_plugin(\n plugin=plugin_type,\n dtype=cfg.get(\"dtype\", \"bf16\"),\n grad_clip=cfg.get(\"grad_clip\", 0),\n **plugin_config,\n **plugin_kwargs,\n )\n booster = Booster(plugin=plugin)\n\n seq_align = plugin_config.get(\"sp_size\", 1)\n\n # == init exp_dir ==\n exp_name, exp_dir = create_experiment_workspace(\n cfg.get(\"outputs\", \"./outputs\"),\n model_name=config_to_name(cfg),\n config=cfg.to_dict(),\n exp_name=cfg.get(\"exp_name\", None), # useful for automatic restart to specify the exp_name\n )\n\n if is_log_process(plugin_type, plugin_config):\n print(f\"changing {exp_dir} to share\")\n os.system(f\"chgrp -R share {exp_dir}\")\n\n # == init logger, tensorboard & wandb ==\n logger = create_logger(exp_dir)\n logger.info(\"Training configuration:\\n %s\", pformat(cfg.to_dict()))\n tb_writer = None\n if coordinator.is_master():\n tb_writer = create_tensorboard_writer(exp_dir)\n if cfg.get(\"wandb\", False):\n wandb.init(\n project=cfg.get(\"wandb_project\", \"Open-Sora\"),\n name=exp_name,\n config=cfg.to_dict(),\n dir=exp_dir,\n )\n num_gpus = dist.get_world_size() if dist.is_initialized() else 1\n tp_size = cfg[\"plugin_config\"].get(\"tp_size\", 1)\n sp_size = cfg[\"plugin_config\"].get(\"sp_size\", 1)\n pp_size = cfg[\"plugin_config\"].get(\"pp_size\", 1)\n num_groups = num_gpus // (tp_size * sp_size * pp_size)\n logger.info(\"Number of GPUs: %s\", num_gpus)\n logger.info(\"Number of groups: %s\", num_groups)\n\n # ======================================================\n # 2. 
build dataset and dataloader\n # ======================================================\n logger.info(\"Building dataset...\")\n # == build dataset ==\n dataset = build_module(cfg.dataset, DATASETS)\n logger.info(\"Dataset contains %s samples.\", len(dataset))\n\n # == build dataloader ==\n cache_pin_memory = pin_memory_cache_pre_alloc_numels is not None\n dataloader_args = dict(\n dataset=dataset,\n batch_size=cfg.get(\"batch_size\", None),\n num_workers=cfg.get(\"num_workers\", 4),\n seed=cfg.get(\"seed\", 1024),\n shuffle=True,\n drop_last=True,\n pin_memory=True,\n process_group=get_data_parallel_group(),\n prefetch_factor=cfg.get(\"prefetch_factor\", None),\n cache_pin_memory=cache_pin_memory,\n num_groups=num_groups,\n )\n print_mem(\"before prepare_dataloader\")\n dataloader, sampler = prepare_dataloader(\n bucket_config=cfg.get(\"bucket_config\", None),\n num_bucket_build_workers=cfg.get(\"num_bucket_build_workers\", 1),\n **dataloader_args,\n )\n print_mem(\"after prepare_dataloader\")\n num_steps_per_epoch = len(dataloader)\n dataset.to_efficient()\n\n # ======================================================\n # 3. build model\n # ======================================================\n logger.info(\"Building models...\")\n\n # == build model ==\n model = build_module(cfg.model, MODELS, device_map=device, torch_dtype=dtype).train()\n if cfg.get(\"grad_checkpoint\", True):\n set_grad_checkpoint(model)\n log_cuda_memory(\"diffusion\")\n log_model_params(model)\n\n # == build EMA model ==\n use_lora = cfg.get(\"lora_config\", None) is not None\n if cfg.get(\"ema_decay\", None) is not None and not use_lora:\n ema = deepcopy(model).cpu().eval().requires_grad_(False)\n ema_shape_dict = record_model_param_shape(ema)\n logger.info(\"EMA model created.\")\n else:\n ema = ema_shape_dict = None\n logger.info(\"No EMA model created.\")\n log_cuda_memory(\"EMA\")\n\n # == enable LoRA ==\n if use_lora:\n lora_config = LoraConfig(**cfg.get(\"lora_config\", None))\n model = booster.enable_lora(\n model=model,\n lora_config=lora_config,\n pretrained_dir=cfg.get(\"lora_checkpoint\", None),\n )\n log_cuda_memory(\"lora\")\n log_model_params(model)\n\n if not cfg.get(\"cached_video\", False):\n # == build autoencoder ==\n model_ae = build_module(cfg.ae, MODELS, device_map=device, torch_dtype=dtype).eval().requires_grad_(False)\n del model_ae.decoder\n log_cuda_memory(\"autoencoder\")\n log_model_params(model_ae)\n model_ae.encode = torch.compile(model_ae.encoder, dynamic=True)\n\n if not cfg.get(\"cached_text\", False):\n # == build text encoder (t5) ==\n model_t5 = build_module(cfg.t5, MODELS, device_map=device, torch_dtype=dtype).eval().requires_grad_(False)\n log_cuda_memory(\"t5\")\n log_model_params(model_t5)\n\n # == build text encoder (clip) ==\n model_clip = build_module(cfg.clip, MODELS, device_map=device, torch_dtype=dtype).eval().requires_grad_(False)\n log_cuda_memory(\"clip\")\n log_model_params(model_clip)\n\n # == setup optimizer ==\n optimizer = create_optimizer(model, cfg.optim)\n\n # == setup lr scheduler ==\n lr_scheduler = create_lr_scheduler(\n optimizer=optimizer,\n num_steps_per_epoch=num_steps_per_epoch,\n epochs=cfg.get(\"epochs\", 1000),\n warmup_steps=cfg.get(\"warmup_steps\", None),\n use_cosine_scheduler=cfg.get(\"use_cosine_scheduler\", False),\n )\n log_cuda_memory(\"optimizer\")\n\n # == prepare null vectors for dropout ==\n if cfg.get(\"cached_text\", False):\n null_txt = torch.load(\"/mnt/ddn/sora/tmp_load/null_t5.pt\", map_location=device)\n null_vec = 
torch.load(\"/mnt/ddn/sora/tmp_load/null_clip.pt\", map_location=device)\n else:\n null_txt = model_t5(\"\")\n null_vec = model_clip(\"\")\n\n # =======================================================\n # 4. distributed training preparation with colossalai\n # =======================================================\n logger.info(\"Preparing for distributed training...\")\n # == boosting ==\n torch.set_default_dtype(dtype)\n model, optimizer, _, dataloader, lr_scheduler = booster.boost(\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n dataloader=dataloader,\n )\n torch.set_default_dtype(torch.float)\n logger.info(\"Boosted model for distributed training\")\n log_cuda_memory(\"boost\")\n\n # == global variables ==\n cfg_epochs = cfg.get(\"epochs\", 1000)\n log_step = acc_step = 0\n running_loss = 0.0\n timers = Timers(record_time=cfg.get(\"record_time\", False), record_barrier=cfg.get(\"record_barrier\", False))\n nsys = NsysProfiler(\n warmup_steps=cfg.get(\"nsys_warmup_steps\", 2),\n num_steps=cfg.get(\"nsys_num_steps\", 2),\n enabled=cfg.get(\"nsys\", False),\n )\n logger.info(\"Training for %s epochs with %s steps per epoch\", cfg_epochs, num_steps_per_epoch)\n\n # == resume ==\n load_master_weights = cfg.get(\"load_master_weights\", False)\n save_master_weights = cfg.get(\"save_master_weights\", False)\n start_epoch = cfg.get(\"start_epoch\", None)\n start_step = cfg.get(\"start_step\", None)\n if cfg.get(\"load\", None) is not None:\n logger.info(\"Loading checkpoint from %s\", cfg.load)\n\n lr_scheduler_to_load = lr_scheduler\n if cfg.get(\"update_warmup_steps\", False):\n lr_scheduler_to_load = None\n ret = checkpoint_io.load(\n booster,\n cfg.load,\n model=model,\n ema=ema,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler_to_load,\n sampler=(\n None if start_step is not None else sampler\n ), # if specify start step, set last_micro_batch_access_index of a new sampler instead\n include_master_weights=load_master_weights,\n )\n start_epoch = start_epoch if start_epoch is not None else ret[0]\n start_step = start_step if start_step is not None else ret[1]\n logger.info(\"Loaded checkpoint %s at epoch %s step %s\", cfg.load, ret[0], ret[1])\n\n # load optimizer and scheduler will overwrite some of the hyperparameters, so we need to reset them\n set_lr(optimizer, lr_scheduler, cfg.optim.lr, cfg.get(\"initial_lr\", None))\n set_eps(optimizer, cfg.optim.eps)\n\n if cfg.get(\"update_warmup_steps\", False):\n assert (\n cfg.get(\"warmup_steps\", None) is not None\n ), \"you need to set warmup_steps in order to pass --update-warmup-steps True\"\n # set_warmup_steps(lr_scheduler, cfg.warmup_steps)\n lr_scheduler.step(start_epoch * num_steps_per_epoch + start_step)\n logger.info(\"The learning rate starts from %s\", optimizer.param_groups[0][\"lr\"])\n if start_step is not None:\n # if start step exceeds data length, go to next epoch\n if start_step > num_steps_per_epoch:\n start_epoch = (\n start_epoch + start_step // num_steps_per_epoch\n if start_epoch is not None\n else start_step // num_steps_per_epoch\n )\n start_step = start_step % num_steps_per_epoch\n else:\n start_step = 0\n sampler.set_step(start_step)\n start_epoch = start_epoch if start_epoch is not None else 0\n logger.info(\"Starting from epoch %s step %s\", start_epoch, start_step)\n\n # == sharding EMA model ==\n if ema is not None:\n model_sharding(ema)\n ema = ema.to(device)\n log_cuda_memory(\"sharding EMA\")\n\n # == warmup autoencoder ==\n if cfg.get(\"warmup_ae\", False):\n shapes = 
bucket_to_shapes(cfg.get(\"bucket_config\", None), batch_size=cfg.ae.batch_size)\n warmup_ae(model_ae, shapes, device, dtype)\n\n # =======================================================\n # 5. training iter\n # =======================================================\n sigma_min = cfg.get(\"sigma_min\", 1e-5)\n accumulation_steps = cfg.get(\"accumulation_steps\", 1)\n ckpt_every = cfg.get(\"ckpt_every\", 0)\n\n if cfg.get(\"is_causal_vae\", False):\n prepare_visual_condition = prepare_visual_condition_causal\n else:\n prepare_visual_condition = prepare_visual_condition_uncausal\n\n @torch.no_grad()\n def prepare_inputs(batch):\n inp = dict()\n x = batch.pop(\"video\")\n y = batch.pop(\"text\")\n bs = x.shape[0]\n\n # == encode video ==\n with nsys.range(\"encode_video\"), timers[\"encode_video\"]:\n # == prepare condition ==\n if cfg.get(\"condition_config\", None) is not None:\n # condition for i2v & v2v\n x_0, cond = prepare_visual_condition(x, cfg.condition_config, model_ae)\n cond = pack(cond, patch_size=cfg.get(\"patch_size\", 2))\n inp[\"cond\"] = cond\n else:\n if cfg.get(\"cached_video\", False):\n x_0 = batch.pop(\"video_latents\").to(device=device, dtype=dtype)\n else:\n x_0 = model_ae.encode(x)\n\n # == prepare timestep ==\n # follow SD3 time shift, shift_alpha = 1 for 256px and shift_alpha = 3 for 1024px\n shift_alpha = get_res_lin_function()((x_0.shape[-1] * x_0.shape[-2]) // 4)\n # add temporal influence\n shift_alpha *= math.sqrt(x_0.shape[-3]) # for image, T=1 so no effect\n t = torch.sigmoid(torch.randn((bs), device=device))\n t = time_shift(shift_alpha, t).to(dtype)\n\n if cfg.get(\"cached_text\", False):\n # == encode text ==\n t5_embedding = batch.pop(\"text_t5\").to(device=device, dtype=dtype)\n clip_embedding = batch.pop(\"text_clip\").to(device=device, dtype=dtype)\n with nsys.range(\"encode_text\"), timers[\"encode_text\"]:\n inp_ = prepare_ids(x_0, t5_embedding, clip_embedding)\n inp.update(inp_)\n x_0 = pack(x_0, patch_size=cfg.get(\"patch_size\", 2))\n else:\n # == encode text ==\n with nsys.range(\"encode_text\"), timers[\"encode_text\"]:\n inp_ = prepare(\n model_t5,\n model_clip,\n x_0,\n prompt=y,\n seq_align=seq_align,\n patch_size=cfg.get(\"patch_size\", 2),\n )\n inp.update(inp_)\n x_0 = pack(x_0, patch_size=cfg.get(\"patch_size\", 2))\n\n # == dropout ==\n if cfg.get(\"dropout_ratio\", None) is not None:\n cur_null_txt = null_txt\n num_pad_null_txt = inp[\"txt\"].shape[1] - cur_null_txt.shape[1]\n if num_pad_null_txt > 0:\n cur_null_txt = torch.cat([cur_null_txt] + [cur_null_txt[:, -1:]] * num_pad_null_txt, dim=1)\n inp[\"txt\"] = dropout_condition(\n cfg.dropout_ratio.get(\"t5\", 0.0),\n inp[\"txt\"],\n cur_null_txt,\n )\n inp[\"y_vec\"] = dropout_condition(\n cfg.dropout_ratio.get(\"clip\", 0.0),\n inp[\"y_vec\"],\n null_vec,\n )\n\n # == prepare noise vector ==\n x_1 = torch.randn_like(x_0, dtype=torch.float32).to(device, dtype)\n t_rev = 1 - t\n x_t = t_rev[:, None, None] * x_0 + (1 - (1 - sigma_min) * t_rev[:, None, None]) * x_1\n inp[\"img\"] = x_t\n inp[\"timesteps\"] = t.to(dtype)\n inp[\"guidance\"] = torch.full((x_t.shape[0],), cfg.get(\"guidance\", 4), device=x_t.device, dtype=x_t.dtype)\n\n return inp, x_0, x_1\n\n def run_iter(inp, x_0, x_1):\n if is_pipeline_enabled(plugin_type, plugin_config):\n inp[\"target\"] = (1 - sigma_min) * x_1 - x_0 # follow MovieGen, modify V_t accordingly\n with nsys.range(\"forward-backward\"), timers[\"forward-backward\"]:\n data_iter = iter([inp])\n if cfg.get(\"no_i2v_ref_loss\", False):\n loss_fn = (\n 
lambda out, input_: get_batch_loss(out, input_[\"target\"], input_.pop(\"masks\", None))\n / accumulation_steps\n )\n else:\n loss_fn = (\n lambda out, input_: F.mse_loss(out.float(), input_[\"target\"].float(), reduction=\"mean\")\n / accumulation_steps\n )\n loss = booster.execute_pipeline(data_iter, model, loss_fn, optimizer)[\"loss\"]\n loss = loss * accumulation_steps if loss is not None else loss\n loss_item = all_reduce_mean(loss.data.clone().detach())\n else:\n with nsys.range(\"forward\"), timers[\"forward\"]:\n model_pred = model(**inp) # B, T, L\n v_t = (1 - sigma_min) * x_1 - x_0\n if cfg.get(\"no_i2v_ref_loss\", False):\n loss = get_batch_loss(model_pred, v_t, inp.pop(\"masks\", None))\n else:\n loss = F.mse_loss(model_pred.float(), v_t.float(), reduction=\"mean\")\n\n loss_item = all_reduce_mean(loss.data.clone().detach()).item()\n\n # == backward & update ==\n dist.barrier()\n with nsys.range(\"backward\"), timers[\"backward\"]:\n ctx = (\n booster.no_sync(model, optimizer)\n if cfg.get(\"plugin\", \"zero2\") in (\"zero1\", \"zero1-seq\") and (step + 1) % accumulation_steps != 0\n else nullcontext()\n )\n with ctx:\n booster.backward(loss=(loss / accumulation_steps), optimizer=optimizer)\n\n with nsys.range(\"optim\"), timers[\"optim\"]:\n if (step + 1) % accumulation_steps == 0:\n booster.checkpoint_io.synchronize()\n optimizer.step()\n optimizer.zero_grad()\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n # == update EMA ==\n if ema is not None:\n with nsys.range(\"update_ema\"), timers[\"update_ema\"]:\n update_ema(\n ema,\n model.unwrap(),\n optimizer=optimizer,\n decay=cfg.get(\"ema_decay\", 0.9999),\n )\n\n return loss_item\n\n # =======================================================\n # 6. training loop\n # =======================================================\n dist.barrier()\n for epoch in range(start_epoch, cfg_epochs):\n # == set dataloader to new epoch ==\n sampler.set_epoch(epoch)\n dataloader_iter = iter(dataloader)\n logger.info(\"Beginning epoch %s...\", epoch)\n\n # == training loop in an epoch ==\n with tqdm(\n enumerate(dataloader_iter, start=start_step),\n desc=f\"Epoch {epoch}\",\n disable=not is_log_process(plugin_type, plugin_config),\n initial=start_step,\n total=num_steps_per_epoch,\n ) as pbar:\n pbar_iter = iter(pbar)\n\n # prefetch one for non-blocking data loading\n def fetch_data():\n step, batch = next(pbar_iter)\n # print(f\"==debug== rank{dist.get_rank()} {dataloader_iter.get_cache_info()}\")\n pinned_video = batch[\"video\"]\n batch[\"video\"] = pinned_video.to(device, dtype, non_blocking=True)\n return batch, step, pinned_video\n\n batch_, step_, pinned_video_ = fetch_data()\n\n for _ in range(start_step, num_steps_per_epoch):\n nsys.step()\n # == load data ===\n with nsys.range(\"load_data\"), timers[\"load_data\"]:\n batch, step, pinned_video = batch_, step_, pinned_video_\n\n if step + 1 < num_steps_per_epoch:\n # only fetch new data if not last step\n batch_, step_, pinned_video_ = fetch_data()\n\n # == run iter ==\n with nsys.range(\"iter\"), timers[\"iter\"]:\n inp, x_0, x_1 = prepare_inputs(batch)\n if cache_pin_memory:\n dataloader_iter.remove_cache(pinned_video)\n loss = run_iter(inp, x_0, x_1)\n\n # == update log info ==\n if loss is not None:\n running_loss += loss\n\n # == log config ==\n global_step = epoch * num_steps_per_epoch + step\n actual_update_step = (global_step + 1) // accumulation_steps\n log_step += 1\n acc_step += 1\n\n # == logging ==\n if (global_step + 1) % accumulation_steps == 0:\n if 
actual_update_step % cfg.get(\"log_every\", 1) == 0:\n if is_log_process(plugin_type, plugin_config):\n avg_loss = running_loss / log_step\n # progress bar\n pbar.set_postfix(\n {\n \"loss\": avg_loss,\n \"global_grad_norm\": optimizer.get_grad_norm(),\n \"step\": step,\n \"global_step\": global_step,\n # \"actual_update_step\": actual_update_step,\n \"lr\": optimizer.param_groups[0][\"lr\"],\n }\n )\n # tensorboard\n if tb_writer is not None:\n tb_writer.add_scalar(\"loss\", loss, actual_update_step)\n # wandb\n if cfg.get(\"wandb\", False):\n wandb_dict = {\n \"iter\": global_step,\n \"acc_step\": acc_step,\n \"epoch\": epoch,\n \"loss\": loss,\n \"avg_loss\": avg_loss,\n \"lr\": optimizer.param_groups[0][\"lr\"],\n \"eps\": optimizer.param_groups[0][\"eps\"],\n \"global_grad_norm\": optimizer.get_grad_norm(), # test grad norm\n }\n if cfg.get(\"record_time\", False):\n wandb_dict.update(timers.to_dict())\n wandb.log(wandb_dict, step=actual_update_step)\n\n running_loss = 0.0\n log_step = 0\n\n # == checkpoint saving ==\n # uncomment below 3 lines to forcely clean cache\n with nsys.range(\"clean_cache\"), timers[\"clean_cache\"]:\n if ckpt_every > 0 and actual_update_step % ckpt_every == 0 and coordinator.is_master():\n subprocess.run(\"sudo drop_cache\", shell=True)\n\n with nsys.range(\"checkpoint\"), timers[\"checkpoint\"]:\n if ckpt_every > 0 and actual_update_step % ckpt_every == 0:\n # mannual garbage collection\n gc.collect()\n\n save_dir = checkpoint_io.save(\n booster,\n exp_dir,\n model=model,\n ema=ema,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n sampler=sampler,\n epoch=epoch,\n step=step + 1,\n global_step=global_step + 1,\n batch_size=cfg.get(\"batch_size\", None),\n lora=use_lora,\n actual_update_step=actual_update_step,\n ema_shape_dict=ema_shape_dict,\n async_io=cfg.get(\"async_io\", False),\n include_master_weights=save_master_weights,\n )\n\n if is_log_process(plugin_type, plugin_config):\n os.system(f\"chgrp -R share {save_dir}\")\n\n logger.info(\n \"Saved checkpoint at epoch %s, step %s, global_step %s to %s\",\n epoch,\n step + 1,\n actual_update_step,\n save_dir,\n )\n\n # remove old checkpoints\n rm_checkpoints(exp_dir, keep_n_latest=cfg.get(\"keep_n_latest\", -1))\n logger.info(\"Removed old checkpoints and kept %s latest ones.\", cfg.get(\"keep_n_latest\", -1))\n # uncomment below 3 lines to benchmark checkpoint\n # if ckpt_every > 0 and actual_update_step % ckpt_every == 0:\n # booster.checkpoint_io._sync_io()\n # checkpoint_io._sync_io()\n # == terminal timer ==\n if cfg.get(\"record_time\", False):\n print(timers.to_str(epoch, step))\n\n sampler.reset()\n start_step = 0\n log_cuda_max_memory(\"final\")", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/diffusion/train.py", "stars": 26861, "label": 0} +{"function": " def prepare_inputs(batch):\n inp = dict()\n x = batch.pop(\"video\")\n y = batch.pop(\"text\")\n bs = x.shape[0]\n\n # == encode video ==\n with nsys.range(\"encode_video\"), timers[\"encode_video\"]:\n # == prepare condition ==\n if cfg.get(\"condition_config\", None) is not None:\n # condition for i2v & v2v\n x_0, cond = prepare_visual_condition(x, cfg.condition_config, model_ae)\n cond = pack(cond, patch_size=cfg.get(\"patch_size\", 2))\n inp[\"cond\"] = cond\n else:\n if cfg.get(\"cached_video\", False):\n x_0 = batch.pop(\"video_latents\").to(device=device, dtype=dtype)\n else:\n x_0 = model_ae.encode(x)\n\n # == prepare timestep ==\n # follow SD3 time shift, shift_alpha = 1 for 256px 
and shift_alpha = 3 for 1024px\n shift_alpha = get_res_lin_function()((x_0.shape[-1] * x_0.shape[-2]) // 4)\n # add temporal influence\n shift_alpha *= math.sqrt(x_0.shape[-3]) # for image, T=1 so no effect\n t = torch.sigmoid(torch.randn((bs), device=device))\n t = time_shift(shift_alpha, t).to(dtype)\n\n if cfg.get(\"cached_text\", False):\n # == encode text ==\n t5_embedding = batch.pop(\"text_t5\").to(device=device, dtype=dtype)\n clip_embedding = batch.pop(\"text_clip\").to(device=device, dtype=dtype)\n with nsys.range(\"encode_text\"), timers[\"encode_text\"]:\n inp_ = prepare_ids(x_0, t5_embedding, clip_embedding)\n inp.update(inp_)\n x_0 = pack(x_0, patch_size=cfg.get(\"patch_size\", 2))\n else:\n # == encode text ==\n with nsys.range(\"encode_text\"), timers[\"encode_text\"]:\n inp_ = prepare(\n model_t5,\n model_clip,\n x_0,\n prompt=y,\n seq_align=seq_align,\n patch_size=cfg.get(\"patch_size\", 2),\n )\n inp.update(inp_)\n x_0 = pack(x_0, patch_size=cfg.get(\"patch_size\", 2))\n\n # == dropout ==\n if cfg.get(\"dropout_ratio\", None) is not None:\n cur_null_txt = null_txt\n num_pad_null_txt = inp[\"txt\"].shape[1] - cur_null_txt.shape[1]\n if num_pad_null_txt > 0:\n cur_null_txt = torch.cat([cur_null_txt] + [cur_null_txt[:, -1:]] * num_pad_null_txt, dim=1)\n inp[\"txt\"] = dropout_condition(\n cfg.dropout_ratio.get(\"t5\", 0.0),\n inp[\"txt\"],\n cur_null_txt,\n )\n inp[\"y_vec\"] = dropout_condition(\n cfg.dropout_ratio.get(\"clip\", 0.0),\n inp[\"y_vec\"],\n null_vec,\n )\n\n # == prepare noise vector ==\n x_1 = torch.randn_like(x_0, dtype=torch.float32).to(device, dtype)\n t_rev = 1 - t\n x_t = t_rev[:, None, None] * x_0 + (1 - (1 - sigma_min) * t_rev[:, None, None]) * x_1\n inp[\"img\"] = x_t\n inp[\"timesteps\"] = t.to(dtype)\n inp[\"guidance\"] = torch.full((x_t.shape[0],), cfg.get(\"guidance\", 4), device=x_t.device, dtype=x_t.dtype)\n\n return inp, x_0, x_1", "creation_date": "2025-03-12T05:14:22Z", "repo": "hpcaitech/Open-Sora", "file_path": "scripts/diffusion/train.py", "stars": 26861, "label": 0} +{"function": " def __init__(\n self,\n role: str,\n raw_utterance: str,\n utterance_type: str,\n claim_to_make: Optional[str] = None,\n utterance: Optional[str] = None,\n queries: Optional[List[str]] = None,\n raw_retrieved_info: Optional[List[Information]] = None,\n cited_info: Optional[List[Information]] = None,\n ):\n self.utterance = utterance if utterance is not None else raw_utterance\n self.raw_utterance = raw_utterance\n self.role = role if \":\" not in role else role.split(\":\")[0]\n self.role_description = \"\" if \":\" not in role else role.split(\":\")[1]\n self.queries = queries if queries is not None else []\n self.raw_retrieved_info = (\n raw_retrieved_info if raw_retrieved_info is not None else []\n )\n self.cited_info = cited_info if cited_info is not None else {}\n self.utterance_type = utterance_type\n self.claim_to_make = claim_to_make if claim_to_make is not None else \"\"", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def get_all_citation_index(self):\n citation_pattern = re.compile(r\"\\[(\\d+)\\]\")\n return list(map(int, citation_pattern.findall(self.utterance)))", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def to_dict(self):\n raw_retrieved_info = [info.to_dict() for info in self.raw_retrieved_info]\n return {\n 
\"utterance\": self.utterance,\n \"raw_utterance\": self.raw_utterance,\n \"role\": self.role,\n \"role_description\": self.role_description,\n \"queries\": self.queries,\n \"utterance_type\": self.utterance_type,\n \"claim_to_make\": self.claim_to_make,\n \"raw_retrieved_info\": raw_retrieved_info,\n \"cited_info\": None,\n }", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def from_dict(cls, conv_turn_dict: Dict):\n raw_retrieved_info = [\n Information.from_dict(info) for info in conv_turn_dict[\"raw_retrieved_info\"]\n ]\n\n return cls(\n utterance=conv_turn_dict[\"utterance\"],\n raw_utterance=conv_turn_dict[\"raw_utterance\"],\n role=f\"{conv_turn_dict['role']}: {conv_turn_dict['role_description']}\",\n queries=conv_turn_dict[\"queries\"],\n raw_retrieved_info=raw_retrieved_info,\n cited_info=None,\n utterance_type=conv_turn_dict[\"utterance_type\"],\n claim_to_make=conv_turn_dict[\"claim_to_make\"],\n )", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def __init__(\n self,\n name: str,\n content: Optional[str] = None,\n parent: Optional[\"KnowledgeNode\"] = None,\n children: Optional[List[\"KnowledgeNode\"]] = None,\n synthesize_output: Optional[str] = None,\n need_regenerate_synthesize_output: bool = True,\n ):\n \"\"\"\n Initializes a KnowledgeNode instance.\n\n Args:\n name (str): The name of the node.\n content (list, optional): A list of information uuid. Defaults to None.\n parent (KnowledgeNode, optional): The parent node of the current node. Defaults to None.\n \"\"\"\n self.name = name\n self.content: Set[int] = set(content) if content is not None else set()\n self.children = [] if children is None else children\n self.parent = parent\n self.synthesize_output = synthesize_output\n self.need_regenerate_synthesize_output = need_regenerate_synthesize_output", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def collect_all_content(self):\n \"\"\"\n Collects all content from the current node and its descendants.\n\n Returns:\n Set[int]: A set containing all content from the current node and its descendants.\n \"\"\"\n all_content = set(self.content)\n for child in self.children:\n all_content.update(child.collect_all_content())\n return all_content", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def has_child(self, child_node_name: str):\n \"\"\"\n Check if the node has the child of given name.\n \"\"\"\n return child_node_name in [child.name for child in self.children]", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def add_child(self, child_node_name: str, duplicate_handling: str = \"skip\"):\n \"\"\"\n Adds a child node to the current node.\n duplicate_handling (str): How to handle duplicate nodes. Options are \"skip\", \"none\", and \"raise error\".\n \"\"\"\n if self.has_child(child_node_name):\n if duplicate_handling == \"skip\":\n for child in self.children:\n if child.name == child_node_name:\n return child\n elif duplicate_handling == \"raise error\":\n raise Exception(\n f\"Insert node error. 
Node {child_node_name} already exists under its parent node {self.name}.\"\n )\n child_node = KnowledgeNode(name=child_node_name, parent=self)\n self.children.append(child_node)\n return child_node", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def get_parent(self):\n \"\"\"\n Returns the parent node of the current node.\n\n Returns:\n KnowledgeNode: The parent node of the current node.\n \"\"\"\n return self.parent", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": " def get_children(self):\n \"\"\"\n Returns the children of the current node.\n\n Returns:\n list: A list of child KnowledgeNode instances.\n \"\"\"\n return self.children", "creation_date": "2024-09-25T04:44:35Z", "repo": "stanford-oval/storm", "file_path": "knowledge_storm/dataclass.py", "stars": 26518, "label": 0} +{"function": "def initialize() -> SessionVariables:\n \"\"\"Initialize app logic.\"\"\"\n if \"session_variables\" not in st.session_state:\n st.set_page_config(\n layout=\"wide\",\n initial_sidebar_state=\"collapsed\",\n page_title=\"GraphRAG\",\n )\n sv = SessionVariables()\n datasets = load_dataset_listing()\n sv.datasets.value = datasets\n sv.dataset.value = (\n st.query_params[\"dataset\"].lower()\n if \"dataset\" in st.query_params\n else datasets[0].key\n )\n load_dataset(sv.dataset.value, sv)\n st.session_state[\"session_variables\"] = sv\n return st.session_state[\"session_variables\"]", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "def load_dataset(dataset: str, sv: SessionVariables):\n \"\"\"Load dataset from the dropdown.\"\"\"\n sv.dataset.value = dataset\n sv.dataset_config.value = next(\n (d for d in sv.datasets.value if d.key == dataset), None\n )\n if sv.dataset_config.value is not None:\n sv.datasource.value = create_datasource(f\"{sv.dataset_config.value.path}\") # type: ignore\n sv.graphrag_config.value = sv.datasource.value.read_settings(\"settings.yaml\")\n load_knowledge_model(sv)", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "def dataset_name(key: str, sv: SessionVariables) -> str:\n \"\"\"Get dataset name.\"\"\"\n return next((d for d in sv.datasets.value if d.key == key), None).name # type: ignore", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_all_searches(query: str, sv: SessionVariables) -> list[SearchResult]:\n \"\"\"Run all search engines and return the results.\"\"\"\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n tasks = []\n if sv.include_drift_search.value:\n tasks.append(\n run_drift_search(\n query=query,\n sv=sv,\n )\n )\n\n if sv.include_basic_rag.value:\n tasks.append(\n run_basic_search(\n query=query,\n sv=sv,\n )\n )\n if sv.include_local_search.value:\n tasks.append(\n run_local_search(\n query=query,\n sv=sv,\n )\n )\n if sv.include_global_search.value:\n tasks.append(\n run_global_search(\n query=query,\n sv=sv,\n )\n )\n\n return await asyncio.gather(*tasks)", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": 
"unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_generate_questions(query: str, sv: SessionVariables):\n \"\"\"Run global search to generate questions for the dataset.\"\"\"\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n tasks = []\n\n tasks.append(\n run_global_search_question_generation(\n query=query,\n sv=sv,\n )\n )\n\n return await asyncio.gather(*tasks)", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_global_search_question_generation(\n query: str,\n sv: SessionVariables,\n) -> SearchResult:\n \"\"\"Run global search question generation process.\"\"\"\n empty_context_data: dict[str, pd.DataFrame] = {}\n\n response, context_data = await api.global_search(\n config=sv.graphrag_config.value,\n entities=sv.entities.value,\n communities=sv.communities.value,\n community_reports=sv.community_reports.value,\n dynamic_community_selection=True,\n response_type=\"Single paragraph\",\n community_level=sv.dataset_config.value.community_level,\n query=query,\n )\n\n # display response and reference context to UI\n return SearchResult(\n search_type=SearchType.Global,\n response=str(response),\n context=context_data if isinstance(context_data, dict) else empty_context_data,\n )", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_local_search(\n query: str,\n sv: SessionVariables,\n) -> SearchResult:\n \"\"\"Run local search.\"\"\"\n print(f\"Local search query: {query}\") # noqa T201\n\n # build local search engine\n response_placeholder = st.session_state[\n f\"{SearchType.Local.value.lower()}_response_placeholder\"\n ]\n response_container = st.session_state[f\"{SearchType.Local.value.lower()}_container\"]\n\n with response_placeholder, st.spinner(\"Generating answer using local search...\"):\n empty_context_data: dict[str, pd.DataFrame] = {}\n\n response, context_data = await api.local_search(\n config=sv.graphrag_config.value,\n communities=sv.communities.value,\n entities=sv.entities.value,\n community_reports=sv.community_reports.value,\n text_units=sv.text_units.value,\n relationships=sv.relationships.value,\n covariates=sv.covariates.value,\n community_level=sv.dataset_config.value.community_level,\n response_type=\"Multiple Paragraphs\",\n query=query,\n )\n\n print(f\"Local Response: {response}\") # noqa T201\n print(f\"Context data: {context_data}\") # noqa T201\n\n # display response and reference context to UI\n search_result = SearchResult(\n search_type=SearchType.Local,\n response=str(response),\n context=context_data if isinstance(context_data, dict) else empty_context_data,\n )\n\n display_search_result(\n container=response_container, result=search_result, stats=None\n )\n\n if \"response_lengths\" not in st.session_state:\n st.session_state.response_lengths = []\n\n st.session_state[\"response_lengths\"].append({\n \"result\": search_result,\n \"search\": SearchType.Local.value.lower(),\n })\n\n return search_result", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_global_search(query: str, sv: SessionVariables) -> SearchResult:\n \"\"\"Run global search.\"\"\"\n print(f\"Global search query: {query}\") # noqa T201\n\n # 
build global search engine\n response_placeholder = st.session_state[\n f\"{SearchType.Global.value.lower()}_response_placeholder\"\n ]\n response_container = st.session_state[\n f\"{SearchType.Global.value.lower()}_container\"\n ]\n\n response_placeholder.empty()\n with response_placeholder, st.spinner(\"Generating answer using global search...\"):\n empty_context_data: dict[str, pd.DataFrame] = {}\n\n response, context_data = await api.global_search(\n config=sv.graphrag_config.value,\n entities=sv.entities.value,\n communities=sv.communities.value,\n community_reports=sv.community_reports.value,\n dynamic_community_selection=False,\n response_type=\"Multiple Paragraphs\",\n community_level=sv.dataset_config.value.community_level,\n query=query,\n )\n\n print(f\"Context data: {context_data}\") # noqa T201\n print(f\"Global Response: {response}\") # noqa T201\n\n # display response and reference context to UI\n search_result = SearchResult(\n search_type=SearchType.Global,\n response=str(response),\n context=context_data if isinstance(context_data, dict) else empty_context_data,\n )\n\n display_search_result(\n container=response_container, result=search_result, stats=None\n )\n\n if \"response_lengths\" not in st.session_state:\n st.session_state.response_lengths = []\n\n st.session_state[\"response_lengths\"].append({\n \"result\": search_result,\n \"search\": SearchType.Global.value.lower(),\n })\n\n return search_result", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_drift_search(\n query: str,\n sv: SessionVariables,\n) -> SearchResult:\n \"\"\"Run drift search.\"\"\"\n print(f\"Drift search query: {query}\") # noqa T201\n\n # build drift search engine\n response_placeholder = st.session_state[\n f\"{SearchType.Drift.value.lower()}_response_placeholder\"\n ]\n response_container = st.session_state[f\"{SearchType.Drift.value.lower()}_container\"]\n\n with response_placeholder, st.spinner(\"Generating answer using drift search...\"):\n empty_context_data: dict[str, pd.DataFrame] = {}\n\n response, context_data = await api.drift_search(\n config=sv.graphrag_config.value,\n entities=sv.entities.value,\n communities=sv.communities.value,\n community_reports=sv.community_reports.value,\n text_units=sv.text_units.value,\n relationships=sv.relationships.value,\n community_level=sv.dataset_config.value.community_level,\n response_type=\"Multiple Paragraphs\",\n query=query,\n )\n\n print(f\"Drift Response: {response}\") # noqa T201\n print(f\"Context data: {context_data}\") # noqa T201\n\n # display response and reference context to UI\n search_result = SearchResult(\n search_type=SearchType.Drift,\n response=str(response),\n context=context_data if isinstance(context_data, dict) else empty_context_data,\n )\n\n display_search_result(\n container=response_container, result=search_result, stats=None\n )\n\n if \"response_lengths\" not in st.session_state:\n st.session_state.response_lengths = []\n\n st.session_state[\"response_lengths\"].append({\n \"result\": None,\n \"search\": SearchType.Drift.value.lower(),\n })\n\n return search_result", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": "async def run_basic_search(\n query: str,\n sv: SessionVariables,\n) -> SearchResult:\n \"\"\"Run basic search.\"\"\"\n print(f\"Basic search query: {query}\") 
# noqa T201\n\n # build local search engine\n response_placeholder = st.session_state[\n f\"{SearchType.Basic.value.lower()}_response_placeholder\"\n ]\n response_container = st.session_state[f\"{SearchType.Basic.value.lower()}_container\"]\n\n with response_placeholder, st.spinner(\"Generating answer using basic RAG...\"):\n empty_context_data: dict[str, pd.DataFrame] = {}\n\n response, context_data = await api.basic_search(\n config=sv.graphrag_config.value,\n text_units=sv.text_units.value,\n query=query,\n )\n\n print(f\"Basic Response: {response}\") # noqa T201\n print(f\"Context data: {context_data}\") # noqa T201\n\n # display response and reference context to UI\n search_result = SearchResult(\n search_type=SearchType.Basic,\n response=str(response),\n context=context_data if isinstance(context_data, dict) else empty_context_data,\n )\n\n display_search_result(\n container=response_container, result=search_result, stats=None\n )\n\n if \"response_lengths\" not in st.session_state:\n st.session_state.response_lengths = []\n\n st.session_state[\"response_lengths\"].append({\n \"search\": SearchType.Basic.value.lower(),\n \"result\": search_result,\n })\n\n return search_result", "creation_date": "2025-04-07T17:59:02Z", "repo": "microsoft/graphrag", "file_path": "unified-search-app/app/app_logic.py", "stars": 26458, "label": 0} +{"function": " def setUp(self):\n self.test_db = cache.init_test_db()", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def tearDown(self):\n # Clean up\n cache.clean_test_db(self.test_db)", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_basic_set_get(self):\n \"\"\"Test basic set and get operations\"\"\"\n cache_instance = cache.TranslationCache(\"test_engine\")\n\n # Test get with non-existent entry\n result = cache_instance.get(\"hello\")\n self.assertIsNone(result)\n\n # Test set and get\n cache_instance.set(\"hello\", \"\u4f60\u597d\")\n result = cache_instance.get(\"hello\")\n self.assertEqual(result, \"\u4f60\u597d\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_cache_overwrite(self):\n \"\"\"Test that cache entries can be overwritten\"\"\"\n cache_instance = cache.TranslationCache(\"test_engine\")\n\n # Set initial translation\n cache_instance.set(\"hello\", \"\u4f60\u597d\")\n\n # Overwrite with new translation\n cache_instance.set(\"hello\", \"\u60a8\u597d\")\n\n # Verify the new translation is returned\n result = cache_instance.get(\"hello\")\n self.assertEqual(result, \"\u60a8\u597d\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_non_string_params(self):\n \"\"\"Test that non-string parameters are automatically converted to JSON\"\"\"\n params = {\"model\": \"gpt-3.5\", \"temperature\": 0.7}\n cache_instance = cache.TranslationCache(\"test_engine\", params)\n\n # Test that params are converted to JSON string internally\n cache_instance.set(\"hello\", \"\u4f60\u597d\")\n result = cache_instance.get(\"hello\")\n self.assertEqual(result, \"\u4f60\u597d\")\n\n # Test with different param types\n array_params = [\"param1\", \"param2\"]\n cache_instance2 = cache.TranslationCache(\"test_engine\", 
array_params)\n cache_instance2.set(\"hello\", \"\u4f60\u597d2\")\n self.assertEqual(cache_instance2.get(\"hello\"), \"\u4f60\u597d2\")\n\n # Test with nested structures\n nested_params = {\"options\": {\"temp\": 0.8, \"models\": [\"a\", \"b\"]}}\n cache_instance3 = cache.TranslationCache(\"test_engine\", nested_params)\n cache_instance3.set(\"hello\", \"\u4f60\u597d3\")\n self.assertEqual(cache_instance3.get(\"hello\"), \"\u4f60\u597d3\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_engine_distinction(self):\n \"\"\"Test that cache distinguishes between different translation engines\"\"\"\n cache1 = cache.TranslationCache(\"engine1\")\n cache2 = cache.TranslationCache(\"engine2\")\n\n # Set same text with different engines\n cache1.set(\"hello\", \"\u4f60\u597d 1\")\n cache2.set(\"hello\", \"\u4f60\u597d 2\")\n\n # Verify each engine gets its own translation\n self.assertEqual(cache1.get(\"hello\"), \"\u4f60\u597d 1\")\n self.assertEqual(cache2.get(\"hello\"), \"\u4f60\u597d 2\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_params_distinction(self):\n \"\"\"Test that cache distinguishes between different engine parameters\"\"\"\n params1 = {\"param\": \"value1\"}\n params2 = {\"param\": \"value2\"}\n cache1 = cache.TranslationCache(\"test_engine\", params1)\n cache2 = cache.TranslationCache(\"test_engine\", params2)\n\n # Set same text with different parameters\n cache1.set(\"hello\", \"\u4f60\u597d 1\")\n cache2.set(\"hello\", \"\u4f60\u597d 2\")\n\n # Verify each parameter set gets its own translation\n self.assertEqual(cache1.get(\"hello\"), \"\u4f60\u597d 1\")\n self.assertEqual(cache2.get(\"hello\"), \"\u4f60\u597d 2\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_consistent_param_serialization(self):\n \"\"\"Test that dictionary parameters are consistently serialized regardless of key order\"\"\"\n # Test simple dictionary\n params1 = {\"b\": 1, \"a\": 2}\n params2 = {\"a\": 2, \"b\": 1}\n cache1 = cache.TranslationCache(\"test_engine\", params1)\n cache2 = cache.TranslationCache(\"test_engine\", params2)\n self.assertEqual(cache1.translate_engine_params, cache2.translate_engine_params)\n\n # Test nested dictionary\n params1 = {\"outer2\": {\"inner2\": 2, \"inner1\": 1}, \"outer1\": 3}\n params2 = {\"outer1\": 3, \"outer2\": {\"inner1\": 1, \"inner2\": 2}}\n cache1 = cache.TranslationCache(\"test_engine\", params1)\n cache2 = cache.TranslationCache(\"test_engine\", params2)\n self.assertEqual(cache1.translate_engine_params, cache2.translate_engine_params)\n\n # Test dictionary with list of dictionaries\n params1 = {\"b\": [{\"y\": 1, \"x\": 2}], \"a\": 3}\n params2 = {\"a\": 3, \"b\": [{\"x\": 2, \"y\": 1}]}\n cache1 = cache.TranslationCache(\"test_engine\", params1)\n cache2 = cache.TranslationCache(\"test_engine\", params2)\n self.assertEqual(cache1.translate_engine_params, cache2.translate_engine_params)\n\n # Test that different values still produce different results\n params1 = {\"a\": 1, \"b\": 2}\n params2 = {\"a\": 2, \"b\": 1}\n cache1 = cache.TranslationCache(\"test_engine\", params1)\n cache2 = cache.TranslationCache(\"test_engine\", params2)\n self.assertNotEqual(\n cache1.translate_engine_params, 
cache2.translate_engine_params\n )", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_cache_with_sorted_params(self):\n \"\"\"Test that cache works correctly with sorted parameters\"\"\"\n params1 = {\"b\": [{\"y\": 1, \"x\": 2}], \"a\": 3}\n params2 = {\"a\": 3, \"b\": [{\"x\": 2, \"y\": 1}]}\n\n # Both caches should work with the same key\n cache1 = cache.TranslationCache(\"test_engine\", params1)\n cache1.set(\"hello\", \"\u4f60\u597d\")\n\n cache2 = cache.TranslationCache(\"test_engine\", params2)\n self.assertEqual(cache2.get(\"hello\"), \"\u4f60\u597d\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": " def test_append_params(self):\n \"\"\"Test the append_params method\"\"\"\n cache_instance = cache.TranslationCache(\"test_engine\", {\"initial\": \"value\"})\n\n # Test appending new parameter\n cache_instance.add_params(\"new_param\", \"new_value\")\n self.assertEqual(\n cache_instance.params, {\"initial\": \"value\", \"new_param\": \"new_value\"}\n )\n\n # Test that cache with appended params works correctly\n cache_instance.set(\"hello\", \"\u4f60\u597d\")\n self.assertEqual(cache_instance.get(\"hello\"), \"\u4f60\u597d\")\n\n # Test overwriting existing parameter\n cache_instance.add_params(\"initial\", \"new_value\")\n self.assertEqual(\n cache_instance.params, {\"initial\": \"new_value\", \"new_param\": \"new_value\"}\n )\n\n # Cache should work with updated params\n cache_instance.set(\"hello2\", \"\u4f60\u597d2\")\n self.assertEqual(cache_instance.get(\"hello2\"), \"\u4f60\u597d2\")", "creation_date": "2024-12-20T06:38:52Z", "repo": "Byaidu/PDFMathTranslate", "file_path": "test/test_cache.py", "stars": 25673, "label": 0} +{"function": "def deps_list(*pkgs):\n return [deps[pkg] for pkg in pkgs]", "creation_date": "2025-01-24T16:50:13Z", "repo": "huggingface/open-r1", "file_path": "setup.py", "stars": 25023, "label": 0} +{"function": " def test_get_reward_funcs(self):\n \"\"\"Test get_reward_funcs with various reward functions.\"\"\"\n reward_names = [\n \"accuracy\",\n \"format\",\n \"reasoning_steps\",\n \"cosine\",\n \"repetition_penalty\",\n \"length\",\n \"tag_count\",\n \"code\",\n \"ioi_code\",\n \"code_format\",\n \"binary_code\",\n ]\n reward_func_names = [\n \"accuracy_reward\",\n \"format_reward\",\n \"reasoning_steps_reward\",\n \"cosine_scaled_reward\",\n \"repetition_penalty_reward\",\n \"len_reward\",\n \"tag_count_reward\",\n \"code_reward\",\n \"ioi_code_reward\",\n \"code_format_reward\",\n \"binary_code_reward\",\n ]\n\n args = GRPOScriptArguments(\n dataset_name=\"dummy\",\n reward_funcs=reward_names,\n )\n\n reward_funcs = get_reward_funcs(args)\n self.assertEqual(len(reward_funcs), 11)\n for func_name, func in zip(reward_func_names, reward_funcs):\n self.assertEqual(func_name, func.__name__)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_accuracy_reward_correct_answer(self):\n \"\"\"Test accuracy_reward with a correct answer.\"\"\"\n completion = [[{\"content\": r\"\\boxed{\\frac{63}{400}}\"}]]\n solution = [r\"\\frac{63}{400}\"]\n rewards = accuracy_reward(completion, solution)\n self.assertEqual(rewards[0], 1.0)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": 
"tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_accuracy_reward_wrong_answer(self):\n \"\"\"Test accuracy_reward with an incorrect answer.\"\"\"\n completion = [[{\"content\": r\"\\boxed{\\frac{64}{400}}\"}]]\n solution = [r\"\\frac{63}{400}\"]\n rewards = accuracy_reward(completion, solution)\n self.assertEqual(rewards[0], 0.0)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_accuracy_reward_wrong_answer_no_latex(self):\n \"\"\"Test accuracy_reward with an incorrect answer and gold solution with no latex.\"\"\"\n completion = [[{\"content\": r\"\\boxed{3}\"}]]\n solution = [\"6\"]\n rewards = accuracy_reward(completion, solution)\n self.assertEqual(rewards[0], 0.0)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_format_reward_correct(self):\n \"\"\"Test format_reward with correct format.\"\"\"\n completion = [[{\"content\": \"\\nSome reasoning\\n\\n\\nThe answer\\n\"}]]\n rewards = format_reward(completion)\n self.assertEqual(rewards[0], 1.0)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_format_reward_incorrect(self):\n \"\"\"Test format_reward with incorrect format.\"\"\"\n incorrect_formats = [\n \"Only thinking\",\n \"Only answer\",\n \"No tags at all\",\n \"Missing closingMissing closing\",\n \"Wrong orderWrong order\",\n ]\n\n for fmt in incorrect_formats:\n completion = [[{\"content\": fmt}]]\n rewards = format_reward(completion)\n self.assertEqual(rewards[0], 0.0)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_reasoning_steps_reward(self):\n \"\"\"Test reasoning_steps_reward with various formats.\"\"\"\n test_cases = [\n # Full credit cases (3 or more steps)\n (\"Step 1: First step\\nStep 2: Second step\\nStep 3: Third step\", 1.0),\n (\"First, we do this.\\nSecond, we do that.\\nFinally, we conclude.\", 1.0),\n # Partial credit cases (less than 3 steps)\n (\"Step 1: Only step\", 1 / 3),\n (\"First, we do this.\\nFinally, we conclude.\", 2 / 3),\n # No credit case\n (\"Just plain text without any clear steps\", 0.0),\n ]\n\n for content, expected_reward in test_cases:\n completion = [[{\"content\": content}]]\n rewards = reasoning_steps_reward(completion)\n self.assertAlmostEqual(rewards[0], expected_reward)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_multiple_completions(self):\n \"\"\"Test handling multiple completions at once.\"\"\"\n completions = [\n [{\"content\": r\"\\boxed{\\frac{63}{400}}\"}],\n [{\"content\": r\"\\boxed{\\frac{64}{400}}\"}],\n ]\n solutions = [r\"\\frac{63}{400}\", r\"\\frac{63}{400}\"]\n\n rewards = accuracy_reward(completions, solutions)\n self.assertEqual(len(rewards), 2)\n self.assertEqual(rewards[0], 1.0)\n self.assertEqual(rewards[1], 0.0)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": " def test_cosine_scaled_reward(self):\n \"\"\"Test cosine_scaled_reward with various cases.\"\"\"\n # Test parameters\n test_params = {\n 
\"min_value_wrong\": -1.0,\n \"max_value_wrong\": -0.5,\n \"min_value_correct\": 0.5,\n \"max_value_correct\": 1.0,\n \"max_len\": 100,\n }\n\n test_cases = [\n # Correct answers with different lengths\n (\n r\"\\boxed{\\frac{63}{400}}\",\n r\"\\frac{63}{400}\",\n 20,\n 0.943,\n ), # Short correct answer\n (\n r\"\\boxed{\\frac{63}{400}}\",\n r\"\\frac{63}{400}\",\n 80,\n 0.547,\n ), # Long correct answer\n # Wrong answers with different lengths\n (\n r\"\\boxed{\\frac{64}{400}}\",\n r\"\\frac{63}{400}\",\n 20,\n -0.942,\n ), # Short wrong answer\n (\n r\"\\boxed{\\frac{64}{400}}\",\n r\"\\frac{63}{400}\",\n 80,\n -0.547,\n ), # Long wrong answer\n ]\n\n for content, solution, content_len, expected_reward in test_cases:\n # Pad content to desired length\n padded_content = content + \" \" * (content_len - len(content))\n completion = [[{\"content\": padded_content}]]\n\n rewards = get_cosine_scaled_reward(**test_params)(completion, [solution])\n self.assertAlmostEqual(rewards[0], expected_reward, places=2)", "creation_date": "2025-02-06T19:10:05Z", "repo": "huggingface/open-r1", "file_path": "tests/test_rewards.py", "stars": 25023, "label": 0} +{"function": "def get_models(name: str, device: torch.device, offload: bool, is_schnell: bool):\n t5 = load_t5(device, max_length=256 if is_schnell else 512)\n clip = load_clip(device)\n model = load_flow_model(name, device=\"cpu\" if offload else device)\n ae = load_ae(name, device=\"cpu\" if offload else device)\n nsfw_classifier = pipeline(\"image-classification\", model=\"Falconsai/nsfw_image_detection\", device=device)\n return model, ae, t5, clip, nsfw_classifier", "creation_date": "2024-08-02T10:10:46Z", "repo": "black-forest-labs/flux", "file_path": "demo_gr.py", "stars": 23428, "label": 0} +{"function": "def create_demo(\n model_name: str,\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n offload: bool = False,\n track_usage: bool = False,\n):\n generator = FluxGenerator(model_name, device, offload, track_usage)\n is_schnell = model_name == \"flux-schnell\"\n\n with gr.Blocks() as demo:\n gr.Markdown(f\"# Flux Image Generation Demo - Model: {model_name}\")\n\n with gr.Row():\n with gr.Column():\n prompt = gr.Textbox(\n label=\"Prompt\",\n value='a photo of a forest with mist swirling around the tree trunks. 
The word \"FLUX\" is painted over it in big, red brush strokes with visible texture',\n )\n do_img2img = gr.Checkbox(label=\"Image to Image\", value=False, interactive=not is_schnell)\n init_image = gr.Image(label=\"Input Image\", visible=False)\n image2image_strength = gr.Slider(\n 0.0, 1.0, 0.8, step=0.1, label=\"Noising strength\", visible=False\n )\n\n with gr.Accordion(\"Advanced Options\", open=False):\n width = gr.Slider(128, 8192, 1360, step=16, label=\"Width\")\n height = gr.Slider(128, 8192, 768, step=16, label=\"Height\")\n num_steps = gr.Slider(1, 50, 4 if is_schnell else 50, step=1, label=\"Number of steps\")\n guidance = gr.Slider(\n 1.0, 10.0, 3.5, step=0.1, label=\"Guidance\", interactive=not is_schnell\n )\n seed = gr.Textbox(-1, label=\"Seed (-1 for random)\")\n add_sampling_metadata = gr.Checkbox(\n label=\"Add sampling parameters to metadata?\", value=True\n )\n\n generate_btn = gr.Button(\"Generate\")\n\n with gr.Column():\n output_image = gr.Image(label=\"Generated Image\")\n seed_output = gr.Number(label=\"Used Seed\")\n warning_text = gr.Textbox(label=\"Warning\", visible=False)\n download_btn = gr.File(label=\"Download full-resolution\")\n\n def update_img2img(do_img2img):\n return {\n init_image: gr.update(visible=do_img2img),\n image2image_strength: gr.update(visible=do_img2img),\n }\n\n do_img2img.change(update_img2img, do_img2img, [init_image, image2image_strength])\n\n generate_btn.click(\n fn=generator.generate_image,\n inputs=[\n width,\n height,\n num_steps,\n guidance,\n seed,\n prompt,\n init_image,\n image2image_strength,\n add_sampling_metadata,\n ],\n outputs=[output_image, seed_output, download_btn, warning_text],\n )\n\n return demo", "creation_date": "2024-08-02T10:10:46Z", "repo": "black-forest-labs/flux", "file_path": "demo_gr.py", "stars": 23428, "label": 0} +{"function": " def __init__(self, model_name: str, device: str, offload: bool, track_usage: bool):\n self.device = torch.device(device)\n self.offload = offload\n self.model_name = model_name\n self.is_schnell = model_name == \"flux-schnell\"\n self.model, self.ae, self.t5, self.clip, self.nsfw_classifier = get_models(\n model_name,\n device=self.device,\n offload=self.offload,\n is_schnell=self.is_schnell,\n )\n self.track_usage = track_usage", "creation_date": "2024-08-02T10:10:46Z", "repo": "black-forest-labs/flux", "file_path": "demo_gr.py", "stars": 23428, "label": 0} +{"function": " def generate_image(\n self,\n width,\n height,\n num_steps,\n guidance,\n seed,\n prompt,\n init_image=None,\n image2image_strength=0.0,\n add_sampling_metadata=True,\n ):\n seed = int(seed)\n if seed == -1:\n seed = None\n\n opts = SamplingOptions(\n prompt=prompt,\n width=width,\n height=height,\n num_steps=num_steps,\n guidance=guidance,\n seed=seed,\n )\n\n if opts.seed is None:\n opts.seed = torch.Generator(device=\"cpu\").seed()\n print(f\"Generating '{opts.prompt}' with seed {opts.seed}\")\n t0 = time.perf_counter()\n\n if init_image is not None:\n if isinstance(init_image, np.ndarray):\n init_image = torch.from_numpy(init_image).permute(2, 0, 1).float() / 255.0\n init_image = init_image.unsqueeze(0)\n init_image = init_image.to(self.device)\n init_image = torch.nn.functional.interpolate(init_image, (opts.height, opts.width))\n if self.offload:\n self.ae.encoder.to(self.device)\n init_image = self.ae.encode(init_image.to())\n if self.offload:\n self.ae = self.ae.cpu()\n torch.cuda.empty_cache()\n\n # prepare input\n x = get_noise(\n 1,\n opts.height,\n opts.width,\n device=self.device,\n 
dtype=torch.bfloat16,\n seed=opts.seed,\n )\n timesteps = get_schedule(\n opts.num_steps,\n x.shape[-1] * x.shape[-2] // 4,\n shift=(not self.is_schnell),\n )\n if init_image is not None:\n t_idx = int((1 - image2image_strength) * num_steps)\n t = timesteps[t_idx]\n timesteps = timesteps[t_idx:]\n x = t * x + (1.0 - t) * init_image.to(x.dtype)\n\n if self.offload:\n self.t5, self.clip = self.t5.to(self.device), self.clip.to(self.device)\n inp = prepare(t5=self.t5, clip=self.clip, img=x, prompt=opts.prompt)\n\n # offload TEs to CPU, load model to gpu\n if self.offload:\n self.t5, self.clip = self.t5.cpu(), self.clip.cpu()\n torch.cuda.empty_cache()\n self.model = self.model.to(self.device)\n\n # denoise initial noise\n x = denoise(self.model, **inp, timesteps=timesteps, guidance=opts.guidance)\n\n # offload model, load autoencoder to gpu\n if self.offload:\n self.model.cpu()\n torch.cuda.empty_cache()\n self.ae.decoder.to(x.device)\n\n # decode latents to pixel space\n x = unpack(x.float(), opts.height, opts.width)\n with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):\n x = self.ae.decode(x)\n\n if self.offload:\n self.ae.decoder.cpu()\n torch.cuda.empty_cache()\n\n t1 = time.perf_counter()\n\n print(f\"Done in {t1 - t0:.1f}s.\")\n # bring into PIL format\n x = x.clamp(-1, 1)\n x = embed_watermark(x.float())\n x = rearrange(x[0], \"c h w -> h w c\")\n\n img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())\n nsfw_score = [x[\"score\"] for x in self.nsfw_classifier(img) if x[\"label\"] == \"nsfw\"][0]\n\n if nsfw_score < NSFW_THRESHOLD:\n filename = f\"output/gradio/{uuid.uuid4()}.jpg\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n exif_data = Image.Exif()\n if init_image is None:\n exif_data[ExifTags.Base.Software] = \"AI generated;txt2img;flux\"\n else:\n exif_data[ExifTags.Base.Software] = \"AI generated;img2img;flux\"\n exif_data[ExifTags.Base.Make] = \"Black Forest Labs\"\n exif_data[ExifTags.Base.Model] = self.model_name\n if add_sampling_metadata:\n exif_data[ExifTags.Base.ImageDescription] = prompt\n img.save(filename, format=\"jpeg\", exif=exif_data, quality=95, subsampling=0)\n\n if self.track_usage:\n track_usage_via_api(self.model_name, 1)\n\n return img, str(opts.seed), filename, None\n else:\n return None, str(opts.seed), None, \"Your generated image may contain NSFW content.\"", "creation_date": "2024-08-02T10:10:46Z", "repo": "black-forest-labs/flux", "file_path": "demo_gr.py", "stars": 23428, "label": 0} +{"function": " def update_img2img(do_img2img):\n return {\n init_image: gr.update(visible=do_img2img),\n image2image_strength: gr.update(visible=do_img2img),\n }", "creation_date": "2024-08-02T10:10:46Z", "repo": "black-forest-labs/flux", "file_path": "demo_gr.py", "stars": 23428, "label": 0} +{"function": "def get_models(name: str, device: torch.device, offload: bool, is_schnell: bool):\n t5 = load_t5(device, max_length=256 if is_schnell else 512)\n clip = load_clip(device)\n model = load_flow_model(name, device=\"cpu\" if offload else device)\n ae = load_ae(name, device=\"cpu\" if offload else device)\n nsfw_classifier = pipeline(\"image-classification\", model=\"Falconsai/nsfw_image_detection\", device=device)\n return model, ae, t5, clip, nsfw_classifier", "creation_date": "2024-08-01T13:06:21Z", "repo": "black-forest-labs/flux", "file_path": "demo_st.py", "stars": 23428, "label": 0} +{"function": "def get_image() -> torch.Tensor | None:\n image = st.file_uploader(\"Input\", type=[\"jpg\", \"JPEG\", \"png\"])\n if image is 
None:\n return None\n image = Image.open(image).convert(\"RGB\")\n\n transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Lambda(lambda x: 2.0 * x - 1.0),\n ]\n )\n img: torch.Tensor = transform(image)\n return img[None, ...]", "creation_date": "2024-08-01T13:06:21Z", "repo": "black-forest-labs/flux", "file_path": "demo_st.py", "stars": 23428, "label": 0} +{"function": "def main(\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n offload: bool = False,\n output_dir: str = \"output\",\n track_usage: bool = False,\n):\n torch_device = torch.device(device)\n names = list(configs.keys())\n name = st.selectbox(\"Which model to load?\", names)\n if name is None or not st.checkbox(\"Load model\", False):\n return\n\n is_schnell = name == \"flux-schnell\"\n model, ae, t5, clip, nsfw_classifier = get_models(\n name,\n device=torch_device,\n offload=offload,\n is_schnell=is_schnell,\n )\n\n do_img2img = (\n st.checkbox(\n \"Image to Image\",\n False,\n disabled=is_schnell,\n help=\"Partially noise an image and denoise again to get variations.\\n\\nOnly works for flux-dev\",\n )\n and not is_schnell\n )\n if do_img2img:\n init_image = get_image()\n if init_image is None:\n st.warning(\"Please add an image to do image to image\")\n image2image_strength = st.number_input(\"Noising strength\", min_value=0.0, max_value=1.0, value=0.8)\n if init_image is not None:\n h, w = init_image.shape[-2:]\n st.write(f\"Got image of size {w}x{h} ({h * w / 1e6:.2f}MP)\")\n resize_img = st.checkbox(\"Resize image\", False) or init_image is None\n else:\n init_image = None\n resize_img = True\n image2image_strength = 0.0\n\n # allow for packing and conversion to latent space\n width = int(\n 16 * (st.number_input(\"Width\", min_value=128, value=1360, step=16, disabled=not resize_img) // 16)\n )\n height = int(\n 16 * (st.number_input(\"Height\", min_value=128, value=768, step=16, disabled=not resize_img) // 16)\n )\n num_steps = int(st.number_input(\"Number of steps\", min_value=1, value=(4 if is_schnell else 50)))\n guidance = float(st.number_input(\"Guidance\", min_value=1.0, value=3.5, disabled=is_schnell))\n seed_str = st.text_input(\"Seed\", disabled=is_schnell)\n if seed_str.isdecimal():\n seed = int(seed_str)\n else:\n st.info(\"No seed set, set to positive integer to enable\")\n seed = None\n save_samples = st.checkbox(\"Save samples?\", not is_schnell)\n add_sampling_metadata = st.checkbox(\"Add sampling parameters to metadata?\", True)\n\n default_prompt = (\n \"a photo of a forest with mist swirling around the tree trunks. 
The word \"\n '\"FLUX\" is painted over it in big, red brush strokes with visible texture'\n )\n prompt = st_keyup(\"Enter a prompt\", value=default_prompt, debounce=300, key=\"interactive_text\")\n\n output_name = os.path.join(output_dir, \"img_{idx}.jpg\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n idx = 0\n else:\n fns = [fn for fn in iglob(output_name.format(idx=\"*\")) if re.search(r\"img_[0-9]+\\.jpg$\", fn)]\n if len(fns) > 0:\n idx = max(int(fn.split(\"_\")[-1].split(\".\")[0]) for fn in fns) + 1\n else:\n idx = 0\n\n rng = torch.Generator(device=\"cpu\")\n\n if \"seed\" not in st.session_state:\n st.session_state.seed = rng.seed()\n\n def increment_counter():\n st.session_state.seed += 1\n\n def decrement_counter():\n if st.session_state.seed > 0:\n st.session_state.seed -= 1\n\n opts = SamplingOptions(\n prompt=prompt,\n width=width,\n height=height,\n num_steps=num_steps,\n guidance=guidance,\n seed=seed,\n )\n\n if name == \"flux-schnell\":\n cols = st.columns([5, 1, 1, 5])\n with cols[1]:\n st.button(\"\u21a9\", on_click=increment_counter)\n with cols[2]:\n st.button(\"\u21aa\", on_click=decrement_counter)\n if is_schnell or st.button(\"Sample\"):\n if is_schnell:\n opts.seed = st.session_state.seed\n elif opts.seed is None:\n opts.seed = rng.seed()\n print(f\"Generating '{opts.prompt}' with seed {opts.seed}\")\n t0 = time.perf_counter()\n\n if init_image is not None:\n if resize_img:\n init_image = torch.nn.functional.interpolate(init_image, (opts.height, opts.width))\n else:\n h, w = init_image.shape[-2:]\n init_image = init_image[..., : 16 * (h // 16), : 16 * (w // 16)]\n opts.height = init_image.shape[-2]\n opts.width = init_image.shape[-1]\n if offload:\n ae.encoder.to(torch_device)\n init_image = ae.encode(init_image.to(torch_device))\n if offload:\n ae = ae.cpu()\n torch.cuda.empty_cache()\n\n # prepare input\n x = get_noise(\n 1,\n opts.height,\n opts.width,\n device=torch_device,\n dtype=torch.bfloat16,\n seed=opts.seed,\n )\n # divide pixel space by 16**2 to account for latent space conversion\n timesteps = get_schedule(\n opts.num_steps,\n (x.shape[-1] * x.shape[-2]) // 4,\n shift=(not is_schnell),\n )\n if init_image is not None:\n t_idx = int((1 - image2image_strength) * num_steps)\n t = timesteps[t_idx]\n timesteps = timesteps[t_idx:]\n x = t * x + (1.0 - t) * init_image.to(x.dtype)\n\n if offload:\n t5, clip = t5.to(torch_device), clip.to(torch_device)\n inp = prepare(t5=t5, clip=clip, img=x, prompt=opts.prompt)\n\n # offload TEs to CPU, load model to gpu\n if offload:\n t5, clip = t5.cpu(), clip.cpu()\n torch.cuda.empty_cache()\n model = model.to(torch_device)\n\n # denoise initial noise\n x = denoise(model, **inp, timesteps=timesteps, guidance=opts.guidance)\n\n # offload model, load autoencoder to gpu\n if offload:\n model.cpu()\n torch.cuda.empty_cache()\n ae.decoder.to(x.device)\n\n # decode latents to pixel space\n x = unpack(x.float(), opts.height, opts.width)\n with torch.autocast(device_type=torch_device.type, dtype=torch.bfloat16):\n x = ae.decode(x)\n\n if offload:\n ae.decoder.cpu()\n torch.cuda.empty_cache()\n\n t1 = time.perf_counter()\n\n fn = output_name.format(idx=idx)\n print(f\"Done in {t1 - t0:.1f}s.\")\n # bring into PIL format and save\n x = x.clamp(-1, 1)\n x = embed_watermark(x.float())\n x = rearrange(x[0], \"c h w -> h w c\")\n\n img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())\n nsfw_score = [x[\"score\"] for x in nsfw_classifier(img) if x[\"label\"] == \"nsfw\"][0]\n\n if nsfw_score < 
NSFW_THRESHOLD:\n buffer = BytesIO()\n exif_data = Image.Exif()\n if init_image is None:\n exif_data[ExifTags.Base.Software] = \"AI generated;txt2img;flux\"\n else:\n exif_data[ExifTags.Base.Software] = \"AI generated;img2img;flux\"\n exif_data[ExifTags.Base.Make] = \"Black Forest Labs\"\n exif_data[ExifTags.Base.Model] = name\n if add_sampling_metadata:\n exif_data[ExifTags.Base.ImageDescription] = prompt\n img.save(buffer, format=\"jpeg\", exif=exif_data, quality=95, subsampling=0)\n\n img_bytes = buffer.getvalue()\n if save_samples:\n print(f\"Saving {fn}\")\n with open(fn, \"wb\") as file:\n file.write(img_bytes)\n idx += 1\n if track_usage:\n track_usage_via_api(name, 1)\n\n st.session_state[\"samples\"] = {\n \"prompt\": opts.prompt,\n \"img\": img,\n \"seed\": opts.seed,\n \"bytes\": img_bytes,\n }\n opts.seed = None\n else:\n st.warning(\"Your generated image may contain NSFW content.\")\n st.session_state[\"samples\"] = None\n\n samples = st.session_state.get(\"samples\", None)\n if samples is not None:\n st.image(samples[\"img\"], caption=samples[\"prompt\"])\n st.download_button(\n \"Download full-resolution\",\n samples[\"bytes\"],\n file_name=\"generated.jpg\",\n mime=\"image/jpg\",\n )\n st.write(f\"Seed: {samples['seed']}\")", "creation_date": "2024-08-01T13:06:21Z", "repo": "black-forest-labs/flux", "file_path": "demo_st.py", "stars": 23428, "label": 0} +{"function": "def app():\n Fire(main)", "creation_date": "2024-08-01T13:06:21Z", "repo": "black-forest-labs/flux", "file_path": "demo_st.py", "stars": 23428, "label": 0} +{"function": " def increment_counter():\n st.session_state.seed += 1", "creation_date": "2024-08-01T13:06:21Z", "repo": "black-forest-labs/flux", "file_path": "demo_st.py", "stars": 23428, "label": 0} +{"function": "def init_model(args):\n tokenizer = AutoTokenizer.from_pretrained('./model/')\n if args.load == 0:\n moe_path = '_moe' if args.use_moe else ''\n modes = {0: 'pretrain', 1: 'full_sft', 2: 'rlhf', 3: 'reason', 4: 'grpo'}\n ckp = f'./{args.out_dir}/{modes[args.model_mode]}_{args.hidden_size}{moe_path}.pth'\n\n model = MiniMindForCausalLM(MiniMindConfig(\n hidden_size=args.hidden_size,\n num_hidden_layers=args.num_hidden_layers,\n use_moe=args.use_moe\n ))\n\n model.load_state_dict(torch.load(ckp, map_location=args.device), strict=True)\n\n if args.lora_name != 'None':\n apply_lora(model)\n load_lora(model, f'./{args.out_dir}/lora/{args.lora_name}_{args.hidden_size}.pth')\n else:\n transformers_model_path = './MiniMind2'\n tokenizer = AutoTokenizer.from_pretrained(transformers_model_path)\n model = AutoModelForCausalLM.from_pretrained(transformers_model_path, trust_remote_code=True)\n print(f'MiniMind\u6a21\u578b\u53c2\u6570\u91cf: {sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6:.2f}M(illion)')\n return model.eval().to(args.device), tokenizer", "creation_date": "2025-02-09T15:49:47Z", "repo": "jingyaogong/minimind", "file_path": "eval_model.py", "stars": 23015, "label": 0} +{"function": "def get_prompt_datas(args):\n if args.model_mode == 0:\n # pretrain\u6a21\u578b\u7684\u63a5\u9f99\u80fd\u529b\uff08\u65e0\u6cd5\u5bf9\u8bdd\uff09\n prompt_datas = [\n '\u9a6c\u514b\u601d\u4e3b\u4e49\u57fa\u672c\u539f\u7406',\n '\u4eba\u7c7b\u5927\u8111\u7684\u4e3b\u8981\u529f\u80fd',\n '\u4e07\u6709\u5f15\u529b\u539f\u7406\u662f',\n '\u4e16\u754c\u4e0a\u6700\u9ad8\u7684\u5c71\u5cf0\u662f',\n '\u4e8c\u6c27\u5316\u78b3\u5728\u7a7a\u6c14\u4e2d',\n '\u5730\u7403\u4e0a\u6700\u5927\u7684\u52a8\u7269\u6709',\n 
'\u676d\u5dde\u5e02\u7684\u7f8e\u98df\u6709'\n ]\n else:\n if args.lora_name == 'None':\n # \u901a\u7528\u5bf9\u8bdd\u95ee\u9898\n prompt_datas = [\n '\u8bf7\u4ecb\u7ecd\u4e00\u4e0b\u81ea\u5df1\u3002',\n '\u4f60\u66f4\u64c5\u957f\u54ea\u4e00\u4e2a\u5b66\u79d1\uff1f',\n '\u9c81\u8fc5\u7684\u300a\u72c2\u4eba\u65e5\u8bb0\u300b\u662f\u5982\u4f55\u6279\u5224\u5c01\u5efa\u793c\u6559\u7684\uff1f',\n '\u6211\u54b3\u55fd\u5df2\u7ecf\u6301\u7eed\u4e86\u4e24\u5468\uff0c\u9700\u8981\u53bb\u533b\u9662\u68c0\u67e5\u5417\uff1f',\n '\u8be6\u7ec6\u7684\u4ecb\u7ecd\u5149\u901f\u7684\u7269\u7406\u6982\u5ff5\u3002',\n '\u63a8\u8350\u4e00\u4e9b\u676d\u5dde\u7684\u7279\u8272\u7f8e\u98df\u5427\u3002',\n '\u8bf7\u4e3a\u6211\u8bb2\u89e3\u201c\u5927\u8bed\u8a00\u6a21\u578b\u201d\u8fd9\u4e2a\u6982\u5ff5\u3002',\n '\u5982\u4f55\u7406\u89e3ChatGPT\uff1f',\n 'Introduce the history of the United States, please.'\n ]\n else:\n # \u7279\u5b9a\u9886\u57df\u95ee\u9898\n lora_prompt_datas = {\n 'lora_identity': [\n \"\u4f60\u662fChatGPT\u5427\u3002\",\n \"\u4f60\u53eb\u4ec0\u4e48\u540d\u5b57\uff1f\",\n \"\u4f60\u548copenai\u662f\u4ec0\u4e48\u5173\u7cfb\uff1f\"\n ],\n 'lora_medical': [\n '\u6211\u6700\u8fd1\u7ecf\u5e38\u611f\u5230\u5934\u6655\uff0c\u53ef\u80fd\u662f\u4ec0\u4e48\u539f\u56e0\uff1f',\n '\u6211\u54b3\u55fd\u5df2\u7ecf\u6301\u7eed\u4e86\u4e24\u5468\uff0c\u9700\u8981\u53bb\u533b\u9662\u68c0\u67e5\u5417\uff1f',\n '\u670d\u7528\u6297\u751f\u7d20\u65f6\u9700\u8981\u6ce8\u610f\u54ea\u4e9b\u4e8b\u9879\uff1f',\n '\u4f53\u68c0\u62a5\u544a\u4e2d\u663e\u793a\u80c6\u56fa\u9187\u504f\u9ad8\uff0c\u6211\u8be5\u600e\u4e48\u529e\uff1f',\n '\u5b55\u5987\u5728\u996e\u98df\u4e0a\u9700\u8981\u6ce8\u610f\u4ec0\u4e48\uff1f',\n '\u8001\u5e74\u4eba\u5982\u4f55\u9884\u9632\u9aa8\u8d28\u758f\u677e\uff1f',\n '\u6211\u6700\u8fd1\u603b\u662f\u611f\u5230\u7126\u8651\uff0c\u5e94\u8be5\u600e\u4e48\u7f13\u89e3\uff1f',\n '\u5982\u679c\u6709\u4eba\u7a81\u7136\u6655\u5012\uff0c\u5e94\u8be5\u5982\u4f55\u6025\u6551\uff1f'\n ],\n }\n prompt_datas = lora_prompt_datas[args.lora_name]\n\n return prompt_datas", "creation_date": "2025-02-09T15:49:47Z", "repo": "jingyaogong/minimind", "file_path": "eval_model.py", "stars": 23015, "label": 0} +{"function": "def setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "creation_date": "2025-02-09T15:49:47Z", "repo": "jingyaogong/minimind", "file_path": "eval_model.py", "stars": 23015, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser(description=\"Chat with MiniMind\")\n parser.add_argument('--lora_name', default='None', type=str)\n parser.add_argument('--out_dir', default='out', type=str)\n parser.add_argument('--temperature', default=0.85, type=float)\n parser.add_argument('--top_p', default=0.85, type=float)\n parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu', type=str)\n # \u6b64\u5904max_seq_len\uff08\u6700\u5927\u8f93\u51fa\u957f\u5ea6\uff09\u5e76\u4e0d\u610f\u5473\u6a21\u578b\u5177\u6709\u5bf9\u5e94\u7684\u957f\u6587\u672c\u7684\u6027\u80fd\uff0c\u4ec5\u9632\u6b62QA\u51fa\u73b0\u88ab\u622a\u65ad\u7684\u95ee\u9898\n # MiniMind2-moe (145M)\uff1a(hidden_size=640, num_hidden_layers=8, use_moe=True)\n # MiniMind2-Small (26M)\uff1a(hidden_size=512, num_hidden_layers=8)\n # MiniMind2 (104M)\uff1a(hidden_size=768, num_hidden_layers=16)\n 
parser.add_argument('--hidden_size', default=512, type=int)\n parser.add_argument('--num_hidden_layers', default=8, type=int)\n parser.add_argument('--max_seq_len', default=8192, type=int)\n parser.add_argument('--use_moe', default=False, type=bool)\n # \u643a\u5e26\u5386\u53f2\u5bf9\u8bdd\u4e0a\u4e0b\u6587\u6761\u6570\n # history_cnt\u9700\u8981\u8bbe\u4e3a\u5076\u6570\uff0c\u5373\u3010\u7528\u6237\u95ee\u9898, \u6a21\u578b\u56de\u7b54\u3011\u4e3a1\u7ec4\uff1b\u8bbe\u7f6e\u4e3a0\u65f6\uff0c\u5373\u5f53\u524dquery\u4e0d\u643a\u5e26\u5386\u53f2\u4e0a\u6587\n # \u6a21\u578b\u672a\u7ecf\u8fc7\u5916\u63a8\u5fae\u8c03\u65f6\uff0c\u5728\u66f4\u957f\u7684\u4e0a\u4e0b\u6587\u7684chat_template\u65f6\u96be\u514d\u51fa\u73b0\u6027\u80fd\u7684\u660e\u663e\u9000\u5316\uff0c\u56e0\u6b64\u9700\u8981\u6ce8\u610f\u6b64\u5904\u8bbe\u7f6e\n parser.add_argument('--history_cnt', default=0, type=int)\n parser.add_argument('--load', default=0, type=int, help=\"0: \u539f\u751ftorch\u6743\u91cd\uff0c1: transformers\u52a0\u8f7d\")\n parser.add_argument('--model_mode', default=1, type=int,\n help=\"0: \u9884\u8bad\u7ec3\u6a21\u578b\uff0c1: SFT-Chat\u6a21\u578b\uff0c2: RLHF-Chat\u6a21\u578b\uff0c3: Reason\u6a21\u578b\uff0c4: RLAIF-Chat\u6a21\u578b\")\n args = parser.parse_args()\n\n model, tokenizer = init_model(args)\n\n prompts = get_prompt_datas(args)\n test_mode = int(input('[0] \u81ea\u52a8\u6d4b\u8bd5\\n[1] \u624b\u52a8\u8f93\u5165\\n'))\n streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n\n messages = []\n for idx, prompt in enumerate(prompts if test_mode == 0 else iter(lambda: input('\ud83d\udc76: '), '')):\n setup_seed(random.randint(0, 2048))\n # setup_seed(2025) # \u5982\u9700\u56fa\u5b9a\u6bcf\u6b21\u8f93\u51fa\u5219\u6362\u6210\u3010\u56fa\u5b9a\u3011\u7684\u968f\u673a\u79cd\u5b50\n if test_mode == 0: print(f'\ud83d\udc76: {prompt}')\n\n messages = messages[-args.history_cnt:] if args.history_cnt else []\n messages.append({\"role\": \"user\", \"content\": prompt})\n\n new_prompt = tokenizer.apply_chat_template(\n messages,\n tokenize=False,\n add_generation_prompt=True\n ) if args.model_mode != 0 else (tokenizer.bos_token + prompt)\n\n inputs = tokenizer(\n new_prompt,\n return_tensors=\"pt\",\n truncation=True\n ).to(args.device)\n\n print('\ud83e\udd16\ufe0f: ', end='')\n generated_ids = model.generate(\n inputs[\"input_ids\"],\n max_new_tokens=args.max_seq_len,\n num_return_sequences=1,\n do_sample=True,\n attention_mask=inputs[\"attention_mask\"],\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n streamer=streamer,\n top_p=args.top_p,\n temperature=args.temperature\n )\n\n response = tokenizer.decode(generated_ids[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n messages.append({\"role\": \"assistant\", \"content\": response})\n print('\\n\\n')", "creation_date": "2025-02-09T15:49:47Z", "repo": "jingyaogong/minimind", "file_path": "eval_model.py", "stars": 23015, "label": 0} +{"function": "def Logger(content):\n if not ddp or dist.get_rank() == 0:\n print(content)", "creation_date": "2025-04-26T02:05:47Z", "repo": "jingyaogong/minimind", "file_path": "trainer/train_distill_reason.py", "stars": 23015, "label": 0} +{"function": "def get_lr(current_step, total_steps, lr):\n return lr / 10 + 0.5 * lr * (1 + math.cos(math.pi * current_step / total_steps))", "creation_date": "2025-04-26T02:05:47Z", "repo": "jingyaogong/minimind", "file_path": "trainer/train_distill_reason.py", "stars": 23015, "label": 0} +{"function": "def 
train_epoch(epoch, wandb):\n    # \u601d\u8003\u6807\u7b7e\u5360\u4f4d\u7b26\n    start_of_think_ids = tokenizer('<think>').input_ids\n    end_of_think_ids = tokenizer('</think>').input_ids\n    start_of_answer_ids = tokenizer('<answer>').input_ids\n    end_of_answer_ids = tokenizer('</answer>').input_ids\n    loss_fct = nn.CrossEntropyLoss(reduction='none')\n    start_time = time.time()\n    for step, (X, Y, loss_mask) in enumerate(train_loader):\n        X = X.to(args.device)\n        Y = Y.to(args.device)\n        loss_mask = loss_mask.to(args.device)\n        lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch, args.learning_rate)\n        for param_group in optimizer.param_groups:\n            param_group['lr'] = lr\n\n        with ctx:\n            res = model(X)\n            loss = loss_fct(\n                res.logits.view(-1, res.logits.size(-1)),\n                Y.view(-1)\n            ).view(Y.size())\n            sp_ids = torch.isin(Y.view(-1),\n                                torch.tensor(start_of_think_ids + end_of_think_ids\n                                             + start_of_answer_ids + end_of_answer_ids\n                                             ).to(args.device))\n            # \u5728 sp_ids \u5bf9\u5e94\u7684\u4f4d\u7f6e\u589e\u52a0\u989d\u5916\u7684\u60e9\u7f5a\n            loss_mask = loss_mask.view(-1)\n            loss_mask_sum = loss_mask.sum()\n            loss_mask[sp_ids] = 10\n            loss_mask = loss_mask.view(Y.size())\n            loss = (loss * loss_mask).sum() / loss_mask_sum\n            loss += res.aux_loss\n            loss = loss / args.accumulation_steps\n\n        scaler.scale(loss).backward()\n\n        if (step + 1) % args.accumulation_steps == 0:\n            scaler.unscale_(optimizer)\n            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n\n            scaler.step(optimizer)\n            scaler.update()\n\n            optimizer.zero_grad(set_to_none=True)\n\n        if step % args.log_interval == 0:\n            spend_time = time.time() - start_time\n            Logger(\n                'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.12f} epoch_Time:{}min:'.format(\n                    epoch + 1,\n                    args.epochs,\n                    step,\n                    iter_per_epoch,\n                    loss.item() * args.accumulation_steps,\n                    optimizer.param_groups[-1]['lr'],\n                    spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60))\n\n            if (wandb is not None) and (not ddp or dist.get_rank() == 0):\n                wandb.log({\"loss\": loss * args.accumulation_steps,\n                           \"lr\": optimizer.param_groups[-1]['lr'],\n                           \"epoch_Time\": spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60})\n\n        if (step + 1) % args.save_interval == 0 and (not ddp or dist.get_rank() == 0):\n            model.eval()\n            moe_path = '_moe' if lm_config.use_moe else ''\n            ckp = f'{args.save_dir}/reason_{lm_config.hidden_size}{moe_path}.pth'\n\n            if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n                state_dict = model.module.state_dict()\n            else:\n                state_dict = model.state_dict()\n\n            state_dict = {k: v.half() for k, v in state_dict.items()} # \u534a\u7cbe\u5ea6\u4fdd\u5b58\n            torch.save(state_dict, ckp)\n            model.train()", "creation_date": "2025-04-26T02:05:47Z", "repo": "jingyaogong/minimind", "file_path": "trainer/train_distill_reason.py", "stars": 23015, "label": 0} +{"function": "def init_model(lm_config):\n    tokenizer = AutoTokenizer.from_pretrained('../model')\n    model = MiniMindForCausalLM(lm_config)\n    moe_path = '_moe' if lm_config.use_moe else ''\n    ckp = f'{args.save_dir}/rlhf_{lm_config.hidden_size}{moe_path}.pth'\n    state_dict = torch.load(ckp, map_location=args.device)\n    model.load_state_dict(state_dict, strict=False)\n    Logger(f'LLM\u603b\u53c2\u6570\u91cf\uff1a{sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6:.3f} \u767e\u4e07')\n    model = model.to(args.device)\n    return model, tokenizer", "creation_date": "2025-04-26T02:05:47Z", "repo": "jingyaogong/minimind", "file_path": "trainer/train_distill_reason.py", "stars": 23015, "label": 0} +{"function": "def 
init_distributed_mode():\n if not ddp: return\n global ddp_local_rank, DEVICE\n\n dist.init_process_group(backend=\"nccl\")\n ddp_rank = int(os.environ[\"RANK\"])\n ddp_local_rank = int(os.environ[\"LOCAL_RANK\"])\n ddp_world_size = int(os.environ[\"WORLD_SIZE\"])\n DEVICE = f\"cuda:{ddp_local_rank}\"\n torch.cuda.set_device(DEVICE)", "creation_date": "2025-04-26T02:05:47Z", "repo": "jingyaogong/minimind", "file_path": "trainer/train_distill_reason.py", "stars": 23015, "label": 0} +{"function": "def Logger(content):\n if not ddp or dist.get_rank() == 0:\n print(content)", "creation_date": "2025-04-26T02:05:47Z", "repo": "jingyaogong/minimind", "file_path": "trainer/train_distillation.py", "stars": 23015, "label": 0} +{"function": "async def favicon():\n return FileResponse(gradio_app._favicon)", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app.py", "stars": 22784, "label": 0} +{"function": "def add_session_middleware(app):\n config_data = {\n \"GOOGLE_CLIENT_ID\": GOOGLE_CLIENT_ID,\n \"GOOGLE_CLIENT_SECRET\": GOOGLE_CLIENT_SECRET,\n }\n starlette_config = Config(environ=config_data)\n oauth = OAuth(starlette_config)\n oauth.register(\n name=\"google\",\n server_metadata_url=(\n \"https://accounts.google.com/\" \".well-known/openid-configuration\"\n ),\n client_kwargs={\"scope\": \"openid email profile\"},\n )\n\n app.add_middleware(SessionMiddleware, secret_key=SECRET_KEY)\n return oauth", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app_demo.py", "stars": 22784, "label": 0} +{"function": "def public(request: Request):\n root_url = gr.route_utils.get_root_url(request, \"/\", None)\n return RedirectResponse(url=f\"{root_url}/app/\")", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app_demo.py", "stars": 22784, "label": 0} +{"function": "async def favicon():\n return FileResponse(gradio_app._favicon)", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app_demo.py", "stars": 22784, "label": 0} +{"function": "async def logout(request: Request):\n request.session.pop(\"user\", None)\n return RedirectResponse(url=\"/\")", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app_demo.py", "stars": 22784, "label": 0} +{"function": "async def login(request: Request):\n root_url = gr.route_utils.get_root_url(request, \"/login\", None)\n redirect_uri = f\"{root_url}/auth\"\n return await oauth.google.authorize_redirect(request, redirect_uri)", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app_demo.py", "stars": 22784, "label": 0} +{"function": "async def auth(request: Request):\n try:\n access_token = await oauth.google.authorize_access_token(request)\n except OAuthError:\n return RedirectResponse(url=\"/\")\n request.session[\"user\"] = dict(access_token)[\"userinfo\"]\n return RedirectResponse(url=\"/\")", "creation_date": "2025-02-02T08:19:48Z", "repo": "Cinnamon/kotaemon", "file_path": "sso_app_demo.py", "stars": 22784, "label": 0} +{"function": "def serve_llamacpp_python(local_model_file: Path, **kwargs):\n def guess_chat_format(local_model_file):\n model_name = local_model_file.stem\n\n # handle known cases that the server backends handle incorrectly\n # this is highly heuristic, should be expand later\n # server backends usually has logic for this but they could still be wrong\n if \"qwen\" in model_name:\n return \"qwen\"\n\n return None\n\n # default 
port\n if \"port\" not in kwargs:\n kwargs[\"port\"] = 31415\n\n chat_format = guess_chat_format(local_model_file)\n if chat_format:\n kwargs = {**kwargs, \"chat_format\": chat_format}\n\n # these scripts create a separate conda env and run the server\n if system_name == \"Windows\":\n script_file = this_dir / \"server_llamacpp_windows.bat\"\n elif system_name == \"Linux\":\n script_file = this_dir / \"server_llamacpp_linux.sh\"\n elif system_name == \"Darwin\":\n script_file = this_dir / \"server_llamacpp_macos.sh\"\n else:\n raise ValueError(f\"Unsupported system: {system_name}\")\n\n args = \" \".join(f\"--{k} {v}\" for k, v in kwargs.items())\n\n cmd = f\"{script_file} --model {local_model_file} {args}\"\n subprocess.Popen(cmd, shell=True)", "creation_date": "2024-03-15T09:17:33Z", "repo": "Cinnamon/kotaemon", "file_path": "scripts/serve_local.py", "stars": 22784, "label": 0} +{"function": "def main():\n local_model_file = config(\"LOCAL_MODEL\", default=\"\")\n\n if not local_model_file:\n print(\"LOCAL_MODEL not set in the `.env` file.\")\n return\n\n local_model_file = Path(local_model_file)\n if not local_model_file.exists():\n print(f\"Local model not found: {local_model_file}\")\n return\n\n print(f\"Local model found: {local_model_file}\")\n will_start_server = input(\"Do you want to use this local model ? (y/n): \")\n\n if will_start_server.lower().strip() not in [\"y\", \"yes\"]:\n return\n\n print(\"Starting the local server...\")\n if local_model_file.suffix == \".gguf\":\n serve_llamacpp_python(local_model_file)\n else:\n raise ValueError(f\"Unsupported model file type: {local_model_file.suffix}\")", "creation_date": "2024-03-15T09:17:33Z", "repo": "Cinnamon/kotaemon", "file_path": "scripts/serve_local.py", "stars": 22784, "label": 0} +{"function": " def guess_chat_format(local_model_file):\n model_name = local_model_file.stem\n\n # handle known cases that the server backends handle incorrectly\n # this is highly heuristic, should be expand later\n # server backends usually has logic for this but they could still be wrong\n if \"qwen\" in model_name:\n return \"qwen\"\n\n return None", "creation_date": "2024-03-15T09:17:33Z", "repo": "Cinnamon/kotaemon", "file_path": "scripts/serve_local.py", "stars": 22784, "label": 0} +{"function": "def patch_multi_step_agent_with_suppressed_logging():\n with patch.object(MultiStepAgent, \"__init__\", autospec=True) as mock_init:\n\n def init_with_suppressed_logging(self, *args, verbosity_level=LogLevel.OFF, **kwargs):\n original_multi_step_agent_init(self, *args, verbosity_level=verbosity_level, **kwargs)\n\n mock_init.side_effect = init_with_suppressed_logging\n yield", "creation_date": "2025-02-05T17:48:39Z", "repo": "huggingface/smolagents", "file_path": "tests/conftest.py", "stars": 21175, "label": 0} +{"function": " def init_with_suppressed_logging(self, *args, verbosity_level=LogLevel.OFF, **kwargs):\n original_multi_step_agent_init(self, *args, verbosity_level=verbosity_level, **kwargs)", "creation_date": "2025-02-05T17:48:39Z", "repo": "huggingface/smolagents", "file_path": "tests/conftest.py", "stars": 21175, "label": 0} +{"function": "def get_new_path(suffix=\"\") -> str:\n directory = tempfile.mkdtemp()\n return os.path.join(directory, str(uuid.uuid4()) + suffix)", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": "def agent_logger():\n return AgentLogger(\n LogLevel.DEBUG, console=Console(record=True, no_color=True, 
force_terminal=False, file=io.StringIO())\n )", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": "def prompt_templates():\n return {\n \"system_prompt\": \"This is a test system prompt.\",\n \"managed_agent\": {\"task\": \"Task for {{name}}: {{task}}\", \"report\": \"Report for {{name}}: {{final_answer}}\"},\n \"planning\": {\n \"initial_plan\": \"The plan.\",\n \"update_plan_pre_messages\": \"custom\",\n \"update_plan_post_messages\": \"custom\",\n },\n \"final_answer\": {\"pre_messages\": \"custom\", \"post_messages\": \"custom\"},\n }", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": "def test_tool_calling_agents_raises_tool_call_error_being_invoked_with_wrong_arguments(arguments):\n @tool\n def _sample_tool(prompt: str) -> str:\n \"\"\"Tool that returns same string\n Args:\n prompt: The string to return\n Returns:\n The same string\n \"\"\"\n\n return prompt\n\n agent = ToolCallingAgent(model=FakeToolCallModel(), tools=[_sample_tool])\n with pytest.raises(AgentToolCallError):\n agent.execute_tool_call(_sample_tool.name, arguments)", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": "def test_tool_calling_agents_raises_agent_execution_error_when_tool_raises():\n @tool\n def _sample_tool(_: str) -> float:\n \"\"\"Tool that fails\n\n Args:\n _: The pointless string\n Returns:\n Some number\n \"\"\"\n\n return 1 / 0\n\n agent = ToolCallingAgent(model=FakeToolCallModel(), tools=[_sample_tool])\n with pytest.raises(AgentExecutionError):\n agent.execute_tool_call(_sample_tool.name, \"sample\")", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": " def generate(self, messages, tools_to_call_from=None, stop_sequences=None):\n if len(messages) < 3:\n return ChatMessage(\n role=MessageRole.ASSISTANT,\n content=\"\",\n tool_calls=[\n ChatMessageToolCall(\n id=\"call_0\",\n type=\"function\",\n function=ChatMessageToolCallFunction(\n name=\"python_interpreter\", arguments={\"code\": \"2*3.6452\"}\n ),\n )\n ],\n )\n else:\n return ChatMessage(\n role=MessageRole.ASSISTANT,\n content=\"\",\n tool_calls=[\n ChatMessageToolCall(\n id=\"call_1\",\n type=\"function\",\n function=ChatMessageToolCallFunction(name=\"final_answer\", arguments={\"answer\": \"7.2904\"}),\n )\n ],\n )", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": " def generate(self, messages, tools_to_call_from=None, stop_sequences=None):\n if len(messages) < 3:\n return ChatMessage(\n role=MessageRole.ASSISTANT,\n content=\"\",\n tool_calls=[\n ChatMessageToolCall(\n id=\"call_0\",\n type=\"function\",\n function=ChatMessageToolCallFunction(\n name=\"fake_image_generation_tool\",\n arguments={\"prompt\": \"An image of a cat\"},\n ),\n )\n ],\n )\n else:\n return ChatMessage(\n role=MessageRole.ASSISTANT,\n content=\"\",\n tool_calls=[\n ChatMessageToolCall(\n id=\"call_1\",\n type=\"function\",\n function=ChatMessageToolCallFunction(name=\"final_answer\", arguments=\"image.png\"),\n )\n ],\n )", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, 
"label": 0} +{"function": " def generate(self, messages, tools_to_call_from=None, stop_sequences=None):\n if len(messages) < 3:\n return ChatMessage(\n role=MessageRole.ASSISTANT,\n content=\"\",\n tool_calls=[\n ChatMessageToolCall(\n id=\"call_0\",\n type=\"function\",\n function=ChatMessageToolCallFunction(\n name=\"fake_image_understanding_tool\",\n arguments={\n \"prompt\": \"What is in this image?\",\n \"image\": \"image.png\",\n },\n ),\n )\n ],\n )\n else:\n return ChatMessage(\n role=MessageRole.ASSISTANT,\n content=\"\",\n tool_calls=[\n ChatMessageToolCall(\n id=\"call_1\",\n type=\"function\",\n function=ChatMessageToolCallFunction(name=\"final_answer\", arguments=\"The image is a cat.\"),\n )\n ],\n )", "creation_date": "2024-12-24T22:36:46Z", "repo": "huggingface/smolagents", "file_path": "tests/test_agents.py", "stars": 21175, "label": 0} +{"function": "def run_command(command, shell=False):\n \"\"\"Run a system command and ensure it succeeds.\"\"\"\n try:\n subprocess.run(command, shell=shell, check=True)\n except subprocess.CalledProcessError as e:\n print(f\"Error occurred while running command: {e}\")\n sys.exit(1)", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "run_inference.py", "stars": 20502, "label": 0} +{"function": "def run_inference():\n build_dir = \"build\"\n if platform.system() == \"Windows\":\n main_path = os.path.join(build_dir, \"bin\", \"Release\", \"llama-cli.exe\")\n if not os.path.exists(main_path):\n main_path = os.path.join(build_dir, \"bin\", \"llama-cli\")\n else:\n main_path = os.path.join(build_dir, \"bin\", \"llama-cli\")\n command = [\n f'{main_path}',\n '-m', args.model,\n '-n', str(args.n_predict),\n '-t', str(args.threads),\n '-p', args.prompt,\n '-ngl', '0',\n '-c', str(args.ctx_size),\n '--temp', str(args.temperature),\n \"-b\", \"1\",\n ]\n if args.conversation:\n command.append(\"-cnv\")\n run_command(command)", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "run_inference.py", "stars": 20502, "label": 0} +{"function": "def signal_handler(sig, frame):\n print(\"Ctrl+C pressed, exiting...\")\n sys.exit(0)", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "run_inference.py", "stars": 20502, "label": 0} +{"function": "def run_command(command, shell=False):\n \"\"\"Run a system command and ensure it succeeds.\"\"\"\n try:\n subprocess.run(command, shell=shell, check=True)\n except subprocess.CalledProcessError as e:\n print(f\"Error occurred while running command: {e}\")\n sys.exit(1)", "creation_date": "2025-05-08T08:22:12Z", "repo": "microsoft/BitNet", "file_path": "run_inference_server.py", "stars": 20502, "label": 0} +{"function": "def run_server():\n build_dir = \"build\"\n if platform.system() == \"Windows\":\n server_path = os.path.join(build_dir, \"bin\", \"Release\", \"llama-server.exe\")\n if not os.path.exists(server_path):\n server_path = os.path.join(build_dir, \"bin\", \"llama-server\")\n else:\n server_path = os.path.join(build_dir, \"bin\", \"llama-server\")\n \n command = [\n f'{server_path}',\n '-m', args.model,\n '-c', str(args.ctx_size),\n '-t', str(args.threads),\n '-n', str(args.n_predict),\n '-ngl', '0',\n '--temp', str(args.temperature),\n '--host', args.host,\n '--port', str(args.port),\n '-cb' # Enable continuous batching\n ]\n \n if args.prompt:\n command.extend(['-p', args.prompt])\n \n # Note: -cnv flag is removed as it's not supported by the server\n \n print(f\"Starting server on 
{args.host}:{args.port}\")\n run_command(command)", "creation_date": "2025-05-08T08:22:12Z", "repo": "microsoft/BitNet", "file_path": "run_inference_server.py", "stars": 20502, "label": 0} +{"function": "def signal_handler(sig, frame):\n print(\"Ctrl+C pressed, shutting down server...\")\n sys.exit(0)", "creation_date": "2025-05-08T08:22:12Z", "repo": "microsoft/BitNet", "file_path": "run_inference_server.py", "stars": 20502, "label": 0} +{"function": "def system_info():\n return platform.system(), ARCH_ALIAS[platform.machine()]", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "setup_env.py", "stars": 20502, "label": 0} +{"function": "def get_model_name():\n if args.hf_repo:\n return SUPPORTED_HF_MODELS[args.hf_repo][\"model_name\"]\n return os.path.basename(os.path.normpath(args.model_dir))", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "setup_env.py", "stars": 20502, "label": 0} +{"function": "def run_command(command, shell=False, log_step=None):\n \"\"\"Run a system command and ensure it succeeds.\"\"\"\n if log_step:\n log_file = os.path.join(args.log_dir, log_step + \".log\")\n with open(log_file, \"w\") as f:\n try:\n subprocess.run(command, shell=shell, check=True, stdout=f, stderr=f)\n except subprocess.CalledProcessError as e:\n logging.error(f\"Error occurred while running command: {e}, check details in {log_file}\")\n sys.exit(1)\n else:\n try:\n subprocess.run(command, shell=shell, check=True)\n except subprocess.CalledProcessError as e:\n logging.error(f\"Error occurred while running command: {e}\")\n sys.exit(1)", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "setup_env.py", "stars": 20502, "label": 0} +{"function": "def prepare_model():\n _, arch = system_info()\n hf_url = args.hf_repo\n model_dir = args.model_dir\n quant_type = args.quant_type\n quant_embd = args.quant_embd\n if hf_url is not None:\n # download the model\n model_dir = os.path.join(model_dir, SUPPORTED_HF_MODELS[hf_url][\"model_name\"])\n Path(model_dir).mkdir(parents=True, exist_ok=True)\n logging.info(f\"Downloading model {hf_url} from HuggingFace to {model_dir}...\")\n run_command([\"huggingface-cli\", \"download\", hf_url, \"--local-dir\", model_dir], log_step=\"download_model\")\n elif not os.path.exists(model_dir):\n logging.error(f\"Model directory {model_dir} does not exist.\")\n sys.exit(1)\n else:\n logging.info(f\"Loading model from directory {model_dir}.\")\n gguf_path = os.path.join(model_dir, \"ggml-model-\" + quant_type + \".gguf\")\n if not os.path.exists(gguf_path) or os.path.getsize(gguf_path) == 0:\n logging.info(f\"Converting HF model to GGUF format...\")\n if quant_type.startswith(\"tl\"):\n run_command([sys.executable, \"utils/convert-hf-to-gguf-bitnet.py\", model_dir, \"--outtype\", quant_type, \"--quant-embd\"], log_step=\"convert_to_tl\")\n else: # i2s\n # convert to f32\n run_command([sys.executable, \"utils/convert-hf-to-gguf-bitnet.py\", model_dir, \"--outtype\", \"f32\"], log_step=\"convert_to_f32_gguf\")\n f32_model = os.path.join(model_dir, \"ggml-model-f32.gguf\")\n i2s_model = os.path.join(model_dir, \"ggml-model-i2_s.gguf\")\n # quantize to i2s\n if platform.system() != \"Windows\":\n if quant_embd:\n run_command([\"./build/bin/llama-quantize\", \"--token-embedding-type\", \"f16\", f32_model, i2s_model, \"I2_S\", \"1\", \"1\"], log_step=\"quantize_to_i2s\")\n else:\n run_command([\"./build/bin/llama-quantize\", f32_model, i2s_model, \"I2_S\", \"1\"], 
log_step=\"quantize_to_i2s\")\n else:\n if quant_embd:\n run_command([\"./build/bin/Release/llama-quantize\", \"--token-embedding-type\", \"f16\", f32_model, i2s_model, \"I2_S\", \"1\", \"1\"], log_step=\"quantize_to_i2s\")\n else:\n run_command([\"./build/bin/Release/llama-quantize\", f32_model, i2s_model, \"I2_S\", \"1\"], log_step=\"quantize_to_i2s\")\n\n logging.info(f\"GGUF model saved at {gguf_path}\")\n else:\n logging.info(f\"GGUF model already exists at {gguf_path}\")", "creation_date": "2024-10-17T13:21:10Z", "repo": "microsoft/BitNet", "file_path": "setup_env.py", "stars": 20502, "label": 0} +{"function": "def load_yaml_file(yaml_file: str) -> dict:\n with open(yaml_file, 'r', encoding='utf-8') as file:\n return yaml.safe_load(file)", "creation_date": "2024-12-20T11:21:43Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "tools/common.py", "stars": 20285, "label": 0} +{"function": "def save_yaml_file(yaml_file: str, data: dict) -> None:\n with open(yaml_file, 'w', encoding='utf-8') as file:\n yaml.safe_dump(\n data=data, stream=file, allow_unicode=True)", "creation_date": "2024-12-20T11:21:43Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "tools/common.py", "stars": 20285, "label": 0} +{"function": "def load_json_file(json_file: str) -> dict:\n with open(json_file, 'r', encoding='utf-8') as file:\n return json.load(file)", "creation_date": "2024-12-20T11:21:43Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "tools/common.py", "stars": 20285, "label": 0} +{"function": "def save_json_file(json_file: str, data: dict) -> None:\n with open(json_file, 'w', encoding='utf-8') as file:\n json.dump(data, file, ensure_ascii=False, indent=4)", "creation_date": "2024-12-20T11:21:43Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "tools/common.py", "stars": 20285, "label": 0} +{"function": "def http_get(\n url: str, params: dict = None, headers: dict = None\n) -> dict:\n if params:\n encoded_params = urlencode(params)\n full_url = f'{url}?{encoded_params}'\n else:\n full_url = url\n request = Request(full_url, method='GET', headers=headers or {})\n content: bytes = None\n with urlopen(request) as response:\n content = response.read()\n return (\n json.loads(str(content, 'utf-8'))\n if content is not None else None)", "creation_date": "2024-12-20T11:21:43Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "tools/common.py", "stars": 20285, "label": 0} +{"function": "def update_profile_model(file_path: str):\n profile_rules: dict = http_get(\n url='https://miot-spec.org/instance/translate/models')\n if not profile_rules and 'models' not in profile_rules and not isinstance(\n profile_rules['models'], dict):\n raise ValueError('Failed to get profile rule')\n local_rules: dict = load_yaml_file(\n yaml_file=file_path) or {}\n for rule, ts in profile_rules['models'].items():\n if rule not in local_rules:\n local_rules[rule] = {'ts': ts}\n else:\n local_rules[rule]['ts'] = ts\n for mode in SPECIAL_MODELS:\n if mode not in local_rules:\n local_rules[mode] = {'ts': 1531108800}\n else:\n local_rules[mode]['ts'] = 1531108800\n local_rules = dict(sorted(local_rules.items()))\n save_yaml_file(\n yaml_file=file_path, data=local_rules)", "creation_date": "2024-12-20T11:21:43Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "tools/update_lan_rule.py", "stars": 20285, "label": 0} +{"function": "def load_json_file(file_path: str) -> Optional[dict]:\n try:\n with open(file_path, 'r', encoding='utf-8') as file:\n return json.load(file)\n except FileNotFoundError:\n _LOGGER.info('%s is not found.', file_path)\n 
return None\n except json.JSONDecodeError:\n _LOGGER.info('%s is not a valid JSON file.', file_path)\n return None", "creation_date": "2024-12-12T12:45:30Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "test/check_rule_format.py", "stars": 20285, "label": 0} +{"function": "def save_json_file(file_path: str, data: dict) -> None:\n with open(file_path, 'w', encoding='utf-8') as file:\n json.dump(data, file, ensure_ascii=False, indent=2)", "creation_date": "2024-12-12T12:45:30Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "test/check_rule_format.py", "stars": 20285, "label": 0} +{"function": "def load_yaml_file(file_path: str) -> Optional[dict]:\n try:\n with open(file_path, 'r', encoding='utf-8') as file:\n return yaml.safe_load(file)\n except FileNotFoundError:\n _LOGGER.info('%s is not found.', file_path)\n return None\n except yaml.YAMLError:\n _LOGGER.info('%s, is not a valid YAML file.', file_path)\n return None", "creation_date": "2024-12-12T12:45:30Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "test/check_rule_format.py", "stars": 20285, "label": 0} +{"function": "def save_yaml_file(file_path: str, data: dict) -> None:\n with open(file_path, 'w', encoding='utf-8') as file:\n yaml.safe_dump(data,\n file,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n sort_keys=False)", "creation_date": "2024-12-12T12:45:30Z", "repo": "XiaoMi/ha_xiaomi_home", "file_path": "test/check_rule_format.py", "stars": 20285, "label": 0} +{"function": "def mock_playwright():\n with patch(\"playwright.async_api.async_playwright\") as mock:\n mock_pw = MockPlaywright()\n mock_browser = MockBrowser()\n mock_context = MockContext()\n mock_page = MockPage()\n\n mock_pw.chromium.launch.return_value = mock_browser\n mock_pw.firefox.launch.return_value = mock_browser\n mock_browser.new_context.return_value = mock_context\n mock_context.new_page.return_value = mock_page\n\n mock.return_value.__aenter__.return_value = mock_pw\n yield mock_pw, mock_browser, mock_context, mock_page", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def dummy_scraper(url):\n \"\"\"A dummy scraping function that returns dummy HTML content for the URL.\"\"\"\n return f\"dummy content for {url}\"", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "def loader_with_dummy(monkeypatch):\n \"\"\"Fixture returning a ChromiumLoader instance with dummy scraping methods patched.\"\"\"\n urls = [\"http://example.com\", \"http://test.com\"]\n loader = ChromiumLoader(urls, backend=\"playwright\", requires_js_support=False)\n monkeypatch.setattr(loader, \"ascrape_playwright\", dummy_scraper)\n monkeypatch.setattr(loader, \"ascrape_with_js_support\", dummy_scraper)\n monkeypatch.setattr(loader, \"ascrape_undetected_chromedriver\", dummy_scraper)\n return loader", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "def test_lazy_load(loader_with_dummy):\n \"\"\"Test that lazy_load yields Document objects with the correct dummy content and metadata.\"\"\"\n docs = list(loader_with_dummy.lazy_load())\n assert len(docs) == 2\n for doc, url in zip(docs, loader_with_dummy.urls):\n assert isinstance(doc, Document)\n assert f\"dummy content for {url}\" in doc.page_content\n assert 
doc.metadata[\"source\"] == url", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def test_alazy_load(loader_with_dummy):\n \"\"\"Test that alazy_load asynchronously yields Document objects with dummy content and proper metadata.\"\"\"\n docs = [doc async for doc in loader_with_dummy.alazy_load()]\n assert len(docs) == 2\n for doc, url in zip(docs, loader_with_dummy.urls):\n assert isinstance(doc, Document)\n assert f\"dummy content for {url}\" in doc.page_content\n assert doc.metadata[\"source\"] == url", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def test_scrape_method_unsupported_backend():\n \"\"\"Test that the scrape method raises a ValueError when an unsupported backend is provided.\"\"\"\n loader = ChromiumLoader([\"http://example.com\"], backend=\"unsupported\")\n with pytest.raises(ValueError):\n await loader.scrape(\"http://example.com\")", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def test_scrape_method_selenium(monkeypatch):\n \"\"\"Test that the scrape method works correctly for selenium by returning the dummy selenium content.\"\"\"\n\n async def dummy_selenium(url):\n return f\"dummy selenium content for {url}\"\n\n urls = [\"http://example.com\"]\n loader = ChromiumLoader(urls, backend=\"selenium\")\n loader.browser_name = \"chromium\"\n monkeypatch.setattr(loader, \"ascrape_undetected_chromedriver\", dummy_selenium)\n result = await loader.scrape(\"http://example.com\")\n assert \"dummy selenium content\" in result", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def test_ascrape_playwright_scroll(mock_playwright):\n \"\"\"Test the ascrape_playwright_scroll method with various configurations.\"\"\"\n mock_pw, mock_browser, mock_context, mock_page = mock_playwright\n\n url = \"http://example.com\"\n loader = ChromiumLoader([url], backend=\"playwright\")\n\n # Test with default parameters\n mock_page.evaluate.side_effect = [1000, 2000, 2000] # Simulate scrolling\n result = await loader.ascrape_playwright_scroll(url)\n\n assert mock_page.goto.call_count == 1\n assert mock_page.wait_for_load_state.call_count == 1\n assert mock_page.mouse.wheel.call_count > 0\n assert mock_page.content.call_count == 1\n\n # Test with custom parameters\n mock_page.evaluate.side_effect = [1000, 2000, 3000, 4000, 4000]\n result = await loader.ascrape_playwright_scroll(\n url, timeout=10, scroll=10000, sleep=1, scroll_to_bottom=True\n )\n\n assert mock_page.goto.call_count == 2\n assert mock_page.wait_for_load_state.call_count == 2\n assert mock_page.mouse.wheel.call_count > 0\n assert mock_page.content.call_count == 2", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def test_ascrape_with_js_support(mock_playwright):\n \"\"\"Test the ascrape_with_js_support method with different browser configurations.\"\"\"\n mock_pw, mock_browser, mock_context, mock_page = mock_playwright\n\n url = \"http://example.com\"\n loader = ChromiumLoader([url], backend=\"playwright\", requires_js_support=True)\n\n 
# Test with Chromium\n result = await loader.ascrape_with_js_support(url, browser_name=\"chromium\")\n assert mock_pw.chromium.launch.call_count == 1\n assert mock_page.goto.call_count == 1\n assert mock_page.content.call_count == 1\n\n # Test with Firefox\n result = await loader.ascrape_with_js_support(url, browser_name=\"firefox\")\n assert mock_pw.firefox.launch.call_count == 1\n assert mock_page.goto.call_count == 2\n assert mock_page.content.call_count == 2\n\n # Test with invalid browser name\n with pytest.raises(ValueError):\n await loader.ascrape_with_js_support(url, browser_name=\"invalid\")", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "async def test_scrape_method_playwright(mock_playwright):\n \"\"\"Test the scrape method with playwright backend.\"\"\"\n mock_pw, mock_browser, mock_context, mock_page = mock_playwright\n\n url = \"http://example.com\"\n loader = ChromiumLoader([url], backend=\"playwright\")\n\n mock_page.content.return_value = \"Playwright content\"\n result = await loader.scrape(url)\n\n assert \"Playwright content\" in result\n assert mock_pw.chromium.launch.call_count == 1\n assert mock_page.goto.call_count == 1\n assert mock_page.wait_for_load_state.call_count == 1\n assert mock_page.content.call_count == 1", "creation_date": "2025-04-14T07:50:46Z", "repo": "ScrapeGraphAI/Scrapegraph-ai", "file_path": "tests/test_chromium.py", "stars": 20265, "label": 0} +{"function": "def create_mock_response(message, function_calls=[], model=\"gpt-4o\"):\n role = message.get(\"role\", \"assistant\")\n content = message.get(\"content\", \"\")\n tool_calls = (\n [\n ChatCompletionMessageToolCall(\n id=\"mock_tc_id\",\n type=\"function\",\n function=Function(\n name=call.get(\"name\", \"\"),\n arguments=json.dumps(call.get(\"args\", {})),\n ),\n )\n for call in function_calls\n ]\n if function_calls\n else None\n )\n\n return ChatCompletion(\n id=\"mock_cc_id\",\n created=1234567890,\n model=model,\n object=\"chat.completion\",\n choices=[\n Choice(\n message=ChatCompletionMessage(\n role=role, content=content, tool_calls=tool_calls\n ),\n finish_reason=\"stop\",\n index=0,\n )\n ],\n )", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/mock_client.py", "stars": 20084, "label": 0} +{"function": " def __init__(self):\n self.chat = MagicMock()\n self.chat.completions = MagicMock()", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/mock_client.py", "stars": 20084, "label": 0} +{"function": " def set_response(self, response: ChatCompletion):\n \"\"\"\n Set the mock to return a specific response.\n :param response: A ChatCompletion response to return.\n \"\"\"\n self.chat.completions.create.return_value = response", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/mock_client.py", "stars": 20084, "label": 0} +{"function": " def set_sequential_responses(self, responses: list[ChatCompletion]):\n \"\"\"\n Set the mock to return different responses sequentially.\n :param responses: A list of ChatCompletion responses to return in order.\n \"\"\"\n self.chat.completions.create.side_effect = responses", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/mock_client.py", "stars": 20084, "label": 0} +{"function": " def assert_create_called_with(self, **kwargs):\n self.chat.completions.create.assert_called_with(**kwargs)", 
"creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/mock_client.py", "stars": 20084, "label": 0} +{"function": "def mock_openai_client():\n m = MockOpenAIClient()\n m.set_response(\n create_mock_response({\"role\": \"assistant\", \"content\": DEFAULT_RESPONSE_CONTENT})\n )\n return m", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/test_core.py", "stars": 20084, "label": 0} +{"function": "def test_run_with_simple_message(mock_openai_client: MockOpenAIClient):\n agent = Agent()\n # set up client and run\n client = Swarm(client=mock_openai_client)\n messages = [{\"role\": \"user\", \"content\": \"Hello, how are you?\"}]\n response = client.run(agent=agent, messages=messages)\n\n # assert response content\n assert response.messages[-1][\"role\"] == \"assistant\"\n assert response.messages[-1][\"content\"] == DEFAULT_RESPONSE_CONTENT", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/test_core.py", "stars": 20084, "label": 0} +{"function": "def test_tool_call(mock_openai_client: MockOpenAIClient):\n expected_location = \"San Francisco\"\n\n # set up mock to record function calls\n get_weather_mock = Mock()\n\n def get_weather(location):\n get_weather_mock(location=location)\n return \"It's sunny today.\"\n\n agent = Agent(name=\"Test Agent\", functions=[get_weather])\n messages = [\n {\"role\": \"user\", \"content\": \"What's the weather like in San Francisco?\"}\n ]\n\n # set mock to return a response that triggers function call\n mock_openai_client.set_sequential_responses(\n [\n create_mock_response(\n message={\"role\": \"assistant\", \"content\": \"\"},\n function_calls=[\n {\"name\": \"get_weather\", \"args\": {\"location\": expected_location}}\n ],\n ),\n create_mock_response(\n {\"role\": \"assistant\", \"content\": DEFAULT_RESPONSE_CONTENT}\n ),\n ]\n )\n\n # set up client and run\n client = Swarm(client=mock_openai_client)\n response = client.run(agent=agent, messages=messages)\n\n get_weather_mock.assert_called_once_with(location=expected_location)\n assert response.messages[-1][\"role\"] == \"assistant\"\n assert response.messages[-1][\"content\"] == DEFAULT_RESPONSE_CONTENT", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/test_core.py", "stars": 20084, "label": 0} +{"function": "def test_execute_tools_false(mock_openai_client: MockOpenAIClient):\n expected_location = \"San Francisco\"\n\n # set up mock to record function calls\n get_weather_mock = Mock()\n\n def get_weather(location):\n get_weather_mock(location=location)\n return \"It's sunny today.\"\n\n agent = Agent(name=\"Test Agent\", functions=[get_weather])\n messages = [\n {\"role\": \"user\", \"content\": \"What's the weather like in San Francisco?\"}\n ]\n\n # set mock to return a response that triggers function call\n mock_openai_client.set_sequential_responses(\n [\n create_mock_response(\n message={\"role\": \"assistant\", \"content\": \"\"},\n function_calls=[\n {\"name\": \"get_weather\", \"args\": {\"location\": expected_location}}\n ],\n ),\n create_mock_response(\n {\"role\": \"assistant\", \"content\": DEFAULT_RESPONSE_CONTENT}\n ),\n ]\n )\n\n # set up client and run\n client = Swarm(client=mock_openai_client)\n response = client.run(agent=agent, messages=messages, execute_tools=False)\n print(response)\n\n # assert function not called\n get_weather_mock.assert_not_called()\n\n # assert tool call is present in last response\n tool_calls = 
response.messages[-1].get(\"tool_calls\")\n assert tool_calls is not None and len(tool_calls) == 1\n tool_call = tool_calls[0]\n assert tool_call[\"function\"][\"name\"] == \"get_weather\"\n assert json.loads(tool_call[\"function\"][\"arguments\"]) == {\n \"location\": expected_location\n }", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/test_core.py", "stars": 20084, "label": 0} +{"function": "def test_handoff(mock_openai_client: MockOpenAIClient):\n def transfer_to_agent2():\n return agent2\n\n agent1 = Agent(name=\"Test Agent 1\", functions=[transfer_to_agent2])\n agent2 = Agent(name=\"Test Agent 2\")\n\n # set mock to return a response that triggers the handoff\n mock_openai_client.set_sequential_responses(\n [\n create_mock_response(\n message={\"role\": \"assistant\", \"content\": \"\"},\n function_calls=[{\"name\": \"transfer_to_agent2\"}],\n ),\n create_mock_response(\n {\"role\": \"assistant\", \"content\": DEFAULT_RESPONSE_CONTENT}\n ),\n ]\n )\n\n # set up client and run\n client = Swarm(client=mock_openai_client)\n messages = [{\"role\": \"user\", \"content\": \"I want to talk to agent 2\"}]\n response = client.run(agent=agent1, messages=messages)\n\n assert response.agent == agent2\n assert response.messages[-1][\"role\"] == \"assistant\"\n assert response.messages[-1][\"content\"] == DEFAULT_RESPONSE_CONTENT", "creation_date": "2024-10-10T20:17:28Z", "repo": "openai/swarm", "file_path": "tests/test_core.py", "stars": 20084, "label": 0} +{"function": "def is_running_in_docker():\n \"\"\"Detect if code is running inside a Docker container.\"\"\"\n # Method 1: Check for .dockerenv file\n if os.path.exists('/.dockerenv'):\n return True\n \n # Method 2: Check cgroup\n try:\n with open('/proc/1/cgroup', 'r') as f:\n return 'docker' in f.read()\n except:\n pass\n \n return False", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "def initialize_system():\n stealth_mode = config.getboolean('BROWSER', 'stealth_mode')\n personality_folder = \"jarvis\" if config.getboolean('MAIN', 'jarvis_personality') else \"base\"\n languages = config[\"MAIN\"][\"languages\"].split(' ')\n \n # Force headless mode in Docker containers\n headless = config.getboolean('BROWSER', 'headless_browser')\n if is_running_in_docker() and not headless:\n # Print prominent warning to console (visible in docker-compose output)\n print(\"\\n\" + \"*\" * 70)\n print(\"*** WARNING: Detected Docker environment - forcing headless_browser=True ***\")\n print(\"*** INFO: To see the browser, run 'python cli.py' on your host machine ***\")\n print(\"*\" * 70 + \"\\n\")\n \n # Flush to ensure it's displayed immediately\n sys.stdout.flush()\n \n # Also log to file\n logger.warning(\"Detected Docker environment - forcing headless_browser=True\")\n logger.info(\"To see the browser, run 'python cli.py' on your host machine instead\")\n \n headless = True\n \n provider = Provider(\n provider_name=config[\"MAIN\"][\"provider_name\"],\n model=config[\"MAIN\"][\"provider_model\"],\n server_address=config[\"MAIN\"][\"provider_server_address\"],\n is_local=config.getboolean('MAIN', 'is_local')\n )\n logger.info(f\"Provider initialized: {provider.provider_name} ({provider.model})\")\n\n browser = Browser(\n create_driver(headless=headless, stealth_mode=stealth_mode, lang=languages[0]),\n anticaptcha_manual_install=stealth_mode\n )\n logger.info(\"Browser initialized\")\n\n agents = [\n CasualAgent(\n 
name=config[\"MAIN\"][\"agent_name\"],\n prompt_path=f\"prompts/{personality_folder}/casual_agent.txt\",\n provider=provider, verbose=False\n ),\n CoderAgent(\n name=\"coder\",\n prompt_path=f\"prompts/{personality_folder}/coder_agent.txt\",\n provider=provider, verbose=False\n ),\n FileAgent(\n name=\"File Agent\",\n prompt_path=f\"prompts/{personality_folder}/file_agent.txt\",\n provider=provider, verbose=False\n ),\n BrowserAgent(\n name=\"Browser\",\n prompt_path=f\"prompts/{personality_folder}/browser_agent.txt\",\n provider=provider, verbose=False, browser=browser\n ),\n PlannerAgent(\n name=\"Planner\",\n prompt_path=f\"prompts/{personality_folder}/planner_agent.txt\",\n provider=provider, verbose=False, browser=browser\n )\n ]\n logger.info(\"Agents initialized\")\n\n interaction = Interaction(\n agents,\n tts_enabled=config.getboolean('MAIN', 'speak'),\n stt_enabled=config.getboolean('MAIN', 'listen'),\n recover_last_session=config.getboolean('MAIN', 'recover_last_session'),\n langs=languages\n )\n logger.info(\"Interaction initialized\")\n return interaction", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def get_screenshot():\n logger.info(\"Screenshot endpoint called\")\n screenshot_path = \".screenshots/updated_screen.png\"\n if os.path.exists(screenshot_path):\n return FileResponse(screenshot_path)\n logger.error(\"No screenshot available\")\n return JSONResponse(\n status_code=404,\n content={\"error\": \"No screenshot available\"}\n )", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def health_check():\n logger.info(\"Health check endpoint called\")\n return {\"status\": \"healthy\", \"version\": \"0.1.0\"}", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def is_active():\n logger.info(\"Is active endpoint called\")\n return {\"is_active\": interaction.is_active}", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def stop():\n logger.info(\"Stop endpoint called\")\n interaction.current_agent.request_stop()\n return JSONResponse(status_code=200, content={\"status\": \"stopped\"})", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def get_latest_answer():\n global query_resp_history\n if interaction.current_agent is None:\n return JSONResponse(status_code=404, content={\"error\": \"No agent available\"})\n uid = str(uuid.uuid4())\n if not any(q[\"answer\"] == interaction.current_agent.last_answer for q in query_resp_history):\n query_resp = {\n \"done\": \"false\",\n \"answer\": interaction.current_agent.last_answer,\n \"reasoning\": interaction.current_agent.last_reasoning,\n \"agent_name\": interaction.current_agent.agent_name if interaction.current_agent else \"None\",\n \"success\": interaction.current_agent.success,\n \"blocks\": {f'{i}': block.jsonify() for i, block in enumerate(interaction.get_last_blocks_result())} if interaction.current_agent else {},\n \"status\": interaction.current_agent.get_status_message if interaction.current_agent else \"No status available\",\n \"uid\": uid\n }\n interaction.current_agent.last_answer = \"\"\n interaction.current_agent.last_reasoning = \"\"\n 
query_resp_history.append(query_resp)\n return JSONResponse(status_code=200, content=query_resp)\n if query_resp_history:\n return JSONResponse(status_code=200, content=query_resp_history[-1])\n return JSONResponse(status_code=404, content={\"error\": \"No answer available\"})", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def think_wrapper(interaction, query):\n try:\n interaction.last_query = query\n logger.info(\"Agents request is being processed\")\n success = await interaction.think()\n if not success:\n interaction.last_answer = \"Error: No answer from agent\"\n interaction.last_reasoning = \"Error: No reasoning from agent\"\n interaction.last_success = False\n else:\n interaction.last_success = True\n pretty_print(interaction.last_answer)\n interaction.speak_answer()\n return success\n except Exception as e:\n logger.error(f\"Error in think_wrapper: {str(e)}\")\n interaction.last_answer = f\"\"\n interaction.last_reasoning = f\"Error: {str(e)}\"\n interaction.last_success = False\n raise e", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def process_query(request: QueryRequest):\n global is_generating, query_resp_history\n logger.info(f\"Processing query: {request.query}\")\n query_resp = QueryResponse(\n done=\"false\",\n answer=\"\",\n reasoning=\"\",\n agent_name=\"Unknown\",\n success=\"false\",\n blocks={},\n status=\"Ready\",\n uid=str(uuid.uuid4())\n )\n if is_generating:\n logger.warning(\"Another query is being processed, please wait.\")\n return JSONResponse(status_code=429, content=query_resp.jsonify())\n\n try:\n is_generating = True\n success = await think_wrapper(interaction, request.query)\n is_generating = False\n\n if not success:\n query_resp.answer = interaction.last_answer\n query_resp.reasoning = interaction.last_reasoning\n return JSONResponse(status_code=400, content=query_resp.jsonify())\n\n if interaction.current_agent:\n blocks_json = {f'{i}': block.jsonify() for i, block in enumerate(interaction.current_agent.get_blocks_result())}\n else:\n logger.error(\"No current agent found\")\n blocks_json = {}\n query_resp.answer = \"Error: No current agent\"\n return JSONResponse(status_code=400, content=query_resp.jsonify())\n\n logger.info(f\"Answer: {interaction.last_answer}\")\n logger.info(f\"Blocks: {blocks_json}\")\n query_resp.done = \"true\"\n query_resp.answer = interaction.last_answer\n query_resp.reasoning = interaction.last_reasoning\n query_resp.agent_name = interaction.current_agent.agent_name\n query_resp.success = str(interaction.last_success)\n query_resp.blocks = blocks_json\n \n query_resp_dict = {\n \"done\": query_resp.done,\n \"answer\": query_resp.answer,\n \"agent_name\": query_resp.agent_name,\n \"success\": query_resp.success,\n \"blocks\": query_resp.blocks,\n \"status\": query_resp.status,\n \"uid\": query_resp.uid\n }\n query_resp_history.append(query_resp_dict)\n\n logger.info(\"Query processed successfully\")\n return JSONResponse(status_code=200, content=query_resp.jsonify())\n except Exception as e:\n logger.error(f\"An error occurred: {str(e)}\")\n sys.exit(1)\n finally:\n logger.info(\"Processing finished\")\n if config.getboolean('MAIN', 'save_session'):\n interaction.save_session()", "creation_date": "2025-04-16T20:20:30Z", "repo": "Fosowl/agenticSeek", "file_path": "api.py", "stars": 20012, "label": 0} +{"function": "async def main():\n 
pretty_print(\"Initializing...\", color=\"status\")\n stealth_mode = config.getboolean('BROWSER', 'stealth_mode')\n personality_folder = \"jarvis\" if config.getboolean('MAIN', 'jarvis_personality') else \"base\"\n languages = config[\"MAIN\"][\"languages\"].split(' ')\n\n provider = Provider(provider_name=config[\"MAIN\"][\"provider_name\"],\n model=config[\"MAIN\"][\"provider_model\"],\n server_address=config[\"MAIN\"][\"provider_server_address\"],\n is_local=config.getboolean('MAIN', 'is_local'))\n\n browser = Browser(\n create_driver(headless=config.getboolean('BROWSER', 'headless_browser'), stealth_mode=stealth_mode, lang=languages[0]),\n anticaptcha_manual_install=stealth_mode\n )\n\n agents = [\n CasualAgent(name=config[\"MAIN\"][\"agent_name\"],\n prompt_path=f\"prompts/{personality_folder}/casual_agent.txt\",\n provider=provider, verbose=False),\n CoderAgent(name=\"coder\",\n prompt_path=f\"prompts/{personality_folder}/coder_agent.txt\",\n provider=provider, verbose=False),\n FileAgent(name=\"File Agent\",\n prompt_path=f\"prompts/{personality_folder}/file_agent.txt\",\n provider=provider, verbose=False),\n BrowserAgent(name=\"Browser\",\n prompt_path=f\"prompts/{personality_folder}/browser_agent.txt\",\n provider=provider, verbose=False, browser=browser),\n PlannerAgent(name=\"Planner\",\n prompt_path=f\"prompts/{personality_folder}/planner_agent.txt\",\n provider=provider, verbose=False, browser=browser),\n #McpAgent(name=\"MCP Agent\",\n # prompt_path=f\"prompts/{personality_folder}/mcp_agent.txt\",\n # provider=provider, verbose=False), # NOTE under development\n ]\n\n interaction = Interaction(agents,\n tts_enabled=config.getboolean('MAIN', 'speak'),\n stt_enabled=config.getboolean('MAIN', 'listen'),\n recover_last_session=config.getboolean('MAIN', 'recover_last_session'),\n langs=languages\n )\n try:\n while interaction.is_active:\n interaction.get_user()\n if await interaction.think():\n interaction.show_answer()\n interaction.speak_answer()\n except Exception as e:\n if config.getboolean('MAIN', 'save_session'):\n interaction.save_session()\n raise e\n finally:\n if config.getboolean('MAIN', 'save_session'):\n interaction.save_session()", "creation_date": "2025-04-16T13:11:41Z", "repo": "Fosowl/agenticSeek", "file_path": "cli.py", "stars": 20012, "label": 0} +{"function": "def init_omni_lmm(model_path):\n torch.backends.cuda.matmul.allow_tf32 = True\n disable_torch_init()\n model_name = os.path.expanduser(model_path)\n print(f'Load omni_lmm model and tokenizer from {model_name}')\n tokenizer = AutoTokenizer.from_pretrained(\n model_name, model_max_length=2048)\n\n if False:\n # model on multiple devices for small size gpu memory (Nvidia 3090 24G x2) \n with init_empty_weights():\n model = OmniLMMForCausalLM.from_pretrained(model_name, tune_clip=True, torch_dtype=torch.bfloat16)\n model = load_checkpoint_and_dispatch(model, model_name, dtype=torch.bfloat16, \n device_map=\"auto\", no_split_module_classes=['Eva','MistralDecoderLayer', 'ModuleList', 'Resampler']\n )\n else:\n model = OmniLMMForCausalLM.from_pretrained(\n model_name, tune_clip=True, torch_dtype=torch.bfloat16\n ).to(device='cuda', dtype=torch.bfloat16)\n\n image_processor = build_transform(\n is_train=False, input_size=model.model.config.image_size, std_mode='OPENAI_CLIP')\n\n mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n assert mm_use_im_start_end\n\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN,\n DEFAULT_IM_END_TOKEN], special_tokens=True)\n\n\n 
vision_config = model.model.vision_config\n    vision_config.im_patch_token = tokenizer.convert_tokens_to_ids(\n        [DEFAULT_IMAGE_PATCH_TOKEN])[0]\n    vision_config.use_im_start_end = mm_use_im_start_end\n    vision_config.im_start_token, vision_config.im_end_token = tokenizer.convert_tokens_to_ids(\n        [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])\n    image_token_len = model.model.config.num_query\n\n    return model, image_processor, image_token_len, tokenizer", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "def expand_question_into_multimodal(question_text, image_token_len, im_st_token, im_ed_token, im_patch_token):\n    if '<image>' in question_text[0]['content']:\n        question_text[0]['content'] = question_text[0]['content'].replace(\n            '<image>', im_st_token + im_patch_token * image_token_len + im_ed_token)\n    else:\n        question_text[0]['content'] = im_st_token + im_patch_token * \\\n            image_token_len + im_ed_token + '\\n' + question_text[0]['content']\n    return question_text", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "def wrap_question_for_omni_lmm(question, image_token_len, tokenizer):\n    question = expand_question_into_multimodal(\n        question, image_token_len, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_IMAGE_PATCH_TOKEN)\n\n    conversation = question\n    data_dict = omni_preprocess(sources=[conversation],\n                                tokenizer=tokenizer,\n                                generation=True)\n\n    data_dict = dict(input_ids=data_dict[\"input_ids\"][0],\n                     labels=data_dict[\"labels\"][0])\n    return data_dict", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "def img2base64(file_name):\n    with open(file_name, 'rb') as f:\n        encoded_string = base64.b64encode(f.read())\n    return encoded_string", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "    def __init__(self, model_path) -> None:\n        model, img_processor, image_token_len, tokenizer = init_omni_lmm(model_path)\n        self.model = model\n        self.image_token_len = image_token_len\n        self.image_transform = img_processor\n        self.tokenizer = tokenizer\n        self.model.eval()", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "    def decode(self, image, input_ids):\n        with torch.inference_mode():\n            output = self.model.generate_vllm(\n                input_ids=input_ids.unsqueeze(0).cuda(),\n                images=image.unsqueeze(0).half().cuda(),\n                temperature=0.6,\n                max_new_tokens=1024,\n                # num_beams=num_beams,\n                do_sample=True,\n                output_scores=True,\n                return_dict_in_generate=True,\n                repetition_penalty=1.1,\n                top_k=30,\n                top_p=0.9,\n            )\n\n            response = self.tokenizer.decode(\n                output.sequences[0], skip_special_tokens=True)\n            response = response.strip()\n            return response", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "    def chat(self, input):\n        try:\n            image = Image.open(io.BytesIO(base64.b64decode(input['image']))).convert('RGB')\n        except Exception as e:\n            return \"Image decode error\"\n\n        msgs = json.loads(input['question'])\n        input_ids = wrap_question_for_omni_lmm(\n            msgs, self.image_token_len, self.tokenizer)['input_ids']\n        input_ids = torch.as_tensor(input_ids)\n        #print('input_ids', input_ids)\n        image = self.image_transform(image)\n\n        out = 
self.decode(image, input_ids)\n\n return out", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": " def __init__(self, model_path) -> None:\n self.model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype=torch.bfloat16)\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n self.model.eval().cuda()", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": " def chat(self, input):\n try:\n image = Image.open(io.BytesIO(base64.b64decode(input['image']))).convert('RGB')\n except Exception as e:\n return \"Image decode error\"\n\n msgs = json.loads(input['question'])\n \n answer, context, _ = self.model.chat(\n image=image,\n msgs=msgs,\n context=None,\n tokenizer=self.tokenizer,\n sampling=True,\n temperature=0.7\n \t)\n return answer", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": " def __init__(self, model_path) -> None:\n self.model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype=torch.float16)\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n self.model.eval().cuda()", "creation_date": "2024-02-01T06:45:00Z", "repo": "OpenBMB/MiniCPM-o", "file_path": "chat.py", "stars": 19806, "label": 0} +{"function": "def test_connect(data):\n print(\"Socket connected :: \", data)\n emit_agent(\"socket_response\", {\"data\": \"Server Connected\"})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def data():\n project = manager.get_project_list()\n models = LLM().list_models()\n search_engines = [\"Bing\", \"Google\", \"DuckDuckGo\"]\n return jsonify({\"projects\": project, \"models\": models, \"search_engines\": search_engines})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def get_messages():\n data = request.json\n project_name = data.get(\"project_name\")\n messages = manager.get_messages(project_name)\n return jsonify({\"messages\": messages})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def handle_message(data):\n logger.info(f\"User message: {data}\")\n message = data.get('message')\n base_model = data.get('base_model')\n project_name = data.get('project_name')\n search_engine = data.get('search_engine').lower()\n\n agent = Agent(base_model=base_model, search_engine=search_engine)\n\n state = AgentState.get_latest_state(project_name)\n if not state:\n thread = Thread(target=lambda: agent.execute(message, project_name))\n thread.start()\n else:\n if AgentState.is_agent_completed(project_name):\n thread = Thread(target=lambda: agent.subsequent_execute(message, project_name))\n thread.start()\n else:\n emit_agent(\"info\", {\"type\": \"warning\", \"message\": \"previous agent doesn't completed it's task.\"})\n last_state = AgentState.get_latest_state(project_name)\n if last_state[\"agent_is_active\"] or not last_state[\"completed\"]:\n thread = Thread(target=lambda: agent.execute(message, project_name))\n thread.start()\n else:\n thread = Thread(target=lambda: agent.subsequent_execute(message, project_name))\n thread.start()", "creation_date": 
"2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def is_agent_active():\n data = request.json\n project_name = data.get(\"project_name\")\n is_active = AgentState.is_agent_active(project_name)\n return jsonify({\"is_active\": is_active})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def get_agent_state():\n data = request.json\n project_name = data.get(\"project_name\")\n agent_state = AgentState.get_latest_state(project_name)\n return jsonify({\"state\": agent_state})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def browser_snapshot():\n snapshot_path = request.args.get(\"snapshot_path\")\n return send_file(snapshot_path, as_attachment=True)", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def get_browser_session():\n project_name = request.args.get(\"project_name\")\n agent_state = AgentState.get_latest_state(project_name)\n if not agent_state:\n return jsonify({\"session\": None})\n else:\n browser_session = agent_state[\"browser_session\"]\n return jsonify({\"session\": browser_session})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def get_terminal_session():\n project_name = request.args.get(\"project_name\")\n agent_state = AgentState.get_latest_state(project_name)\n if not agent_state:\n return jsonify({\"terminal_state\": None})\n else:\n terminal_state = agent_state[\"terminal_session\"]\n return jsonify({\"terminal_state\": terminal_state})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "def run_code():\n data = request.json\n project_name = data.get(\"project_name\")\n code = data.get(\"code\")\n # TODO: Implement code execution logic\n return jsonify({\"message\": \"Code execution started\"})", "creation_date": "2024-03-21T03:08:45Z", "repo": "stitionai/devika", "file_path": "devika.py", "stars": 19381, "label": 0} +{"function": "async def mock_embedding_func(texts):\n return np.random.rand(len(texts), 10) # \u8fd4\u56de10\u7ef4\u968f\u673a\u5411\u91cf", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "def check_env_file():\n \"\"\"\n \u68c0\u67e5.env\u6587\u4ef6\u662f\u5426\u5b58\u5728\uff0c\u5982\u679c\u4e0d\u5b58\u5728\u5219\u53d1\u51fa\u8b66\u544a\n \u8fd4\u56deTrue\u8868\u793a\u5e94\u8be5\u7ee7\u7eed\u6267\u884c\uff0cFalse\u8868\u793a\u5e94\u8be5\u9000\u51fa\n \"\"\"\n if not os.path.exists(\".env\"):\n warning_msg = \"\u8b66\u544a: \u5f53\u524d\u76ee\u5f55\u4e2d\u6ca1\u6709\u627e\u5230.env\u6587\u4ef6\uff0c\u8fd9\u53ef\u80fd\u4f1a\u5f71\u54cd\u5b58\u50a8\u914d\u7f6e\u7684\u52a0\u8f7d\u3002\"\n ASCIIColors.yellow(warning_msg)\n\n # \u68c0\u67e5\u662f\u5426\u5728\u4ea4\u4e92\u5f0f\u7ec8\u7aef\u4e2d\u8fd0\u884c\n if sys.stdin.isatty():\n response = input(\"\u662f\u5426\u7ee7\u7eed\u6267\u884c? 
(yes/no): \")\n if response.lower() != \"yes\":\n ASCIIColors.red(\"\u6d4b\u8bd5\u7a0b\u5e8f\u5df2\u53d6\u6d88\")\n return False\n return True", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def initialize_graph_storage():\n \"\"\"\n \u6839\u636e\u73af\u5883\u53d8\u91cf\u521d\u59cb\u5316\u76f8\u5e94\u7684\u56fe\u5b58\u50a8\u5b9e\u4f8b\n \u8fd4\u56de\u521d\u59cb\u5316\u7684\u5b58\u50a8\u5b9e\u4f8b\n \"\"\"\n # \u4ece\u73af\u5883\u53d8\u91cf\u4e2d\u83b7\u53d6\u56fe\u5b58\u50a8\u7c7b\u578b\n graph_storage_type = os.getenv(\"LIGHTRAG_GRAPH_STORAGE\", \"NetworkXStorage\")\n\n # \u9a8c\u8bc1\u5b58\u50a8\u7c7b\u578b\u662f\u5426\u6709\u6548\n try:\n verify_storage_implementation(\"GRAPH_STORAGE\", graph_storage_type)\n except ValueError as e:\n ASCIIColors.red(f\"\u9519\u8bef: {str(e)}\")\n ASCIIColors.yellow(\n f\"\u652f\u6301\u7684\u56fe\u5b58\u50a8\u7c7b\u578b: {', '.join(STORAGE_IMPLEMENTATIONS['GRAPH_STORAGE']['implementations'])}\"\n )\n return None\n\n # \u68c0\u67e5\u6240\u9700\u7684\u73af\u5883\u53d8\u91cf\n required_env_vars = STORAGE_ENV_REQUIREMENTS.get(graph_storage_type, [])\n missing_env_vars = [var for var in required_env_vars if not os.getenv(var)]\n\n if missing_env_vars:\n ASCIIColors.red(\n f\"\u9519\u8bef: {graph_storage_type} \u9700\u8981\u4ee5\u4e0b\u73af\u5883\u53d8\u91cf\uff0c\u4f46\u672a\u8bbe\u7f6e: {', '.join(missing_env_vars)}\"\n )\n return None\n\n # \u52a8\u6001\u5bfc\u5165\u76f8\u5e94\u7684\u6a21\u5757\n module_path = STORAGES.get(graph_storage_type)\n if not module_path:\n ASCIIColors.red(f\"\u9519\u8bef: \u672a\u627e\u5230 {graph_storage_type} \u7684\u6a21\u5757\u8def\u5f84\")\n return None\n\n try:\n module = importlib.import_module(module_path, package=\"lightrag\")\n storage_class = getattr(module, graph_storage_type)\n except (ImportError, AttributeError) as e:\n ASCIIColors.red(f\"\u9519\u8bef: \u5bfc\u5165 {graph_storage_type} \u5931\u8d25: {str(e)}\")\n return None\n\n # \u521d\u59cb\u5316\u5b58\u50a8\u5b9e\u4f8b\n global_config = {\n \"embedding_batch_num\": 10, # \u6279\u5904\u7406\u5927\u5c0f\n \"vector_db_storage_cls_kwargs\": {\n \"cosine_better_than_threshold\": 0.5 # \u4f59\u5f26\u76f8\u4f3c\u5ea6\u9608\u503c\n },\n \"working_dir\": os.environ.get(\"WORKING_DIR\", \"./rag_storage\"), # \u5de5\u4f5c\u76ee\u5f55\n }\n\n # \u5982\u679c\u4f7f\u7528 NetworkXStorage\uff0c\u9700\u8981\u5148\u521d\u59cb\u5316 shared_storage\n if graph_storage_type == \"NetworkXStorage\":\n initialize_share_data() # \u4f7f\u7528\u5355\u8fdb\u7a0b\u6a21\u5f0f\n\n try:\n storage = storage_class(\n namespace=\"test_graph\",\n global_config=global_config,\n embedding_func=mock_embedding_func,\n )\n\n # \u521d\u59cb\u5316\u8fde\u63a5\n await storage.initialize()\n return storage\n except Exception as e:\n ASCIIColors.red(f\"\u9519\u8bef: \u521d\u59cb\u5316 {graph_storage_type} \u5931\u8d25: {str(e)}\")\n return None", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def test_graph_basic(storage):\n \"\"\"\n \u6d4b\u8bd5\u56fe\u6570\u636e\u5e93\u7684\u57fa\u672c\u64cd\u4f5c:\n 1. \u4f7f\u7528 upsert_node \u63d2\u5165\u4e24\u4e2a\u8282\u70b9\n 2. \u4f7f\u7528 upsert_edge \u63d2\u5165\u4e00\u6761\u8fde\u63a5\u4e24\u4e2a\u8282\u70b9\u7684\u8fb9\n 3. \u4f7f\u7528 get_node \u8bfb\u53d6\u4e00\u4e2a\u8282\u70b9\n 4. 
\u4f7f\u7528 get_edge \u8bfb\u53d6\u4e00\u6761\u8fb9\n \"\"\"\n try:\n # 1. \u63d2\u5165\u7b2c\u4e00\u4e2a\u8282\u70b9\n node1_id = \"\u4eba\u5de5\u667a\u80fd\"\n node1_data = {\n \"entity_id\": node1_id,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u662f\u8ba1\u7b97\u673a\u79d1\u5b66\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f01\u56fe\u4e86\u89e3\u667a\u80fd\u7684\u5b9e\u8d28\uff0c\u5e76\u751f\u4ea7\u51fa\u4e00\u79cd\u65b0\u7684\u80fd\u4ee5\u4eba\u7c7b\u667a\u80fd\u76f8\u4f3c\u7684\u65b9\u5f0f\u505a\u51fa\u53cd\u5e94\u7684\u667a\u80fd\u673a\u5668\u3002\",\n \"keywords\": \"AI,\u673a\u5668\u5b66\u4e60,\u6df1\u5ea6\u5b66\u4e60\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b91: {node1_id}\")\n await storage.upsert_node(node1_id, node1_data)\n\n # 2. \u63d2\u5165\u7b2c\u4e8c\u4e2a\u8282\u70b9\n node2_id = \"\u673a\u5668\u5b66\u4e60\"\n node2_data = {\n \"entity_id\": node2_id,\n \"description\": \"\u673a\u5668\u5b66\u4e60\u662f\u4eba\u5de5\u667a\u80fd\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f7f\u7528\u7edf\u8ba1\u5b66\u65b9\u6cd5\u8ba9\u8ba1\u7b97\u673a\u7cfb\u7edf\u5728\u4e0d\u88ab\u660e\u786e\u7f16\u7a0b\u7684\u60c5\u51b5\u4e0b\u4e5f\u80fd\u591f\u5b66\u4e60\u3002\",\n \"keywords\": \"\u76d1\u7763\u5b66\u4e60,\u65e0\u76d1\u7763\u5b66\u4e60,\u5f3a\u5316\u5b66\u4e60\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b92: {node2_id}\")\n await storage.upsert_node(node2_id, node2_data)\n\n # 3. \u63d2\u5165\u8fde\u63a5\u8fb9\n edge_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u9886\u57df\u5305\u542b\u673a\u5668\u5b66\u4e60\u8fd9\u4e2a\u5b50\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8fb9: {node1_id} -> {node2_id}\")\n await storage.upsert_edge(node1_id, node2_id, edge_data)\n\n # 4. \u8bfb\u53d6\u8282\u70b9\u5c5e\u6027\n print(f\"\u8bfb\u53d6\u8282\u70b9\u5c5e\u6027: {node1_id}\")\n node1_props = await storage.get_node(node1_id)\n if node1_props:\n print(f\"\u6210\u529f\u8bfb\u53d6\u8282\u70b9\u5c5e\u6027: {node1_id}\")\n print(f\"\u8282\u70b9\u63cf\u8ff0: {node1_props.get('description', '\u65e0\u63cf\u8ff0')}\")\n print(f\"\u8282\u70b9\u7c7b\u578b: {node1_props.get('entity_type', '\u65e0\u7c7b\u578b')}\")\n print(f\"\u8282\u70b9\u5173\u952e\u8bcd: {node1_props.get('keywords', '\u65e0\u5173\u952e\u8bcd')}\")\n # \u9a8c\u8bc1\u8fd4\u56de\u7684\u5c5e\u6027\u662f\u5426\u6b63\u786e\n assert (\n node1_props.get(\"entity_id\") == node1_id\n ), f\"\u8282\u70b9ID\u4e0d\u5339\u914d: \u671f\u671b {node1_id}, \u5b9e\u9645 {node1_props.get('entity_id')}\"\n assert (\n node1_props.get(\"description\") == node1_data[\"description\"]\n ), \"\u8282\u70b9\u63cf\u8ff0\u4e0d\u5339\u914d\"\n assert (\n node1_props.get(\"entity_type\") == node1_data[\"entity_type\"]\n ), \"\u8282\u70b9\u7c7b\u578b\u4e0d\u5339\u914d\"\n else:\n print(f\"\u8bfb\u53d6\u8282\u70b9\u5c5e\u6027\u5931\u8d25: {node1_id}\")\n assert False, f\"\u672a\u80fd\u8bfb\u53d6\u8282\u70b9\u5c5e\u6027: {node1_id}\"\n\n # 5. 
\u8bfb\u53d6\u8fb9\u5c5e\u6027\n print(f\"\u8bfb\u53d6\u8fb9\u5c5e\u6027: {node1_id} -> {node2_id}\")\n edge_props = await storage.get_edge(node1_id, node2_id)\n if edge_props:\n print(f\"\u6210\u529f\u8bfb\u53d6\u8fb9\u5c5e\u6027: {node1_id} -> {node2_id}\")\n print(f\"\u8fb9\u5173\u7cfb: {edge_props.get('relationship', '\u65e0\u5173\u7cfb')}\")\n print(f\"\u8fb9\u63cf\u8ff0: {edge_props.get('description', '\u65e0\u63cf\u8ff0')}\")\n print(f\"\u8fb9\u6743\u91cd: {edge_props.get('weight', '\u65e0\u6743\u91cd')}\")\n # \u9a8c\u8bc1\u8fd4\u56de\u7684\u5c5e\u6027\u662f\u5426\u6b63\u786e\n assert (\n edge_props.get(\"relationship\") == edge_data[\"relationship\"]\n ), \"\u8fb9\u5173\u7cfb\u4e0d\u5339\u914d\"\n assert (\n edge_props.get(\"description\") == edge_data[\"description\"]\n ), \"\u8fb9\u63cf\u8ff0\u4e0d\u5339\u914d\"\n assert edge_props.get(\"weight\") == edge_data[\"weight\"], \"\u8fb9\u6743\u91cd\u4e0d\u5339\u914d\"\n else:\n print(f\"\u8bfb\u53d6\u8fb9\u5c5e\u6027\u5931\u8d25: {node1_id} -> {node2_id}\")\n assert False, f\"\u672a\u80fd\u8bfb\u53d6\u8fb9\u5c5e\u6027: {node1_id} -> {node2_id}\"\n\n # 5.1 \u9a8c\u8bc1\u65e0\u5411\u56fe\u7279\u6027 - \u8bfb\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027\n print(f\"\u8bfb\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027: {node2_id} -> {node1_id}\")\n reverse_edge_props = await storage.get_edge(node2_id, node1_id)\n if reverse_edge_props:\n print(f\"\u6210\u529f\u8bfb\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027: {node2_id} -> {node1_id}\")\n print(f\"\u53cd\u5411\u8fb9\u5173\u7cfb: {reverse_edge_props.get('relationship', '\u65e0\u5173\u7cfb')}\")\n print(f\"\u53cd\u5411\u8fb9\u63cf\u8ff0: {reverse_edge_props.get('description', '\u65e0\u63cf\u8ff0')}\")\n print(f\"\u53cd\u5411\u8fb9\u6743\u91cd: {reverse_edge_props.get('weight', '\u65e0\u6743\u91cd')}\")\n # \u9a8c\u8bc1\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u662f\u5426\u76f8\u540c\n assert (\n edge_props == reverse_edge_props\n ), \"\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u4e0d\u4e00\u81f4\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u4e00\u81f4\")\n else:\n print(f\"\u8bfb\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027\u5931\u8d25: {node2_id} -> {node1_id}\")\n assert (\n False\n ), f\"\u672a\u80fd\u8bfb\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027: {node2_id} -> {node1_id}\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n\n print(\"\u57fa\u672c\u6d4b\u8bd5\u5b8c\u6210\uff0c\u6570\u636e\u5df2\u4fdd\u7559\u5728\u6570\u636e\u5e93\u4e2d\")\n return True\n\n except Exception as e:\n ASCIIColors.red(f\"\u6d4b\u8bd5\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef: {str(e)}\")\n return False", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def test_graph_advanced(storage):\n \"\"\"\n \u6d4b\u8bd5\u56fe\u6570\u636e\u5e93\u7684\u9ad8\u7ea7\u64cd\u4f5c:\n 1. \u4f7f\u7528 node_degree \u83b7\u53d6\u8282\u70b9\u7684\u5ea6\u6570\n 2. \u4f7f\u7528 edge_degree \u83b7\u53d6\u8fb9\u7684\u5ea6\u6570\n 3. \u4f7f\u7528 get_node_edges \u83b7\u53d6\u8282\u70b9\u7684\u6240\u6709\u8fb9\n 4. \u4f7f\u7528 get_all_labels \u83b7\u53d6\u6240\u6709\u6807\u7b7e\n 5. \u4f7f\u7528 get_knowledge_graph \u83b7\u53d6\u77e5\u8bc6\u56fe\u8c31\n 6. \u4f7f\u7528 delete_node \u5220\u9664\u8282\u70b9\n 7. \u4f7f\u7528 remove_nodes \u6279\u91cf\u5220\u9664\u8282\u70b9\n 8. 
\u4f7f\u7528 remove_edges \u5220\u9664\u8fb9\n 9. \u4f7f\u7528 drop \u6e05\u7406\u6570\u636e\n \"\"\"\n try:\n # 1. \u63d2\u5165\u6d4b\u8bd5\u6570\u636e\n # \u63d2\u5165\u8282\u70b91: \u4eba\u5de5\u667a\u80fd\n node1_id = \"\u4eba\u5de5\u667a\u80fd\"\n node1_data = {\n \"entity_id\": node1_id,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u662f\u8ba1\u7b97\u673a\u79d1\u5b66\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f01\u56fe\u4e86\u89e3\u667a\u80fd\u7684\u5b9e\u8d28\uff0c\u5e76\u751f\u4ea7\u51fa\u4e00\u79cd\u65b0\u7684\u80fd\u4ee5\u4eba\u7c7b\u667a\u80fd\u76f8\u4f3c\u7684\u65b9\u5f0f\u505a\u51fa\u53cd\u5e94\u7684\u667a\u80fd\u673a\u5668\u3002\",\n \"keywords\": \"AI,\u673a\u5668\u5b66\u4e60,\u6df1\u5ea6\u5b66\u4e60\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b91: {node1_id}\")\n await storage.upsert_node(node1_id, node1_data)\n\n # \u63d2\u5165\u8282\u70b92: \u673a\u5668\u5b66\u4e60\n node2_id = \"\u673a\u5668\u5b66\u4e60\"\n node2_data = {\n \"entity_id\": node2_id,\n \"description\": \"\u673a\u5668\u5b66\u4e60\u662f\u4eba\u5de5\u667a\u80fd\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f7f\u7528\u7edf\u8ba1\u5b66\u65b9\u6cd5\u8ba9\u8ba1\u7b97\u673a\u7cfb\u7edf\u5728\u4e0d\u88ab\u660e\u786e\u7f16\u7a0b\u7684\u60c5\u51b5\u4e0b\u4e5f\u80fd\u591f\u5b66\u4e60\u3002\",\n \"keywords\": \"\u76d1\u7763\u5b66\u4e60,\u65e0\u76d1\u7763\u5b66\u4e60,\u5f3a\u5316\u5b66\u4e60\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b92: {node2_id}\")\n await storage.upsert_node(node2_id, node2_data)\n\n # \u63d2\u5165\u8282\u70b93: \u6df1\u5ea6\u5b66\u4e60\n node3_id = \"\u6df1\u5ea6\u5b66\u4e60\"\n node3_data = {\n \"entity_id\": node3_id,\n \"description\": \"\u6df1\u5ea6\u5b66\u4e60\u662f\u673a\u5668\u5b66\u4e60\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f7f\u7528\u591a\u5c42\u795e\u7ecf\u7f51\u7edc\u6765\u6a21\u62df\u4eba\u8111\u7684\u5b66\u4e60\u8fc7\u7a0b\u3002\",\n \"keywords\": \"\u795e\u7ecf\u7f51\u7edc,CNN,RNN\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b93: {node3_id}\")\n await storage.upsert_node(node3_id, node3_data)\n\n # \u63d2\u5165\u8fb91: \u4eba\u5de5\u667a\u80fd -> \u673a\u5668\u5b66\u4e60\n edge1_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u9886\u57df\u5305\u542b\u673a\u5668\u5b66\u4e60\u8fd9\u4e2a\u5b50\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8fb91: {node1_id} -> {node2_id}\")\n await storage.upsert_edge(node1_id, node2_id, edge1_data)\n\n # \u63d2\u5165\u8fb92: \u673a\u5668\u5b66\u4e60 -> \u6df1\u5ea6\u5b66\u4e60\n edge2_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u673a\u5668\u5b66\u4e60\u9886\u57df\u5305\u542b\u6df1\u5ea6\u5b66\u4e60\u8fd9\u4e2a\u5b50\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8fb92: {node2_id} -> {node3_id}\")\n await storage.upsert_edge(node2_id, node3_id, edge2_data)\n\n # 2. 
\u6d4b\u8bd5 node_degree - \u83b7\u53d6\u8282\u70b9\u7684\u5ea6\u6570\n print(f\"== \u6d4b\u8bd5 node_degree: {node1_id}\")\n node1_degree = await storage.node_degree(node1_id)\n print(f\"\u8282\u70b9 {node1_id} \u7684\u5ea6\u6570: {node1_degree}\")\n assert node1_degree == 1, f\"\u8282\u70b9 {node1_id} \u7684\u5ea6\u6570\u5e94\u4e3a1\uff0c\u5b9e\u9645\u4e3a {node1_degree}\"\n\n # 2.1 \u6d4b\u8bd5\u6240\u6709\u8282\u70b9\u7684\u5ea6\u6570\n print(\"== \u6d4b\u8bd5\u6240\u6709\u8282\u70b9\u7684\u5ea6\u6570\")\n node2_degree = await storage.node_degree(node2_id)\n node3_degree = await storage.node_degree(node3_id)\n print(f\"\u8282\u70b9 {node2_id} \u7684\u5ea6\u6570: {node2_degree}\")\n print(f\"\u8282\u70b9 {node3_id} \u7684\u5ea6\u6570: {node3_degree}\")\n assert node2_degree == 2, f\"\u8282\u70b9 {node2_id} \u7684\u5ea6\u6570\u5e94\u4e3a2\uff0c\u5b9e\u9645\u4e3a {node2_degree}\"\n assert node3_degree == 1, f\"\u8282\u70b9 {node3_id} \u7684\u5ea6\u6570\u5e94\u4e3a1\uff0c\u5b9e\u9645\u4e3a {node3_degree}\"\n\n # 3. \u6d4b\u8bd5 edge_degree - \u83b7\u53d6\u8fb9\u7684\u5ea6\u6570\n print(f\"== \u6d4b\u8bd5 edge_degree: {node1_id} -> {node2_id}\")\n edge_degree = await storage.edge_degree(node1_id, node2_id)\n print(f\"\u8fb9 {node1_id} -> {node2_id} \u7684\u5ea6\u6570: {edge_degree}\")\n assert (\n edge_degree == 3\n ), f\"\u8fb9 {node1_id} -> {node2_id} \u7684\u5ea6\u6570\u5e94\u4e3a3\uff0c\u5b9e\u9645\u4e3a {edge_degree}\"\n\n # 3.1 \u6d4b\u8bd5\u53cd\u5411\u8fb9\u7684\u5ea6\u6570 - \u9a8c\u8bc1\u65e0\u5411\u56fe\u7279\u6027\n print(f\"== \u6d4b\u8bd5\u53cd\u5411\u8fb9\u7684\u5ea6\u6570: {node2_id} -> {node1_id}\")\n reverse_edge_degree = await storage.edge_degree(node2_id, node1_id)\n print(f\"\u53cd\u5411\u8fb9 {node2_id} -> {node1_id} \u7684\u5ea6\u6570: {reverse_edge_degree}\")\n assert (\n edge_degree == reverse_edge_degree\n ), \"\u6b63\u5411\u8fb9\u548c\u53cd\u5411\u8fb9\u7684\u5ea6\u6570\u4e0d\u4e00\u81f4\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6b63\u5411\u8fb9\u548c\u53cd\u5411\u8fb9\u7684\u5ea6\u6570\u4e00\u81f4\")\n\n # 4. 
\u6d4b\u8bd5 get_node_edges - \u83b7\u53d6\u8282\u70b9\u7684\u6240\u6709\u8fb9\n print(f\"== \u6d4b\u8bd5 get_node_edges: {node2_id}\")\n node2_edges = await storage.get_node_edges(node2_id)\n print(f\"\u8282\u70b9 {node2_id} \u7684\u6240\u6709\u8fb9: {node2_edges}\")\n assert (\n len(node2_edges) == 2\n ), f\"\u8282\u70b9 {node2_id} \u5e94\u67092\u6761\u8fb9\uff0c\u5b9e\u9645\u6709 {len(node2_edges)}\"\n\n # 4.1 \u9a8c\u8bc1\u8282\u70b9\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"== \u9a8c\u8bc1\u8282\u70b9\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n # \u68c0\u67e5\u662f\u5426\u5305\u542b\u4e0enode1\u548cnode3\u7684\u8fde\u63a5\u5173\u7cfb\uff08\u65e0\u8bba\u65b9\u5411\uff09\n has_connection_with_node1 = False\n has_connection_with_node3 = False\n for edge in node2_edges:\n # \u68c0\u67e5\u662f\u5426\u6709\u4e0enode1\u7684\u8fde\u63a5\uff08\u65e0\u8bba\u65b9\u5411\uff09\n if (edge[0] == node1_id and edge[1] == node2_id) or (\n edge[0] == node2_id and edge[1] == node1_id\n ):\n has_connection_with_node1 = True\n # \u68c0\u67e5\u662f\u5426\u6709\u4e0enode3\u7684\u8fde\u63a5\uff08\u65e0\u8bba\u65b9\u5411\uff09\n if (edge[0] == node2_id and edge[1] == node3_id) or (\n edge[0] == node3_id and edge[1] == node2_id\n ):\n has_connection_with_node3 = True\n\n assert (\n has_connection_with_node1\n ), f\"\u8282\u70b9 {node2_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u4e0e {node1_id} \u7684\u8fde\u63a5\"\n assert (\n has_connection_with_node3\n ), f\"\u8282\u70b9 {node2_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u4e0e {node3_id} \u7684\u8fde\u63a5\"\n print(f\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u8282\u70b9 {node2_id} \u7684\u8fb9\u5217\u8868\u5305\u542b\u6240\u6709\u76f8\u5173\u7684\u8fb9\")\n\n # 5. \u6d4b\u8bd5 get_all_labels - \u83b7\u53d6\u6240\u6709\u6807\u7b7e\n print(\"== \u6d4b\u8bd5 get_all_labels\")\n all_labels = await storage.get_all_labels()\n print(f\"\u6240\u6709\u6807\u7b7e: {all_labels}\")\n assert len(all_labels) == 3, f\"\u5e94\u67093\u4e2a\u6807\u7b7e\uff0c\u5b9e\u9645\u6709 {len(all_labels)}\"\n assert node1_id in all_labels, f\"{node1_id} \u5e94\u5728\u6807\u7b7e\u5217\u8868\u4e2d\"\n assert node2_id in all_labels, f\"{node2_id} \u5e94\u5728\u6807\u7b7e\u5217\u8868\u4e2d\"\n assert node3_id in all_labels, f\"{node3_id} \u5e94\u5728\u6807\u7b7e\u5217\u8868\u4e2d\"\n\n # 6. \u6d4b\u8bd5 get_knowledge_graph - \u83b7\u53d6\u77e5\u8bc6\u56fe\u8c31\n print(\"== \u6d4b\u8bd5 get_knowledge_graph\")\n kg = await storage.get_knowledge_graph(\"*\", max_depth=2, max_nodes=10)\n print(f\"\u77e5\u8bc6\u56fe\u8c31\u8282\u70b9\u6570: {len(kg.nodes)}\")\n print(f\"\u77e5\u8bc6\u56fe\u8c31\u8fb9\u6570: {len(kg.edges)}\")\n assert isinstance(kg, KnowledgeGraph), \"\u8fd4\u56de\u7ed3\u679c\u5e94\u4e3a KnowledgeGraph \u7c7b\u578b\"\n assert len(kg.nodes) == 3, f\"\u77e5\u8bc6\u56fe\u8c31\u5e94\u67093\u4e2a\u8282\u70b9\uff0c\u5b9e\u9645\u6709 {len(kg.nodes)}\"\n assert len(kg.edges) == 2, f\"\u77e5\u8bc6\u56fe\u8c31\u5e94\u67092\u6761\u8fb9\uff0c\u5b9e\u9645\u6709 {len(kg.edges)}\"\n\n # 7. 
\u6d4b\u8bd5 delete_node - \u5220\u9664\u8282\u70b9\n print(f\"== \u6d4b\u8bd5 delete_node: {node3_id}\")\n await storage.delete_node(node3_id)\n node3_props = await storage.get_node(node3_id)\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u8282\u70b9\u5c5e\u6027 {node3_id}: {node3_props}\")\n assert node3_props is None, f\"\u8282\u70b9 {node3_id} \u5e94\u5df2\u88ab\u5220\u9664\"\n\n # \u91cd\u65b0\u63d2\u5165\u8282\u70b93\u7528\u4e8e\u540e\u7eed\u6d4b\u8bd5\n await storage.upsert_node(node3_id, node3_data)\n await storage.upsert_edge(node2_id, node3_id, edge2_data)\n\n # 8. \u6d4b\u8bd5 remove_edges - \u5220\u9664\u8fb9\n print(f\"== \u6d4b\u8bd5 remove_edges: {node2_id} -> {node3_id}\")\n await storage.remove_edges([(node2_id, node3_id)])\n edge_props = await storage.get_edge(node2_id, node3_id)\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u8fb9\u5c5e\u6027 {node2_id} -> {node3_id}: {edge_props}\")\n assert edge_props is None, f\"\u8fb9 {node2_id} -> {node3_id} \u5e94\u5df2\u88ab\u5220\u9664\"\n\n # 8.1 \u9a8c\u8bc1\u5220\u9664\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(f\"== \u9a8c\u8bc1\u5220\u9664\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027: {node3_id} -> {node2_id}\")\n reverse_edge_props = await storage.get_edge(node3_id, node2_id)\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u53cd\u5411\u8fb9\u5c5e\u6027 {node3_id} -> {node2_id}: {reverse_edge_props}\")\n assert (\n reverse_edge_props is None\n ), f\"\u53cd\u5411\u8fb9 {node3_id} -> {node2_id} \u4e5f\u5e94\u88ab\u5220\u9664\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u5220\u9664\u4e00\u4e2a\u65b9\u5411\u7684\u8fb9\u540e\uff0c\u53cd\u5411\u8fb9\u4e5f\u88ab\u5220\u9664\")\n\n # 9. \u6d4b\u8bd5 remove_nodes - \u6279\u91cf\u5220\u9664\u8282\u70b9\n print(f\"== \u6d4b\u8bd5 remove_nodes: [{node2_id}, {node3_id}]\")\n await storage.remove_nodes([node2_id, node3_id])\n node2_props = await storage.get_node(node2_id)\n node3_props = await storage.get_node(node3_id)\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u8282\u70b9\u5c5e\u6027 {node2_id}: {node2_props}\")\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u8282\u70b9\u5c5e\u6027 {node3_id}: {node3_props}\")\n assert node2_props is None, f\"\u8282\u70b9 {node2_id} \u5e94\u5df2\u88ab\u5220\u9664\"\n assert node3_props is None, f\"\u8282\u70b9 {node3_id} \u5e94\u5df2\u88ab\u5220\u9664\"\n\n print(\"\\n\u9ad8\u7ea7\u6d4b\u8bd5\u5b8c\u6210\")\n return True\n\n except Exception as e:\n ASCIIColors.red(f\"\u6d4b\u8bd5\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef: {str(e)}\")\n return False", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def test_graph_batch_operations(storage):\n \"\"\"\n \u6d4b\u8bd5\u56fe\u6570\u636e\u5e93\u7684\u6279\u91cf\u64cd\u4f5c:\n 1. \u4f7f\u7528 get_nodes_batch \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8282\u70b9\u7684\u5c5e\u6027\n 2. \u4f7f\u7528 node_degrees_batch \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8282\u70b9\u7684\u5ea6\u6570\n 3. \u4f7f\u7528 edge_degrees_batch \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8fb9\u7684\u5ea6\u6570\n 4. \u4f7f\u7528 get_edges_batch \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8fb9\u7684\u5c5e\u6027\n 5. \u4f7f\u7528 get_nodes_edges_batch \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8282\u70b9\u7684\u6240\u6709\u8fb9\n \"\"\"\n try:\n chunk1_id = \"1\"\n chunk2_id = \"2\"\n chunk3_id = \"3\"\n # 1. 
\u63d2\u5165\u6d4b\u8bd5\u6570\u636e\n # \u63d2\u5165\u8282\u70b91: \u4eba\u5de5\u667a\u80fd\n node1_id = \"\u4eba\u5de5\u667a\u80fd\"\n node1_data = {\n \"entity_id\": node1_id,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u662f\u8ba1\u7b97\u673a\u79d1\u5b66\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f01\u56fe\u4e86\u89e3\u667a\u80fd\u7684\u5b9e\u8d28\uff0c\u5e76\u751f\u4ea7\u51fa\u4e00\u79cd\u65b0\u7684\u80fd\u4ee5\u4eba\u7c7b\u667a\u80fd\u76f8\u4f3c\u7684\u65b9\u5f0f\u505a\u51fa\u53cd\u5e94\u7684\u667a\u80fd\u673a\u5668\u3002\",\n \"keywords\": \"AI,\u673a\u5668\u5b66\u4e60,\u6df1\u5ea6\u5b66\u4e60\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n \"source_id\": GRAPH_FIELD_SEP.join([chunk1_id, chunk2_id]),\n }\n print(f\"\u63d2\u5165\u8282\u70b91: {node1_id}\")\n await storage.upsert_node(node1_id, node1_data)\n\n # \u63d2\u5165\u8282\u70b92: \u673a\u5668\u5b66\u4e60\n node2_id = \"\u673a\u5668\u5b66\u4e60\"\n node2_data = {\n \"entity_id\": node2_id,\n \"description\": \"\u673a\u5668\u5b66\u4e60\u662f\u4eba\u5de5\u667a\u80fd\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f7f\u7528\u7edf\u8ba1\u5b66\u65b9\u6cd5\u8ba9\u8ba1\u7b97\u673a\u7cfb\u7edf\u5728\u4e0d\u88ab\u660e\u786e\u7f16\u7a0b\u7684\u60c5\u51b5\u4e0b\u4e5f\u80fd\u591f\u5b66\u4e60\u3002\",\n \"keywords\": \"\u76d1\u7763\u5b66\u4e60,\u65e0\u76d1\u7763\u5b66\u4e60,\u5f3a\u5316\u5b66\u4e60\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n \"source_id\": GRAPH_FIELD_SEP.join([chunk2_id, chunk3_id]),\n }\n print(f\"\u63d2\u5165\u8282\u70b92: {node2_id}\")\n await storage.upsert_node(node2_id, node2_data)\n\n # \u63d2\u5165\u8282\u70b93: \u6df1\u5ea6\u5b66\u4e60\n node3_id = \"\u6df1\u5ea6\u5b66\u4e60\"\n node3_data = {\n \"entity_id\": node3_id,\n \"description\": \"\u6df1\u5ea6\u5b66\u4e60\u662f\u673a\u5668\u5b66\u4e60\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u5b83\u4f7f\u7528\u591a\u5c42\u795e\u7ecf\u7f51\u7edc\u6765\u6a21\u62df\u4eba\u8111\u7684\u5b66\u4e60\u8fc7\u7a0b\u3002\",\n \"keywords\": \"\u795e\u7ecf\u7f51\u7edc,CNN,RNN\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n \"source_id\": GRAPH_FIELD_SEP.join([chunk3_id]),\n }\n print(f\"\u63d2\u5165\u8282\u70b93: {node3_id}\")\n await storage.upsert_node(node3_id, node3_data)\n\n # \u63d2\u5165\u8282\u70b94: \u81ea\u7136\u8bed\u8a00\u5904\u7406\n node4_id = \"\u81ea\u7136\u8bed\u8a00\u5904\u7406\"\n node4_data = {\n \"entity_id\": node4_id,\n \"description\": \"\u81ea\u7136\u8bed\u8a00\u5904\u7406\u662f\u4eba\u5de5\u667a\u80fd\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u4e13\u6ce8\u4e8e\u4f7f\u8ba1\u7b97\u673a\u7406\u89e3\u548c\u5904\u7406\u4eba\u7c7b\u8bed\u8a00\u3002\",\n \"keywords\": \"NLP,\u6587\u672c\u5206\u6790,\u8bed\u8a00\u6a21\u578b\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b94: {node4_id}\")\n await storage.upsert_node(node4_id, node4_data)\n\n # \u63d2\u5165\u8282\u70b95: \u8ba1\u7b97\u673a\u89c6\u89c9\n node5_id = \"\u8ba1\u7b97\u673a\u89c6\u89c9\"\n node5_data = {\n \"entity_id\": node5_id,\n \"description\": \"\u8ba1\u7b97\u673a\u89c6\u89c9\u662f\u4eba\u5de5\u667a\u80fd\u7684\u4e00\u4e2a\u5206\u652f\uff0c\u4e13\u6ce8\u4e8e\u4f7f\u8ba1\u7b97\u673a\u80fd\u591f\u4ece\u56fe\u50cf\u6216\u89c6\u9891\u4e2d\u83b7\u53d6\u4fe1\u606f\u3002\",\n \"keywords\": \"CV,\u56fe\u50cf\u8bc6\u522b,\u76ee\u6807\u68c0\u6d4b\",\n \"entity_type\": \"\u6280\u672f\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8282\u70b95: {node5_id}\")\n await storage.upsert_node(node5_id, node5_data)\n\n # \u63d2\u5165\u8fb91: \u4eba\u5de5\u667a\u80fd -> 
\u673a\u5668\u5b66\u4e60\n edge1_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u9886\u57df\u5305\u542b\u673a\u5668\u5b66\u4e60\u8fd9\u4e2a\u5b50\u9886\u57df\",\n \"source_id\": GRAPH_FIELD_SEP.join([chunk1_id, chunk2_id]),\n }\n print(f\"\u63d2\u5165\u8fb91: {node1_id} -> {node2_id}\")\n await storage.upsert_edge(node1_id, node2_id, edge1_data)\n\n # \u63d2\u5165\u8fb92: \u673a\u5668\u5b66\u4e60 -> \u6df1\u5ea6\u5b66\u4e60\n edge2_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u673a\u5668\u5b66\u4e60\u9886\u57df\u5305\u542b\u6df1\u5ea6\u5b66\u4e60\u8fd9\u4e2a\u5b50\u9886\u57df\",\n \"source_id\": GRAPH_FIELD_SEP.join([chunk2_id, chunk3_id]),\n }\n print(f\"\u63d2\u5165\u8fb92: {node2_id} -> {node3_id}\")\n await storage.upsert_edge(node2_id, node3_id, edge2_data)\n\n # \u63d2\u5165\u8fb93: \u4eba\u5de5\u667a\u80fd -> \u81ea\u7136\u8bed\u8a00\u5904\u7406\n edge3_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u9886\u57df\u5305\u542b\u81ea\u7136\u8bed\u8a00\u5904\u7406\u8fd9\u4e2a\u5b50\u9886\u57df\",\n \"source_id\": GRAPH_FIELD_SEP.join([chunk3_id]),\n }\n print(f\"\u63d2\u5165\u8fb93: {node1_id} -> {node4_id}\")\n await storage.upsert_edge(node1_id, node4_id, edge3_data)\n\n # \u63d2\u5165\u8fb94: \u4eba\u5de5\u667a\u80fd -> \u8ba1\u7b97\u673a\u89c6\u89c9\n edge4_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u4eba\u5de5\u667a\u80fd\u9886\u57df\u5305\u542b\u8ba1\u7b97\u673a\u89c6\u89c9\u8fd9\u4e2a\u5b50\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8fb94: {node1_id} -> {node5_id}\")\n await storage.upsert_edge(node1_id, node5_id, edge4_data)\n\n # \u63d2\u5165\u8fb95: \u6df1\u5ea6\u5b66\u4e60 -> \u81ea\u7136\u8bed\u8a00\u5904\u7406\n edge5_data = {\n \"relationship\": \"\u5e94\u7528\u4e8e\",\n \"weight\": 0.8,\n \"description\": \"\u6df1\u5ea6\u5b66\u4e60\u6280\u672f\u5e94\u7528\u4e8e\u81ea\u7136\u8bed\u8a00\u5904\u7406\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8fb95: {node3_id} -> {node4_id}\")\n await storage.upsert_edge(node3_id, node4_id, edge5_data)\n\n # \u63d2\u5165\u8fb96: \u6df1\u5ea6\u5b66\u4e60 -> \u8ba1\u7b97\u673a\u89c6\u89c9\n edge6_data = {\n \"relationship\": \"\u5e94\u7528\u4e8e\",\n \"weight\": 0.8,\n \"description\": \"\u6df1\u5ea6\u5b66\u4e60\u6280\u672f\u5e94\u7528\u4e8e\u8ba1\u7b97\u673a\u89c6\u89c9\u9886\u57df\",\n }\n print(f\"\u63d2\u5165\u8fb96: {node3_id} -> {node5_id}\")\n await storage.upsert_edge(node3_id, node5_id, edge6_data)\n\n # 2. 
\u6d4b\u8bd5 get_nodes_batch - \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8282\u70b9\u7684\u5c5e\u6027\n print(\"== \u6d4b\u8bd5 get_nodes_batch\")\n node_ids = [node1_id, node2_id, node3_id]\n nodes_dict = await storage.get_nodes_batch(node_ids)\n print(f\"\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u5c5e\u6027\u7ed3\u679c: {nodes_dict.keys()}\")\n assert len(nodes_dict) == 3, f\"\u5e94\u8fd4\u56de3\u4e2a\u8282\u70b9\uff0c\u5b9e\u9645\u8fd4\u56de {len(nodes_dict)} \u4e2a\"\n assert node1_id in nodes_dict, f\"{node1_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert node2_id in nodes_dict, f\"{node2_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert node3_id in nodes_dict, f\"{node3_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n nodes_dict[node1_id][\"description\"] == node1_data[\"description\"]\n ), f\"{node1_id} \u63cf\u8ff0\u4e0d\u5339\u914d\"\n assert (\n nodes_dict[node2_id][\"description\"] == node2_data[\"description\"]\n ), f\"{node2_id} \u63cf\u8ff0\u4e0d\u5339\u914d\"\n assert (\n nodes_dict[node3_id][\"description\"] == node3_data[\"description\"]\n ), f\"{node3_id} \u63cf\u8ff0\u4e0d\u5339\u914d\"\n\n # 3. \u6d4b\u8bd5 node_degrees_batch - \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8282\u70b9\u7684\u5ea6\u6570\n print(\"== \u6d4b\u8bd5 node_degrees_batch\")\n node_degrees = await storage.node_degrees_batch(node_ids)\n print(f\"\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u5ea6\u6570\u7ed3\u679c: {node_degrees}\")\n assert (\n len(node_degrees) == 3\n ), f\"\u5e94\u8fd4\u56de3\u4e2a\u8282\u70b9\u7684\u5ea6\u6570\uff0c\u5b9e\u9645\u8fd4\u56de {len(node_degrees)} \u4e2a\"\n assert node1_id in node_degrees, f\"{node1_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert node2_id in node_degrees, f\"{node2_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert node3_id in node_degrees, f\"{node3_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n node_degrees[node1_id] == 3\n ), f\"{node1_id} \u5ea6\u6570\u5e94\u4e3a3\uff0c\u5b9e\u9645\u4e3a {node_degrees[node1_id]}\"\n assert (\n node_degrees[node2_id] == 2\n ), f\"{node2_id} \u5ea6\u6570\u5e94\u4e3a2\uff0c\u5b9e\u9645\u4e3a {node_degrees[node2_id]}\"\n assert (\n node_degrees[node3_id] == 3\n ), f\"{node3_id} \u5ea6\u6570\u5e94\u4e3a3\uff0c\u5b9e\u9645\u4e3a {node_degrees[node3_id]}\"\n\n # 4. 
\u6d4b\u8bd5 edge_degrees_batch - \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8fb9\u7684\u5ea6\u6570\n print(\"== \u6d4b\u8bd5 edge_degrees_batch\")\n edges = [(node1_id, node2_id), (node2_id, node3_id), (node3_id, node4_id)]\n edge_degrees = await storage.edge_degrees_batch(edges)\n print(f\"\u6279\u91cf\u83b7\u53d6\u8fb9\u5ea6\u6570\u7ed3\u679c: {edge_degrees}\")\n assert (\n len(edge_degrees) == 3\n ), f\"\u5e94\u8fd4\u56de3\u6761\u8fb9\u7684\u5ea6\u6570\uff0c\u5b9e\u9645\u8fd4\u56de {len(edge_degrees)} \u6761\"\n assert (\n node1_id,\n node2_id,\n ) in edge_degrees, f\"\u8fb9 {node1_id} -> {node2_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n node2_id,\n node3_id,\n ) in edge_degrees, f\"\u8fb9 {node2_id} -> {node3_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n node3_id,\n node4_id,\n ) in edge_degrees, f\"\u8fb9 {node3_id} -> {node4_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n # \u9a8c\u8bc1\u8fb9\u7684\u5ea6\u6570\u662f\u5426\u6b63\u786e\uff08\u6e90\u8282\u70b9\u5ea6\u6570 + \u76ee\u6807\u8282\u70b9\u5ea6\u6570\uff09\n assert (\n edge_degrees[(node1_id, node2_id)] == 5\n ), f\"\u8fb9 {node1_id} -> {node2_id} \u5ea6\u6570\u5e94\u4e3a5\uff0c\u5b9e\u9645\u4e3a {edge_degrees[(node1_id, node2_id)]}\"\n assert (\n edge_degrees[(node2_id, node3_id)] == 5\n ), f\"\u8fb9 {node2_id} -> {node3_id} \u5ea6\u6570\u5e94\u4e3a5\uff0c\u5b9e\u9645\u4e3a {edge_degrees[(node2_id, node3_id)]}\"\n assert (\n edge_degrees[(node3_id, node4_id)] == 5\n ), f\"\u8fb9 {node3_id} -> {node4_id} \u5ea6\u6570\u5e94\u4e3a5\uff0c\u5b9e\u9645\u4e3a {edge_degrees[(node3_id, node4_id)]}\"\n\n # 5. \u6d4b\u8bd5 get_edges_batch - \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8fb9\u7684\u5c5e\u6027\n print(\"== \u6d4b\u8bd5 get_edges_batch\")\n # \u5c06\u5143\u7ec4\u5217\u8868\u8f6c\u6362\u4e3aNeo4j\u98ce\u683c\u7684\u5b57\u5178\u5217\u8868\n edge_dicts = [{\"src\": src, \"tgt\": tgt} for src, tgt in edges]\n edges_dict = await storage.get_edges_batch(edge_dicts)\n print(f\"\u6279\u91cf\u83b7\u53d6\u8fb9\u5c5e\u6027\u7ed3\u679c: {edges_dict.keys()}\")\n assert len(edges_dict) == 3, f\"\u5e94\u8fd4\u56de3\u6761\u8fb9\u7684\u5c5e\u6027\uff0c\u5b9e\u9645\u8fd4\u56de {len(edges_dict)} \u6761\"\n assert (\n node1_id,\n node2_id,\n ) in edges_dict, f\"\u8fb9 {node1_id} -> {node2_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n node2_id,\n node3_id,\n ) in edges_dict, f\"\u8fb9 {node2_id} -> {node3_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n node3_id,\n node4_id,\n ) in edges_dict, f\"\u8fb9 {node3_id} -> {node4_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n edges_dict[(node1_id, node2_id)][\"relationship\"]\n == edge1_data[\"relationship\"]\n ), f\"\u8fb9 {node1_id} -> {node2_id} \u5173\u7cfb\u4e0d\u5339\u914d\"\n assert (\n edges_dict[(node2_id, node3_id)][\"relationship\"]\n == edge2_data[\"relationship\"]\n ), f\"\u8fb9 {node2_id} -> {node3_id} \u5173\u7cfb\u4e0d\u5339\u914d\"\n assert (\n edges_dict[(node3_id, node4_id)][\"relationship\"]\n == edge5_data[\"relationship\"]\n ), f\"\u8fb9 {node3_id} -> {node4_id} \u5173\u7cfb\u4e0d\u5339\u914d\"\n\n # 5.1 \u6d4b\u8bd5\u53cd\u5411\u8fb9\u7684\u6279\u91cf\u83b7\u53d6 - \u9a8c\u8bc1\u65e0\u5411\u56fe\u7279\u6027\n print(\"== \u6d4b\u8bd5\u53cd\u5411\u8fb9\u7684\u6279\u91cf\u83b7\u53d6\")\n # \u521b\u5efa\u53cd\u5411\u8fb9\u7684\u5b57\u5178\u5217\u8868\n reverse_edge_dicts = [{\"src\": tgt, \"tgt\": src} for src, tgt in edges]\n reverse_edges_dict = await storage.get_edges_batch(reverse_edge_dicts)\n 
print(f\"\u6279\u91cf\u83b7\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027\u7ed3\u679c: {reverse_edges_dict.keys()}\")\n assert (\n len(reverse_edges_dict) == 3\n ), f\"\u5e94\u8fd4\u56de3\u6761\u53cd\u5411\u8fb9\u7684\u5c5e\u6027\uff0c\u5b9e\u9645\u8fd4\u56de {len(reverse_edges_dict)} \u6761\"\n\n # \u9a8c\u8bc1\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u7684\u5c5e\u6027\u662f\u5426\u4e00\u81f4\n for (src, tgt), props in edges_dict.items():\n assert (\n tgt,\n src,\n ) in reverse_edges_dict, f\"\u53cd\u5411\u8fb9 {tgt} -> {src} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n props == reverse_edges_dict[(tgt, src)]\n ), f\"\u8fb9 {src} -> {tgt} \u548c\u53cd\u5411\u8fb9 {tgt} -> {src} \u7684\u5c5e\u6027\u4e0d\u4e00\u81f4\"\n\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6279\u91cf\u83b7\u53d6\u7684\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u4e00\u81f4\")\n\n # 6. \u6d4b\u8bd5 get_nodes_edges_batch - \u6279\u91cf\u83b7\u53d6\u591a\u4e2a\u8282\u70b9\u7684\u6240\u6709\u8fb9\n print(\"== \u6d4b\u8bd5 get_nodes_edges_batch\")\n nodes_edges = await storage.get_nodes_edges_batch([node1_id, node3_id])\n print(f\"\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u8fb9\u7ed3\u679c: {nodes_edges.keys()}\")\n assert (\n len(nodes_edges) == 2\n ), f\"\u5e94\u8fd4\u56de2\u4e2a\u8282\u70b9\u7684\u8fb9\uff0c\u5b9e\u9645\u8fd4\u56de {len(nodes_edges)} \u4e2a\"\n assert node1_id in nodes_edges, f\"{node1_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert node3_id in nodes_edges, f\"{node3_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n len(nodes_edges[node1_id]) == 3\n ), f\"{node1_id} \u5e94\u67093\u6761\u8fb9\uff0c\u5b9e\u9645\u6709 {len(nodes_edges[node1_id])} \u6761\"\n assert (\n len(nodes_edges[node3_id]) == 3\n ), f\"{node3_id} \u5e94\u67093\u6761\u8fb9\uff0c\u5b9e\u9645\u6709 {len(nodes_edges[node3_id])} \u6761\"\n\n # 6.1 \u9a8c\u8bc1\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"== \u9a8c\u8bc1\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n\n # \u68c0\u67e5\u8282\u70b91\u7684\u8fb9\u662f\u5426\u5305\u542b\u6240\u6709\u76f8\u5173\u7684\u8fb9\uff08\u65e0\u8bba\u65b9\u5411\uff09\n node1_outgoing_edges = [\n (src, tgt) for src, tgt in nodes_edges[node1_id] if src == node1_id\n ]\n node1_incoming_edges = [\n (src, tgt) for src, tgt in nodes_edges[node1_id] if tgt == node1_id\n ]\n print(f\"\u8282\u70b9 {node1_id} \u7684\u51fa\u8fb9: {node1_outgoing_edges}\")\n print(f\"\u8282\u70b9 {node1_id} \u7684\u5165\u8fb9: {node1_incoming_edges}\")\n\n # \u68c0\u67e5\u662f\u5426\u5305\u542b\u5230\u673a\u5668\u5b66\u4e60\u3001\u81ea\u7136\u8bed\u8a00\u5904\u7406\u548c\u8ba1\u7b97\u673a\u89c6\u89c9\u7684\u8fb9\n has_edge_to_node2 = any(tgt == node2_id for _, tgt in node1_outgoing_edges)\n has_edge_to_node4 = any(tgt == node4_id for _, tgt in node1_outgoing_edges)\n has_edge_to_node5 = any(tgt == node5_id for _, tgt in node1_outgoing_edges)\n\n assert has_edge_to_node2, f\"\u8282\u70b9 {node1_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u5230 {node2_id} \u7684\u8fb9\"\n assert has_edge_to_node4, f\"\u8282\u70b9 {node1_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u5230 {node4_id} \u7684\u8fb9\"\n assert has_edge_to_node5, f\"\u8282\u70b9 {node1_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u5230 {node5_id} \u7684\u8fb9\"\n\n # \u68c0\u67e5\u8282\u70b93\u7684\u8fb9\u662f\u5426\u5305\u542b\u6240\u6709\u76f8\u5173\u7684\u8fb9\uff08\u65e0\u8bba\u65b9\u5411\uff09\n node3_outgoing_edges = [\n 
(src, tgt) for src, tgt in nodes_edges[node3_id] if src == node3_id\n ]\n node3_incoming_edges = [\n (src, tgt) for src, tgt in nodes_edges[node3_id] if tgt == node3_id\n ]\n print(f\"\u8282\u70b9 {node3_id} \u7684\u51fa\u8fb9: {node3_outgoing_edges}\")\n print(f\"\u8282\u70b9 {node3_id} \u7684\u5165\u8fb9: {node3_incoming_edges}\")\n\n # \u68c0\u67e5\u662f\u5426\u5305\u542b\u4e0e\u673a\u5668\u5b66\u4e60\u3001\u81ea\u7136\u8bed\u8a00\u5904\u7406\u548c\u8ba1\u7b97\u673a\u89c6\u89c9\u7684\u8fde\u63a5\uff08\u5ffd\u7565\u65b9\u5411\uff09\n has_connection_with_node2 = any(\n (src == node2_id and tgt == node3_id)\n or (src == node3_id and tgt == node2_id)\n for src, tgt in nodes_edges[node3_id]\n )\n has_connection_with_node4 = any(\n (src == node3_id and tgt == node4_id)\n or (src == node4_id and tgt == node3_id)\n for src, tgt in nodes_edges[node3_id]\n )\n has_connection_with_node5 = any(\n (src == node3_id and tgt == node5_id)\n or (src == node5_id and tgt == node3_id)\n for src, tgt in nodes_edges[node3_id]\n )\n\n assert (\n has_connection_with_node2\n ), f\"\u8282\u70b9 {node3_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u4e0e {node2_id} \u7684\u8fde\u63a5\"\n assert (\n has_connection_with_node4\n ), f\"\u8282\u70b9 {node3_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u4e0e {node4_id} \u7684\u8fde\u63a5\"\n assert (\n has_connection_with_node5\n ), f\"\u8282\u70b9 {node3_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u4e0e {node5_id} \u7684\u8fde\u63a5\"\n\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6279\u91cf\u83b7\u53d6\u7684\u8282\u70b9\u8fb9\u5305\u542b\u6240\u6709\u76f8\u5173\u7684\u8fb9\uff08\u65e0\u8bba\u65b9\u5411\uff09\")\n\n # 7. \u6d4b\u8bd5 get_nodes_by_chunk_ids - \u6279\u91cf\u6839\u636e chunk_ids \u83b7\u53d6\u591a\u4e2a\u8282\u70b9\n print(\"== \u6d4b\u8bd5 get_nodes_by_chunk_ids\")\n\n print(\"== \u6d4b\u8bd5\u5355\u4e2a chunk_id\uff0c\u5339\u914d\u591a\u4e2a\u8282\u70b9\")\n nodes = await storage.get_nodes_by_chunk_ids([chunk2_id])\n assert len(nodes) == 2, f\"{chunk1_id} \u5e94\u67092\u4e2a\u8282\u70b9\uff0c\u5b9e\u9645\u6709 {len(nodes)} \u4e2a\"\n\n has_node1 = any(node[\"entity_id\"] == node1_id for node in nodes)\n has_node2 = any(node[\"entity_id\"] == node2_id for node in nodes)\n\n assert has_node1, f\"\u8282\u70b9 {node1_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert has_node2, f\"\u8282\u70b9 {node2_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n\n print(\"== \u6d4b\u8bd5\u591a\u4e2a chunk_id\uff0c\u90e8\u5206\u5339\u914d\u591a\u4e2a\u8282\u70b9\")\n nodes = await storage.get_nodes_by_chunk_ids([chunk2_id, chunk3_id])\n assert (\n len(nodes) == 3\n ), f\"{chunk2_id}, {chunk3_id} \u5e94\u67093\u4e2a\u8282\u70b9\uff0c\u5b9e\u9645\u6709 {len(nodes)} \u4e2a\"\n\n has_node1 = any(node[\"entity_id\"] == node1_id for node in nodes)\n has_node2 = any(node[\"entity_id\"] == node2_id for node in nodes)\n has_node3 = any(node[\"entity_id\"] == node3_id for node in nodes)\n\n assert has_node1, f\"\u8282\u70b9 {node1_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert has_node2, f\"\u8282\u70b9 {node2_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert has_node3, f\"\u8282\u70b9 {node3_id} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n\n # 8. 
\u6d4b\u8bd5 get_edges_by_chunk_ids - \u6279\u91cf\u6839\u636e chunk_ids \u83b7\u53d6\u591a\u6761\u8fb9\n print(\"== \u6d4b\u8bd5 get_edges_by_chunk_ids\")\n\n print(\"== \u6d4b\u8bd5\u5355\u4e2a chunk_id\uff0c\u5339\u914d\u591a\u6761\u8fb9\")\n edges = await storage.get_edges_by_chunk_ids([chunk2_id])\n assert len(edges) == 2, f\"{chunk2_id} \u5e94\u67092\u6761\u8fb9\uff0c\u5b9e\u9645\u6709 {len(edges)} \u6761\"\n\n has_edge_node1_node2 = any(\n edge[\"source\"] == node1_id and edge[\"target\"] == node2_id for edge in edges\n )\n has_edge_node2_node3 = any(\n edge[\"source\"] == node2_id and edge[\"target\"] == node3_id for edge in edges\n )\n\n assert has_edge_node1_node2, f\"{chunk2_id} \u5e94\u5305\u542b {node1_id} \u5230 {node2_id} \u7684\u8fb9\"\n assert has_edge_node2_node3, f\"{chunk2_id} \u5e94\u5305\u542b {node2_id} \u5230 {node3_id} \u7684\u8fb9\"\n\n print(\"== \u6d4b\u8bd5\u591a\u4e2a chunk_id\uff0c\u90e8\u5206\u5339\u914d\u591a\u6761\u8fb9\")\n edges = await storage.get_edges_by_chunk_ids([chunk2_id, chunk3_id])\n assert (\n len(edges) == 3\n ), f\"{chunk2_id}, {chunk3_id} \u5e94\u67093\u6761\u8fb9\uff0c\u5b9e\u9645\u6709 {len(edges)} \u6761\"\n\n has_edge_node1_node2 = any(\n edge[\"source\"] == node1_id and edge[\"target\"] == node2_id for edge in edges\n )\n has_edge_node2_node3 = any(\n edge[\"source\"] == node2_id and edge[\"target\"] == node3_id for edge in edges\n )\n has_edge_node1_node4 = any(\n edge[\"source\"] == node1_id and edge[\"target\"] == node4_id for edge in edges\n )\n\n assert (\n has_edge_node1_node2\n ), f\"{chunk2_id}, {chunk3_id} \u5e94\u5305\u542b {node1_id} \u5230 {node2_id} \u7684\u8fb9\"\n assert (\n has_edge_node2_node3\n ), f\"{chunk2_id}, {chunk3_id} \u5e94\u5305\u542b {node2_id} \u5230 {node3_id} \u7684\u8fb9\"\n assert (\n has_edge_node1_node4\n ), f\"{chunk2_id}, {chunk3_id} \u5e94\u5305\u542b {node1_id} \u5230 {node4_id} \u7684\u8fb9\"\n\n print(\"\\n\u6279\u91cf\u64cd\u4f5c\u6d4b\u8bd5\u5b8c\u6210\")\n return True\n\n except Exception as e:\n ASCIIColors.red(f\"\u6d4b\u8bd5\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef: {str(e)}\")\n return False", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def test_graph_special_characters(storage):\n \"\"\"\n \u6d4b\u8bd5\u56fe\u6570\u636e\u5e93\u5bf9\u7279\u6b8a\u5b57\u7b26\u7684\u5904\u7406:\n 1. \u6d4b\u8bd5\u8282\u70b9\u540d\u79f0\u548c\u63cf\u8ff0\u4e2d\u5305\u542b\u5355\u5f15\u53f7\u3001\u53cc\u5f15\u53f7\u548c\u53cd\u659c\u6760\n 2. \u6d4b\u8bd5\u8fb9\u7684\u63cf\u8ff0\u4e2d\u5305\u542b\u5355\u5f15\u53f7\u3001\u53cc\u5f15\u53f7\u548c\u53cd\u659c\u6760\n 3. \u9a8c\u8bc1\u7279\u6b8a\u5b57\u7b26\u662f\u5426\u88ab\u6b63\u786e\u4fdd\u5b58\u548c\u68c0\u7d22\n \"\"\"\n try:\n # 1. \u6d4b\u8bd5\u8282\u70b9\u540d\u79f0\u4e2d\u7684\u7279\u6b8a\u5b57\u7b26\n node1_id = \"\u5305\u542b'\u5355\u5f15\u53f7'\u7684\u8282\u70b9\"\n node1_data = {\n \"entity_id\": node1_id,\n \"description\": \"\u8fd9\u4e2a\u63cf\u8ff0\u5305\u542b'\u5355\u5f15\u53f7'\u3001\\\"\u53cc\u5f15\u53f7\\\"\u548c\\\\\u53cd\u659c\u6760\",\n \"keywords\": \"\u7279\u6b8a\u5b57\u7b26,\u5f15\u53f7,\u8f6c\u4e49\",\n \"entity_type\": \"\u6d4b\u8bd5\u8282\u70b9\",\n }\n print(f\"\u63d2\u5165\u5305\u542b\u7279\u6b8a\u5b57\u7b26\u7684\u8282\u70b91: {node1_id}\")\n await storage.upsert_node(node1_id, node1_data)\n\n # 2. 
\u6d4b\u8bd5\u8282\u70b9\u540d\u79f0\u4e2d\u7684\u53cc\u5f15\u53f7\n node2_id = '\u5305\u542b\"\u53cc\u5f15\u53f7\"\u7684\u8282\u70b9'\n node2_data = {\n \"entity_id\": node2_id,\n \"description\": \"\u8fd9\u4e2a\u63cf\u8ff0\u540c\u65f6\u5305\u542b'\u5355\u5f15\u53f7'\u548c\\\"\u53cc\u5f15\u53f7\\\"\u4ee5\u53ca\\\\\u53cd\u659c\u6760\\\\\u8def\u5f84\",\n \"keywords\": \"\u7279\u6b8a\u5b57\u7b26,\u5f15\u53f7,JSON\",\n \"entity_type\": \"\u6d4b\u8bd5\u8282\u70b9\",\n }\n print(f\"\u63d2\u5165\u5305\u542b\u7279\u6b8a\u5b57\u7b26\u7684\u8282\u70b92: {node2_id}\")\n await storage.upsert_node(node2_id, node2_data)\n\n # 3. \u6d4b\u8bd5\u8282\u70b9\u540d\u79f0\u4e2d\u7684\u53cd\u659c\u6760\n node3_id = \"\u5305\u542b\\\\\u53cd\u659c\u6760\\\\\u7684\u8282\u70b9\"\n node3_data = {\n \"entity_id\": node3_id,\n \"description\": \"\u8fd9\u4e2a\u63cf\u8ff0\u5305\u542bWindows\u8def\u5f84C:\\\\Program Files\\\\\u548c\u8f6c\u4e49\u5b57\u7b26\\\\n\\\\t\",\n \"keywords\": \"\u53cd\u659c\u6760,\u8def\u5f84,\u8f6c\u4e49\",\n \"entity_type\": \"\u6d4b\u8bd5\u8282\u70b9\",\n }\n print(f\"\u63d2\u5165\u5305\u542b\u7279\u6b8a\u5b57\u7b26\u7684\u8282\u70b93: {node3_id}\")\n await storage.upsert_node(node3_id, node3_data)\n\n # 4. \u6d4b\u8bd5\u8fb9\u63cf\u8ff0\u4e2d\u7684\u7279\u6b8a\u5b57\u7b26\n edge1_data = {\n \"relationship\": \"\u7279\u6b8a'\u5173\u7cfb'\",\n \"weight\": 1.0,\n \"description\": \"\u8fd9\u4e2a\u8fb9\u63cf\u8ff0\u5305\u542b'\u5355\u5f15\u53f7'\u3001\\\"\u53cc\u5f15\u53f7\\\"\u548c\\\\\u53cd\u659c\u6760\",\n }\n print(f\"\u63d2\u5165\u5305\u542b\u7279\u6b8a\u5b57\u7b26\u7684\u8fb9: {node1_id} -> {node2_id}\")\n await storage.upsert_edge(node1_id, node2_id, edge1_data)\n\n # 5. \u6d4b\u8bd5\u8fb9\u63cf\u8ff0\u4e2d\u7684\u66f4\u590d\u6742\u7279\u6b8a\u5b57\u7b26\u7ec4\u5408\n edge2_data = {\n \"relationship\": '\u590d\u6742\"\u5173\u7cfb\"\\\\\u7c7b\u578b',\n \"weight\": 0.8,\n \"description\": \"\u5305\u542bSQL\u6ce8\u5165\u5c1d\u8bd5: SELECT * FROM users WHERE name='admin'--\",\n }\n print(f\"\u63d2\u5165\u5305\u542b\u590d\u6742\u7279\u6b8a\u5b57\u7b26\u7684\u8fb9: {node2_id} -> {node3_id}\")\n await storage.upsert_edge(node2_id, node3_id, edge2_data)\n\n # 6. \u9a8c\u8bc1\u8282\u70b9\u7279\u6b8a\u5b57\u7b26\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n print(\"\\n== \u9a8c\u8bc1\u8282\u70b9\u7279\u6b8a\u5b57\u7b26\")\n for node_id, original_data in [\n (node1_id, node1_data),\n (node2_id, node2_data),\n (node3_id, node3_data),\n ]:\n node_props = await storage.get_node(node_id)\n if node_props:\n print(f\"\u6210\u529f\u8bfb\u53d6\u8282\u70b9: {node_id}\")\n print(f\"\u8282\u70b9\u63cf\u8ff0: {node_props.get('description', '\u65e0\u63cf\u8ff0')}\")\n\n # \u9a8c\u8bc1\u8282\u70b9ID\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n assert (\n node_props.get(\"entity_id\") == node_id\n ), f\"\u8282\u70b9ID\u4e0d\u5339\u914d: \u671f\u671b {node_id}, \u5b9e\u9645 {node_props.get('entity_id')}\"\n\n # \u9a8c\u8bc1\u63cf\u8ff0\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n assert (\n node_props.get(\"description\") == original_data[\"description\"]\n ), f\"\u8282\u70b9\u63cf\u8ff0\u4e0d\u5339\u914d: \u671f\u671b {original_data['description']}, \u5b9e\u9645 {node_props.get('description')}\"\n\n print(f\"\u8282\u70b9 {node_id} \u7279\u6b8a\u5b57\u7b26\u9a8c\u8bc1\u6210\u529f\")\n else:\n print(f\"\u8bfb\u53d6\u8282\u70b9\u5c5e\u6027\u5931\u8d25: {node_id}\")\n assert False, f\"\u672a\u80fd\u8bfb\u53d6\u8282\u70b9\u5c5e\u6027: {node_id}\"\n\n # 7. 
\u9a8c\u8bc1\u8fb9\u7279\u6b8a\u5b57\u7b26\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n print(\"\\n== \u9a8c\u8bc1\u8fb9\u7279\u6b8a\u5b57\u7b26\")\n edge1_props = await storage.get_edge(node1_id, node2_id)\n if edge1_props:\n print(f\"\u6210\u529f\u8bfb\u53d6\u8fb9: {node1_id} -> {node2_id}\")\n print(f\"\u8fb9\u5173\u7cfb: {edge1_props.get('relationship', '\u65e0\u5173\u7cfb')}\")\n print(f\"\u8fb9\u63cf\u8ff0: {edge1_props.get('description', '\u65e0\u63cf\u8ff0')}\")\n\n # \u9a8c\u8bc1\u8fb9\u5173\u7cfb\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n assert (\n edge1_props.get(\"relationship\") == edge1_data[\"relationship\"]\n ), f\"\u8fb9\u5173\u7cfb\u4e0d\u5339\u914d: \u671f\u671b {edge1_data['relationship']}, \u5b9e\u9645 {edge1_props.get('relationship')}\"\n\n # \u9a8c\u8bc1\u8fb9\u63cf\u8ff0\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n assert (\n edge1_props.get(\"description\") == edge1_data[\"description\"]\n ), f\"\u8fb9\u63cf\u8ff0\u4e0d\u5339\u914d: \u671f\u671b {edge1_data['description']}, \u5b9e\u9645 {edge1_props.get('description')}\"\n\n print(f\"\u8fb9 {node1_id} -> {node2_id} \u7279\u6b8a\u5b57\u7b26\u9a8c\u8bc1\u6210\u529f\")\n else:\n print(f\"\u8bfb\u53d6\u8fb9\u5c5e\u6027\u5931\u8d25: {node1_id} -> {node2_id}\")\n assert False, f\"\u672a\u80fd\u8bfb\u53d6\u8fb9\u5c5e\u6027: {node1_id} -> {node2_id}\"\n\n edge2_props = await storage.get_edge(node2_id, node3_id)\n if edge2_props:\n print(f\"\u6210\u529f\u8bfb\u53d6\u8fb9: {node2_id} -> {node3_id}\")\n print(f\"\u8fb9\u5173\u7cfb: {edge2_props.get('relationship', '\u65e0\u5173\u7cfb')}\")\n print(f\"\u8fb9\u63cf\u8ff0: {edge2_props.get('description', '\u65e0\u63cf\u8ff0')}\")\n\n # \u9a8c\u8bc1\u8fb9\u5173\u7cfb\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n assert (\n edge2_props.get(\"relationship\") == edge2_data[\"relationship\"]\n ), f\"\u8fb9\u5173\u7cfb\u4e0d\u5339\u914d: \u671f\u671b {edge2_data['relationship']}, \u5b9e\u9645 {edge2_props.get('relationship')}\"\n\n # \u9a8c\u8bc1\u8fb9\u63cf\u8ff0\u662f\u5426\u6b63\u786e\u4fdd\u5b58\n assert (\n edge2_props.get(\"description\") == edge2_data[\"description\"]\n ), f\"\u8fb9\u63cf\u8ff0\u4e0d\u5339\u914d: \u671f\u671b {edge2_data['description']}, \u5b9e\u9645 {edge2_props.get('description')}\"\n\n print(f\"\u8fb9 {node2_id} -> {node3_id} \u7279\u6b8a\u5b57\u7b26\u9a8c\u8bc1\u6210\u529f\")\n else:\n print(f\"\u8bfb\u53d6\u8fb9\u5c5e\u6027\u5931\u8d25: {node2_id} -> {node3_id}\")\n assert False, f\"\u672a\u80fd\u8bfb\u53d6\u8fb9\u5c5e\u6027: {node2_id} -> {node3_id}\"\n\n print(\"\\n\u7279\u6b8a\u5b57\u7b26\u6d4b\u8bd5\u5b8c\u6210\uff0c\u6570\u636e\u5df2\u4fdd\u7559\u5728\u6570\u636e\u5e93\u4e2d\")\n return True\n\n except Exception as e:\n ASCIIColors.red(f\"\u6d4b\u8bd5\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef: {str(e)}\")\n return False", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def test_graph_undirected_property(storage):\n \"\"\"\n \u4e13\u95e8\u6d4b\u8bd5\u56fe\u5b58\u50a8\u7684\u65e0\u5411\u56fe\u7279\u6027:\n 1. \u9a8c\u8bc1\u63d2\u5165\u4e00\u4e2a\u65b9\u5411\u7684\u8fb9\u540e\uff0c\u53cd\u5411\u67e5\u8be2\u662f\u5426\u80fd\u83b7\u5f97\u76f8\u540c\u7684\u7ed3\u679c\n 2. \u9a8c\u8bc1\u8fb9\u7684\u5c5e\u6027\u5728\u6b63\u5411\u548c\u53cd\u5411\u67e5\u8be2\u4e2d\u662f\u5426\u4e00\u81f4\n 3. \u9a8c\u8bc1\u5220\u9664\u4e00\u4e2a\u65b9\u5411\u7684\u8fb9\u540e\uff0c\u53e6\u4e00\u4e2a\u65b9\u5411\u7684\u8fb9\u662f\u5426\u4e5f\u88ab\u5220\u9664\n 4. 
\u9a8c\u8bc1\u6279\u91cf\u64cd\u4f5c\u4e2d\u7684\u65e0\u5411\u56fe\u7279\u6027\n \"\"\"\n try:\n # 1. \u63d2\u5165\u6d4b\u8bd5\u6570\u636e\n # \u63d2\u5165\u8282\u70b91: \u8ba1\u7b97\u673a\u79d1\u5b66\n node1_id = \"\u8ba1\u7b97\u673a\u79d1\u5b66\"\n node1_data = {\n \"entity_id\": node1_id,\n \"description\": \"\u8ba1\u7b97\u673a\u79d1\u5b66\u662f\u7814\u7a76\u8ba1\u7b97\u673a\u53ca\u5176\u5e94\u7528\u7684\u79d1\u5b66\u3002\",\n \"keywords\": \"\u8ba1\u7b97\u673a,\u79d1\u5b66,\u6280\u672f\",\n \"entity_type\": \"\u5b66\u79d1\",\n }\n print(f\"\u63d2\u5165\u8282\u70b91: {node1_id}\")\n await storage.upsert_node(node1_id, node1_data)\n\n # \u63d2\u5165\u8282\u70b92: \u6570\u636e\u7ed3\u6784\n node2_id = \"\u6570\u636e\u7ed3\u6784\"\n node2_data = {\n \"entity_id\": node2_id,\n \"description\": \"\u6570\u636e\u7ed3\u6784\u662f\u8ba1\u7b97\u673a\u79d1\u5b66\u4e2d\u7684\u4e00\u4e2a\u57fa\u7840\u6982\u5ff5\uff0c\u7528\u4e8e\u7ec4\u7ec7\u548c\u5b58\u50a8\u6570\u636e\u3002\",\n \"keywords\": \"\u6570\u636e,\u7ed3\u6784,\u7ec4\u7ec7\",\n \"entity_type\": \"\u6982\u5ff5\",\n }\n print(f\"\u63d2\u5165\u8282\u70b92: {node2_id}\")\n await storage.upsert_node(node2_id, node2_data)\n\n # \u63d2\u5165\u8282\u70b93: \u7b97\u6cd5\n node3_id = \"\u7b97\u6cd5\"\n node3_data = {\n \"entity_id\": node3_id,\n \"description\": \"\u7b97\u6cd5\u662f\u89e3\u51b3\u95ee\u9898\u7684\u6b65\u9aa4\u548c\u65b9\u6cd5\u3002\",\n \"keywords\": \"\u7b97\u6cd5,\u6b65\u9aa4,\u65b9\u6cd5\",\n \"entity_type\": \"\u6982\u5ff5\",\n }\n print(f\"\u63d2\u5165\u8282\u70b93: {node3_id}\")\n await storage.upsert_node(node3_id, node3_data)\n\n # 2. \u6d4b\u8bd5\u63d2\u5165\u8fb9\u540e\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"\\n== \u6d4b\u8bd5\u63d2\u5165\u8fb9\u540e\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n\n # \u63d2\u5165\u8fb91: \u8ba1\u7b97\u673a\u79d1\u5b66 -> \u6570\u636e\u7ed3\u6784\n edge1_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u8ba1\u7b97\u673a\u79d1\u5b66\u5305\u542b\u6570\u636e\u7ed3\u6784\u8fd9\u4e2a\u6982\u5ff5\",\n }\n print(f\"\u63d2\u5165\u8fb91: {node1_id} -> {node2_id}\")\n await storage.upsert_edge(node1_id, node2_id, edge1_data)\n\n # \u9a8c\u8bc1\u6b63\u5411\u67e5\u8be2\n forward_edge = await storage.get_edge(node1_id, node2_id)\n print(f\"\u6b63\u5411\u8fb9\u5c5e\u6027: {forward_edge}\")\n assert forward_edge is not None, f\"\u672a\u80fd\u8bfb\u53d6\u6b63\u5411\u8fb9\u5c5e\u6027: {node1_id} -> {node2_id}\"\n\n # \u9a8c\u8bc1\u53cd\u5411\u67e5\u8be2\n reverse_edge = await storage.get_edge(node2_id, node1_id)\n print(f\"\u53cd\u5411\u8fb9\u5c5e\u6027: {reverse_edge}\")\n assert reverse_edge is not None, f\"\u672a\u80fd\u8bfb\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027: {node2_id} -> {node1_id}\"\n\n # \u9a8c\u8bc1\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u662f\u5426\u4e00\u81f4\n assert (\n forward_edge == reverse_edge\n ), \"\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u4e0d\u4e00\u81f4\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u4e00\u81f4\")\n\n # 3. 
\u6d4b\u8bd5\u8fb9\u7684\u5ea6\u6570\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"\\n== \u6d4b\u8bd5\u8fb9\u7684\u5ea6\u6570\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n\n # \u63d2\u5165\u8fb92: \u8ba1\u7b97\u673a\u79d1\u5b66 -> \u7b97\u6cd5\n edge2_data = {\n \"relationship\": \"\u5305\u542b\",\n \"weight\": 1.0,\n \"description\": \"\u8ba1\u7b97\u673a\u79d1\u5b66\u5305\u542b\u7b97\u6cd5\u8fd9\u4e2a\u6982\u5ff5\",\n }\n print(f\"\u63d2\u5165\u8fb92: {node1_id} -> {node3_id}\")\n await storage.upsert_edge(node1_id, node3_id, edge2_data)\n\n # \u9a8c\u8bc1\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u7684\u5ea6\u6570\n forward_degree = await storage.edge_degree(node1_id, node2_id)\n reverse_degree = await storage.edge_degree(node2_id, node1_id)\n print(f\"\u6b63\u5411\u8fb9 {node1_id} -> {node2_id} \u7684\u5ea6\u6570: {forward_degree}\")\n print(f\"\u53cd\u5411\u8fb9 {node2_id} -> {node1_id} \u7684\u5ea6\u6570: {reverse_degree}\")\n assert (\n forward_degree == reverse_degree\n ), \"\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u7684\u5ea6\u6570\u4e0d\u4e00\u81f4\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u7684\u5ea6\u6570\u4e00\u81f4\")\n\n # 4. \u6d4b\u8bd5\u5220\u9664\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"\\n== \u6d4b\u8bd5\u5220\u9664\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n\n # \u5220\u9664\u6b63\u5411\u8fb9\n print(f\"\u5220\u9664\u8fb9: {node1_id} -> {node2_id}\")\n await storage.remove_edges([(node1_id, node2_id)])\n\n # \u9a8c\u8bc1\u6b63\u5411\u8fb9\u662f\u5426\u88ab\u5220\u9664\n forward_edge = await storage.get_edge(node1_id, node2_id)\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u6b63\u5411\u8fb9\u5c5e\u6027 {node1_id} -> {node2_id}: {forward_edge}\")\n assert forward_edge is None, f\"\u8fb9 {node1_id} -> {node2_id} \u5e94\u5df2\u88ab\u5220\u9664\"\n\n # \u9a8c\u8bc1\u53cd\u5411\u8fb9\u662f\u5426\u4e5f\u88ab\u5220\u9664\n reverse_edge = await storage.get_edge(node2_id, node1_id)\n print(f\"\u5220\u9664\u540e\u67e5\u8be2\u53cd\u5411\u8fb9\u5c5e\u6027 {node2_id} -> {node1_id}: {reverse_edge}\")\n assert (\n reverse_edge is None\n ), f\"\u53cd\u5411\u8fb9 {node2_id} -> {node1_id} \u4e5f\u5e94\u88ab\u5220\u9664\uff0c\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u5931\u8d25\"\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u5220\u9664\u4e00\u4e2a\u65b9\u5411\u7684\u8fb9\u540e\uff0c\u53cd\u5411\u8fb9\u4e5f\u88ab\u5220\u9664\")\n\n # 5. 
\u6d4b\u8bd5\u6279\u91cf\u64cd\u4f5c\u4e2d\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"\\n== \u6d4b\u8bd5\u6279\u91cf\u64cd\u4f5c\u4e2d\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n\n # \u91cd\u65b0\u63d2\u5165\u8fb9\n await storage.upsert_edge(node1_id, node2_id, edge1_data)\n\n # \u6279\u91cf\u83b7\u53d6\u8fb9\u5c5e\u6027\n edge_dicts = [\n {\"src\": node1_id, \"tgt\": node2_id},\n {\"src\": node1_id, \"tgt\": node3_id},\n ]\n reverse_edge_dicts = [\n {\"src\": node2_id, \"tgt\": node1_id},\n {\"src\": node3_id, \"tgt\": node1_id},\n ]\n\n edges_dict = await storage.get_edges_batch(edge_dicts)\n reverse_edges_dict = await storage.get_edges_batch(reverse_edge_dicts)\n\n print(f\"\u6279\u91cf\u83b7\u53d6\u6b63\u5411\u8fb9\u5c5e\u6027\u7ed3\u679c: {edges_dict.keys()}\")\n print(f\"\u6279\u91cf\u83b7\u53d6\u53cd\u5411\u8fb9\u5c5e\u6027\u7ed3\u679c: {reverse_edges_dict.keys()}\")\n\n # \u9a8c\u8bc1\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u7684\u5c5e\u6027\u662f\u5426\u4e00\u81f4\n for (src, tgt), props in edges_dict.items():\n assert (\n tgt,\n src,\n ) in reverse_edges_dict, f\"\u53cd\u5411\u8fb9 {tgt} -> {src} \u5e94\u5728\u8fd4\u56de\u7ed3\u679c\u4e2d\"\n assert (\n props == reverse_edges_dict[(tgt, src)]\n ), f\"\u8fb9 {src} -> {tgt} \u548c\u53cd\u5411\u8fb9 {tgt} -> {src} \u7684\u5c5e\u6027\u4e0d\u4e00\u81f4\"\n\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6279\u91cf\u83b7\u53d6\u7684\u6b63\u5411\u548c\u53cd\u5411\u8fb9\u5c5e\u6027\u4e00\u81f4\")\n\n # 6. \u6d4b\u8bd5\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\n print(\"\\n== \u6d4b\u8bd5\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u8fb9\u7684\u65e0\u5411\u56fe\u7279\u6027\")\n\n nodes_edges = await storage.get_nodes_edges_batch([node1_id, node2_id])\n print(f\"\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u8fb9\u7ed3\u679c: {nodes_edges.keys()}\")\n\n # \u68c0\u67e5\u8282\u70b91\u7684\u8fb9\u662f\u5426\u5305\u542b\u6240\u6709\u76f8\u5173\u7684\u8fb9\uff08\u65e0\u8bba\u65b9\u5411\uff09\n node1_edges = nodes_edges[node1_id]\n node2_edges = nodes_edges[node2_id]\n\n # \u68c0\u67e5\u8282\u70b91\u662f\u5426\u6709\u5230\u8282\u70b92\u548c\u8282\u70b93\u7684\u8fb9\n has_edge_to_node2 = any(\n (src == node1_id and tgt == node2_id) for src, tgt in node1_edges\n )\n has_edge_to_node3 = any(\n (src == node1_id and tgt == node3_id) for src, tgt in node1_edges\n )\n\n assert has_edge_to_node2, f\"\u8282\u70b9 {node1_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u5230 {node2_id} \u7684\u8fb9\"\n assert has_edge_to_node3, f\"\u8282\u70b9 {node1_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u5230 {node3_id} \u7684\u8fb9\"\n\n # \u68c0\u67e5\u8282\u70b92\u662f\u5426\u6709\u5230\u8282\u70b91\u7684\u8fb9\n has_edge_to_node1 = any(\n (src == node2_id and tgt == node1_id)\n or (src == node1_id and tgt == node2_id)\n for src, tgt in node2_edges\n )\n assert (\n has_edge_to_node1\n ), f\"\u8282\u70b9 {node2_id} \u7684\u8fb9\u5217\u8868\u4e2d\u5e94\u5305\u542b\u4e0e {node1_id} \u7684\u8fde\u63a5\"\n\n print(\"\u65e0\u5411\u56fe\u7279\u6027\u9a8c\u8bc1\u6210\u529f\uff1a\u6279\u91cf\u83b7\u53d6\u7684\u8282\u70b9\u8fb9\u5305\u542b\u6240\u6709\u76f8\u5173\u7684\u8fb9\uff08\u65e0\u8bba\u65b9\u5411\uff09\")\n\n print(\"\\n\u65e0\u5411\u56fe\u7279\u6027\u6d4b\u8bd5\u5b8c\u6210\")\n return True\n\n except Exception as e:\n ASCIIColors.red(f\"\u6d4b\u8bd5\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef: {str(e)}\")\n return False", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": 
"tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "async def main():\n \"\"\"\u4e3b\u51fd\u6570\"\"\"\n # \u663e\u793a\u7a0b\u5e8f\u6807\u9898\n ASCIIColors.cyan(\"\"\"\n \u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557\n \u2551 \u901a\u7528\u56fe\u5b58\u50a8\u6d4b\u8bd5\u7a0b\u5e8f \u2551\n \u255a\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255d\n \"\"\")\n\n # \u68c0\u67e5.env\u6587\u4ef6\n if not check_env_file():\n return\n\n # \u52a0\u8f7d\u73af\u5883\u53d8\u91cf\n load_dotenv(dotenv_path=\".env\", override=False)\n\n # \u83b7\u53d6\u56fe\u5b58\u50a8\u7c7b\u578b\n graph_storage_type = os.getenv(\"LIGHTRAG_GRAPH_STORAGE\", \"NetworkXStorage\")\n ASCIIColors.magenta(f\"\\n\u5f53\u524d\u914d\u7f6e\u7684\u56fe\u5b58\u50a8\u7c7b\u578b: {graph_storage_type}\")\n ASCIIColors.white(\n f\"\u652f\u6301\u7684\u56fe\u5b58\u50a8\u7c7b\u578b: {', '.join(STORAGE_IMPLEMENTATIONS['GRAPH_STORAGE']['implementations'])}\"\n )\n\n # \u521d\u59cb\u5316\u5b58\u50a8\u5b9e\u4f8b\n storage = await initialize_graph_storage()\n if not storage:\n ASCIIColors.red(\"\u521d\u59cb\u5316\u5b58\u50a8\u5b9e\u4f8b\u5931\u8d25\uff0c\u6d4b\u8bd5\u7a0b\u5e8f\u9000\u51fa\")\n return\n\n try:\n # \u663e\u793a\u6d4b\u8bd5\u9009\u9879\n ASCIIColors.yellow(\"\\n\u8bf7\u9009\u62e9\u6d4b\u8bd5\u7c7b\u578b:\")\n ASCIIColors.white(\"1. \u57fa\u672c\u6d4b\u8bd5 (\u8282\u70b9\u548c\u8fb9\u7684\u63d2\u5165\u3001\u8bfb\u53d6)\")\n ASCIIColors.white(\"2. \u9ad8\u7ea7\u6d4b\u8bd5 (\u5ea6\u6570\u3001\u6807\u7b7e\u3001\u77e5\u8bc6\u56fe\u8c31\u3001\u5220\u9664\u64cd\u4f5c\u7b49)\")\n ASCIIColors.white(\"3. \u6279\u91cf\u64cd\u4f5c\u6d4b\u8bd5 (\u6279\u91cf\u83b7\u53d6\u8282\u70b9\u3001\u8fb9\u5c5e\u6027\u548c\u5ea6\u6570\u7b49)\")\n ASCIIColors.white(\"4. \u65e0\u5411\u56fe\u7279\u6027\u6d4b\u8bd5 (\u9a8c\u8bc1\u5b58\u50a8\u7684\u65e0\u5411\u56fe\u7279\u6027)\")\n ASCIIColors.white(\"5. \u7279\u6b8a\u5b57\u7b26\u6d4b\u8bd5 (\u9a8c\u8bc1\u5355\u5f15\u53f7\u3001\u53cc\u5f15\u53f7\u548c\u53cd\u659c\u6760\u7b49\u7279\u6b8a\u5b57\u7b26)\")\n ASCIIColors.white(\"6. 
\u5168\u90e8\u6d4b\u8bd5\")\n\n choice = input(\"\\n\u8bf7\u8f93\u5165\u9009\u9879 (1/2/3/4/5/6): \")\n\n # \u5728\u6267\u884c\u6d4b\u8bd5\u524d\u6e05\u7406\u6570\u636e\n if choice in [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]:\n ASCIIColors.yellow(\"\\n\u6267\u884c\u6d4b\u8bd5\u524d\u6e05\u7406\u6570\u636e...\")\n await storage.drop()\n ASCIIColors.green(\"\u6570\u636e\u6e05\u7406\u5b8c\u6210\\n\")\n\n if choice == \"1\":\n await test_graph_basic(storage)\n elif choice == \"2\":\n await test_graph_advanced(storage)\n elif choice == \"3\":\n await test_graph_batch_operations(storage)\n elif choice == \"4\":\n await test_graph_undirected_property(storage)\n elif choice == \"5\":\n await test_graph_special_characters(storage)\n elif choice == \"6\":\n ASCIIColors.cyan(\"\\n=== \u5f00\u59cb\u57fa\u672c\u6d4b\u8bd5 ===\")\n basic_result = await test_graph_basic(storage)\n\n if basic_result:\n ASCIIColors.cyan(\"\\n=== \u5f00\u59cb\u9ad8\u7ea7\u6d4b\u8bd5 ===\")\n advanced_result = await test_graph_advanced(storage)\n\n if advanced_result:\n ASCIIColors.cyan(\"\\n=== \u5f00\u59cb\u6279\u91cf\u64cd\u4f5c\u6d4b\u8bd5 ===\")\n batch_result = await test_graph_batch_operations(storage)\n\n if batch_result:\n ASCIIColors.cyan(\"\\n=== \u5f00\u59cb\u65e0\u5411\u56fe\u7279\u6027\u6d4b\u8bd5 ===\")\n undirected_result = await test_graph_undirected_property(\n storage\n )\n\n if undirected_result:\n ASCIIColors.cyan(\"\\n=== \u5f00\u59cb\u7279\u6b8a\u5b57\u7b26\u6d4b\u8bd5 ===\")\n await test_graph_special_characters(storage)\n else:\n ASCIIColors.red(\"\u65e0\u6548\u7684\u9009\u9879\")\n\n finally:\n # \u5173\u95ed\u8fde\u63a5\n if storage:\n await storage.finalize()\n ASCIIColors.green(\"\\n\u5b58\u50a8\u8fde\u63a5\u5df2\u5173\u95ed\")", "creation_date": "2025-04-03T19:40:46Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_graph_storage.py", "stars": 18349, "label": 0} +{"function": "def make_request(\n url: str, data: Dict[str, Any], stream: bool = False, check_status: bool = True\n) -> requests.Response:\n \"\"\"Send an HTTP request with retry mechanism\n Args:\n url: Request URL\n data: Request data\n stream: Whether to use streaming response\n check_status: Whether to check HTTP status code (default: True)\n Returns:\n requests.Response: Response object\n\n Raises:\n requests.exceptions.RequestException: Request failed after all retries\n requests.exceptions.HTTPError: HTTP status code is not 200 (when check_status is True)\n \"\"\"\n server_config = CONFIG[\"server\"]\n max_retries = server_config[\"max_retries\"]\n retry_delay = server_config[\"retry_delay\"]\n timeout = server_config[\"timeout\"]\n\n for attempt in range(max_retries):\n try:\n response = requests.post(url, json=data, stream=stream, timeout=timeout)\n if check_status and response.status_code != 200:\n response.raise_for_status()\n return response\n except requests.exceptions.RequestException as e:\n if attempt == max_retries - 1: # Last retry\n raise\n print(f\"\\nRequest failed, retrying in {retry_delay} seconds: {str(e)}\")\n time.sleep(retry_delay)", "creation_date": "2025-02-18T20:12:06Z", "repo": "HKUDS/LightRAG", "file_path": "tests/test_lightrag_ollama_chat.py", "stars": 18349, "label": 0} +{"function": "def ocr_error_predictor() -> OCRErrorPredictor:\n ocr_error_predictor = OCRErrorPredictor()\n yield ocr_error_predictor\n del ocr_error_predictor", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def layout_predictor() -> 
LayoutPredictor:\n layout_predictor = LayoutPredictor()\n yield layout_predictor\n del layout_predictor", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def detection_predictor() -> DetectionPredictor:\n detection_predictor = DetectionPredictor()\n yield detection_predictor\n del detection_predictor", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def recognition_predictor() -> RecognitionPredictor:\n recognition_predictor = RecognitionPredictor()\n yield recognition_predictor\n del recognition_predictor", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def table_rec_predictor() -> TableRecPredictor:\n table_rec_predictor = TableRecPredictor()\n yield table_rec_predictor\n del table_rec_predictor", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def test_image():\n image = Image.new(\"RGB\", (1024, 1024), \"white\")\n draw = ImageDraw.Draw(image)\n draw.text((10, 10), \"Hello World\", fill=\"black\", font_size=72)\n draw.text(\n (10, 200),\n \"This is a sentence of text.\\nNow it is a paragraph.\\nA three-line one.\",\n fill=\"black\",\n font_size=24,\n )\n return image", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def test_image_tall():\n image = Image.new(\"RGB\", (4096, 4096), \"white\")\n draw = ImageDraw.Draw(image)\n draw.text((10, 10), \"Hello World\", fill=\"black\", font_size=72)\n draw.text(\n (4000, 4000),\n \"This is a sentence of text.\\n\\nNow it is a paragraph.\\n\\nA three-line one.\",\n fill=\"black\",\n font_size=24,\n )\n return image", "creation_date": "2024-12-19T16:15:03Z", "repo": "datalab-to/surya", "file_path": "tests/conftest.py", "stars": 17783, "label": 0} +{"function": "def test_detection(detection_predictor, test_image):\n detection_results = detection_predictor([test_image])\n\n assert len(detection_results) == 1\n assert detection_results[0].image_bbox == [0, 0, 1024, 1024]\n\n bboxes = detection_results[0].bboxes\n assert len(bboxes) == 4", "creation_date": "2025-01-07T14:52:42Z", "repo": "datalab-to/surya", "file_path": "tests/test_detection.py", "stars": 17783, "label": 0} +{"function": "def test_detection_chunking(detection_predictor, test_image_tall):\n detection_results = detection_predictor([test_image_tall])\n\n assert len(detection_results) == 1\n assert detection_results[0].image_bbox == [0, 0, 4096, 4096]\n\n bboxes = detection_results[0].bboxes\n assert len(bboxes) >= 3 # Sometimes merges into 3\n assert abs(4000 - bboxes[1].polygon[0][0]) < 50", "creation_date": "2025-01-07T14:52:42Z", "repo": "datalab-to/surya", "file_path": "tests/test_detection.py", "stars": 17783, "label": 0} +{"function": "def test_latex_ocr(recognition_predictor):\n img = Image.new(\"RGB\", (200, 100), color=\"white\")\n draw = ImageDraw.Draw(img)\n draw.text((10, 10), \"E = mc2\", fill=\"black\", font_size=48)\n\n results: List[OCRResult] = recognition_predictor(\n [img], [TaskNames.block_without_boxes], bboxes=[[[0, 0, 200, 100]]]\n )\n text = results[0].text_lines[0].text\n assert len(results) == 1\n\n assert text.startswith(\"\")", "creation_date": "2025-01-27T16:25:29Z", "repo": 
"datalab-to/surya", "file_path": "tests/test_latex_ocr.py", "stars": 17783, "label": 0} +{"function": "def generate(\n mmgpt: MultiModalityCausalLM,\n vl_chat_processor: VLChatProcessor,\n prompt: str,\n temperature: float = 1,\n parallel_size: int = 16,\n cfg_weight: float = 5,\n image_token_num_per_image: int = 576,\n img_size: int = 384,\n patch_size: int = 16,\n):\n input_ids = vl_chat_processor.tokenizer.encode(prompt)\n input_ids = torch.LongTensor(input_ids)\n\n tokens = torch.zeros((parallel_size*2, len(input_ids)), dtype=torch.int).cuda()\n for i in range(parallel_size*2):\n tokens[i, :] = input_ids\n if i % 2 != 0:\n tokens[i, 1:-1] = vl_chat_processor.pad_id\n\n inputs_embeds = mmgpt.language_model.get_input_embeddings()(tokens)\n\n generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()\n\n for i in range(image_token_num_per_image):\n outputs = mmgpt.language_model.model(inputs_embeds=inputs_embeds, use_cache=True, past_key_values=outputs.past_key_values if i != 0 else None)\n hidden_states = outputs.last_hidden_state\n \n logits = mmgpt.gen_head(hidden_states[:, -1, :])\n logit_cond = logits[0::2, :]\n logit_uncond = logits[1::2, :]\n \n logits = logit_uncond + cfg_weight * (logit_cond-logit_uncond)\n probs = torch.softmax(logits / temperature, dim=-1)\n\n next_token = torch.multinomial(probs, num_samples=1)\n generated_tokens[:, i] = next_token.squeeze(dim=-1)\n\n next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)\n img_embeds = mmgpt.prepare_gen_img_embeds(next_token)\n inputs_embeds = img_embeds.unsqueeze(dim=1)\n\n\n dec = mmgpt.gen_vision_model.decode_code(generated_tokens.to(dtype=torch.int), shape=[parallel_size, 8, img_size//patch_size, img_size//patch_size])\n dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)\n\n dec = np.clip((dec + 1) / 2 * 255, 0, 255)\n\n visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)\n visual_img[:, :, :] = dec\n\n os.makedirs('generated_samples', exist_ok=True)\n for i in range(parallel_size):\n save_path = os.path.join('generated_samples', \"img_{}.jpg\".format(i))\n PIL.Image.fromarray(visual_img[i]).save(save_path)", "creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "generation_inference.py", "stars": 17447, "label": 0} +{"function": "def create_prompt(user_input: str) -> str:\n conversation = [\n {\n \"role\": \"User\",\n \"content\": user_input,\n },\n {\"role\": \"Assistant\", \"content\": \"\"},\n ]\n\n sft_format = vl_chat_processor.apply_sft_template_for_multi_turn_prompts(\n conversations=conversation,\n sft_format=vl_chat_processor.sft_format,\n system_prompt=\"\",\n )\n prompt = sft_format + vl_chat_processor.image_start_tag\n return prompt", "creation_date": "2024-10-19T11:45:08Z", "repo": "deepseek-ai/Janus", "file_path": "interactivechat.py", "stars": 17447, "label": 0} +{"function": "def generate(\n mmgpt: MultiModalityCausalLM,\n vl_chat_processor: VLChatProcessor,\n prompt: str,\n short_prompt: str,\n parallel_size: int = 16,\n temperature: float = 1,\n cfg_weight: float = 5,\n image_token_num_per_image: int = 576,\n img_size: int = 384,\n patch_size: int = 16,\n):\n input_ids = vl_chat_processor.tokenizer.encode(prompt)\n input_ids = torch.LongTensor(input_ids)\n\n tokens = torch.zeros((parallel_size * 2, len(input_ids)), dtype=torch.int).cuda()\n for i in range(parallel_size * 2):\n tokens[i, :] = input_ids\n if i % 2 != 0:\n tokens[i, 1:-1] = 
vl_chat_processor.pad_id\n\n inputs_embeds = mmgpt.language_model.get_input_embeddings()(tokens)\n\n generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()\n outputs = None # Initialize outputs for use in the loop\n\n for i in range(image_token_num_per_image):\n outputs = mmgpt.language_model.model(\n inputs_embeds=inputs_embeds,\n use_cache=True,\n past_key_values=outputs.past_key_values if i != 0 else None\n )\n hidden_states = outputs.last_hidden_state\n\n logits = mmgpt.gen_head(hidden_states[:, -1, :])\n logit_cond = logits[0::2, :]\n logit_uncond = logits[1::2, :]\n\n logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)\n probs = torch.softmax(logits / temperature, dim=-1)\n\n next_token = torch.multinomial(probs, num_samples=1)\n generated_tokens[:, i] = next_token.squeeze(dim=-1)\n\n next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)\n img_embeds = mmgpt.prepare_gen_img_embeds(next_token)\n inputs_embeds = img_embeds.unsqueeze(dim=1)\n\n dec = mmgpt.gen_vision_model.decode_code(\n generated_tokens.to(dtype=torch.int),\n shape=[parallel_size, 8, img_size // patch_size, img_size // patch_size]\n )\n dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)\n\n dec = np.clip((dec + 1) / 2 * 255, 0, 255)\n\n visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)\n visual_img[:, :, :] = dec\n\n os.makedirs('generated_samples', exist_ok=True)\n\n # Create a timestamp\n timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n\n # Sanitize the short_prompt to ensure it's safe for filenames\n short_prompt = re.sub(r'\\W+', '_', short_prompt)[:50]\n\n # Save images with timestamp and part of the user prompt in the filename\n for i in range(parallel_size):\n save_path = os.path.join('generated_samples', f\"img_{timestamp}_{short_prompt}_{i}.jpg\")\n PIL.Image.fromarray(visual_img[i]).save(save_path)", "creation_date": "2024-10-19T11:45:08Z", "repo": "deepseek-ai/Janus", "file_path": "interactivechat.py", "stars": 17447, "label": 0} +{"function": "def interactive_image_generator():\n print(\"Welcome to the interactive image generator!\")\n\n # Ask for the number of images at the start of the session\n while True:\n num_images_input = input(\"How many images would you like to generate per prompt? (Enter a positive integer): \")\n if num_images_input.isdigit() and int(num_images_input) > 0:\n parallel_size = int(num_images_input)\n break\n else:\n print(\"Invalid input. Please enter a positive integer.\")\n\n while True:\n user_input = input(\"Please describe the image you'd like to generate (or type 'exit' to quit): \")\n\n if user_input.lower() == 'exit':\n print(\"Exiting the image generator. Goodbye!\")\n break\n\n prompt = create_prompt(user_input)\n\n # Create a sanitized version of user_input for the filename\n short_prompt = re.sub(r'\\W+', '_', user_input)[:50]\n\n print(f\"Generating {parallel_size} image(s) for: '{user_input}'\")\n generate(\n mmgpt=vl_gpt,\n vl_chat_processor=vl_chat_processor,\n prompt=prompt,\n short_prompt=short_prompt,\n parallel_size=parallel_size # Pass the user-specified number of images\n )\n\n print(\"Image generation complete! 
Check the 'generated_samples' folder for the output.\\n\")", "creation_date": "2024-10-19T11:45:08Z", "repo": "deepseek-ai/Janus", "file_path": "interactivechat.py", "stars": 17447, "label": 0} +{"function": "def register_conv_template(template: Conversation, override: bool = False):\n \"\"\"Register a new conversation template.\"\"\"\n if not override:\n assert (\n template.name not in conv_templates\n ), f\"{template.name} has been registered.\"\n\n conv_templates[template.name] = template", "creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "janus/utils/conversation.py", "stars": 17447, "label": 0} +{"function": "def get_conv_template(name: str) -> Conversation:\n \"\"\"Get a conversation template.\"\"\"\n return conv_templates[name].copy()", "creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "janus/utils/conversation.py", "stars": 17447, "label": 0} +{"function": " def get_prompt(self) -> str:\n \"\"\"Get the prompt for generation.\"\"\"\n system_prompt = self.system_template.format(system_message=self.system_message)\n\n if self.sep_style == SeparatorStyle.DeepSeek:\n seps = [self.sep, self.sep2]\n if system_prompt == \"\" or system_prompt is None:\n ret = \"\"\n else:\n ret = system_prompt + seps[0]\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + \": \" + message + seps[i % 2]\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.LLAMA2:\n seps = [self.sep, self.sep2]\n if self.system_message:\n ret = system_prompt\n else:\n ret = \"[INST] \"\n for i, (role, message) in enumerate(self.messages):\n tag = self.roles[i % 2]\n if message:\n if type(message) is tuple: # multimodal message\n message, _ = message\n if i == 0:\n ret += message + \" \"\n else:\n ret += tag + \" \" + message + seps[i % 2]\n else:\n ret += tag\n return ret\n elif self.sep_style == SeparatorStyle.PLAIN:\n seps = [self.sep, self.sep2]\n ret = \"\"\n for i, (role, message) in enumerate(self.messages):\n if message:\n if type(message) is tuple:\n message, _, _ = message\n if i % 2 == 0:\n ret += message + seps[i % 2]\n else:\n ret += message + seps[i % 2]\n else:\n ret += \"\"\n return ret\n elif self.sep_style == SeparatorStyle.ALIGNMENT:\n seps = [self.sep, self.sep2]\n ret = \"\"\n for i, (role, message) in enumerate(self.messages):\n if message:\n if type(message) is tuple:\n message, _, _ = message\n if i % 2 == 0:\n ret += \"\\n\" + seps[i % 2]\n else:\n ret += message + seps[i % 2]\n else:\n ret += \"\"\n return ret\n else:\n raise ValueError(f\"Invalid style: {self.sep_style}\")", "creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "janus/utils/conversation.py", "stars": 17447, "label": 0} +{"function": " def get_prompt_for_current_round(self, content=None):\n \"\"\"Get current round formatted question prompt during sft training\"\"\"\n if self.sep_style == SeparatorStyle.PLAIN:\n formatted_question = \"\\n\"\n elif self.sep_style == SeparatorStyle.DeepSeek:\n formatted_question = (\n f\"{self.roles[0]}: \" + content.strip() + self.sep + f\"{self.roles[1]}:\"\n )\n else:\n raise ValueError(f\"Unsupported sep_style: {self.sep_style}\")\n return formatted_question", "creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "janus/utils/conversation.py", "stars": 17447, "label": 0} +{"function": " def set_system_message(self, system_message: str):\n \"\"\"Set the system message.\"\"\"\n self.system_message = system_message", 
"creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "janus/utils/conversation.py", "stars": 17447, "label": 0} +{"function": " def append_message(self, role: str, message: str):\n \"\"\"Append a new message.\"\"\"\n self.messages.append([role, message])", "creation_date": "2024-10-18T03:58:52Z", "repo": "deepseek-ai/Janus", "file_path": "janus/utils/conversation.py", "stars": 17447, "label": 0} +{"function": "def setup_logging():\n \"\"\"Configure logging system to output logs to file, memory queue, and console\"\"\"\n # Create logs directory (if it doesn't exist)\n logs_dir = os.path.join(os.path.dirname(__file__), \"logs\")\n os.makedirs(logs_dir, exist_ok=True)\n\n # Generate log filename (using current date)\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.join(logs_dir, f\"gradio_log_{current_date}.txt\")\n\n # Configure root logger (captures all logs)\n root_logger = logging.getLogger()\n\n # Clear existing handlers to avoid duplicate logs\n for handler in root_logger.handlers[:]:\n root_logger.removeHandler(handler)\n\n root_logger.setLevel(logging.INFO)\n\n # Create file handler\n file_handler = logging.FileHandler(log_file, encoding=\"utf-8\", mode=\"a\")\n file_handler.setLevel(logging.INFO)\n\n # Create console handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n\n # Create formatter\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n file_handler.setFormatter(formatter)\n console_handler.setFormatter(formatter)\n\n # Add handlers to root logger\n root_logger.addHandler(file_handler)\n root_logger.addHandler(console_handler)\n\n logging.info(\"Logging system initialized, log file: %s\", log_file)\n return log_file", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def log_reader_thread(log_file):\n \"\"\"Background thread that continuously reads the log file and adds new lines to the queue\"\"\"\n try:\n with open(log_file, \"r\", encoding=\"utf-8\") as f:\n # Move to the end of file\n f.seek(0, 2)\n\n while not STOP_LOG_THREAD.is_set():\n line = f.readline()\n if line:\n LOG_QUEUE.put(line) # Add to conversation record queue\n else:\n # No new lines, wait for a short time\n time.sleep(0.1)\n except Exception as e:\n logging.error(f\"Log reader thread error: {str(e)}\")", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def get_latest_logs(max_lines=100, queue_source=None):\n \"\"\"Get the latest log lines from the queue, or read directly from the file if the queue is empty\n\n Args:\n max_lines: Maximum number of lines to return\n queue_source: Specify which queue to use, default is LOG_QUEUE\n\n Returns:\n str: Log content\n \"\"\"\n logs = []\n log_queue = queue_source if queue_source else LOG_QUEUE\n\n # Create a temporary queue to store logs so we can process them without removing them from the original queue\n temp_queue = queue.Queue()\n temp_logs = []\n\n try:\n # Try to get all available log lines from the queue\n while not log_queue.empty() and len(temp_logs) < max_lines:\n log = log_queue.get_nowait()\n temp_logs.append(log)\n temp_queue.put(log) # Put the log back into the temporary queue\n except queue.Empty:\n pass\n\n # Process conversation records\n logs = temp_logs\n\n # If there are no new logs or not enough logs, try to read the last 
few lines directly from the file\n if len(logs) < max_lines and LOG_FILE and os.path.exists(LOG_FILE):\n try:\n with open(LOG_FILE, \"r\", encoding=\"utf-8\") as f:\n all_lines = f.readlines()\n # If there are already some logs in the queue, only read the remaining needed lines\n remaining_lines = max_lines - len(logs)\n file_logs = (\n all_lines[-remaining_lines:]\n if len(all_lines) > remaining_lines\n else all_lines\n )\n\n # Add file logs before queue logs\n logs = file_logs + logs\n except Exception as e:\n error_msg = f\"Error reading log file: {str(e)}\"\n logging.error(error_msg)\n if not logs: # Only add error message if there are no logs\n logs = [error_msg]\n\n # If there are still no logs, return a prompt message\n if not logs:\n return \"Initialization in progress...\"\n\n # Filter logs, only keep logs with 'camel.agents.chat_agent - INFO'\n filtered_logs = []\n for log in logs:\n if \"camel.agents.chat_agent - INFO\" in log:\n filtered_logs.append(log)\n\n # If there are no logs after filtering, return a prompt message\n if not filtered_logs:\n return \"No conversation records yet.\"\n\n # Process log content, extract the latest user and assistant messages\n simplified_logs = []\n\n # Use a set to track messages that have already been processed, to avoid duplicates\n processed_messages = set()\n\n def process_message(role, content):\n # Create a unique identifier to track messages\n msg_id = f\"{role}:{content}\"\n if msg_id in processed_messages:\n return None\n\n processed_messages.add(msg_id)\n content = content.replace(\"\\\\n\", \"\\n\")\n lines = [line.strip() for line in content.split(\"\\n\")]\n content = \"\\n\".join(lines)\n\n role_emoji = \"\ud83d\ude4b\" if role.lower() == \"user\" else \"\ud83e\udd16\"\n return f\"\"\"### {role_emoji} {role.title()} Agent\n\n{content}\"\"\"\n\n for log in filtered_logs:\n formatted_messages = []\n # Try to extract message array\n messages_match = re.search(\n r\"Model (.*?), index (\\d+), processed these messages: (\\[.*\\])\", log\n )\n\n if messages_match:\n try:\n messages = json.loads(messages_match.group(3))\n for msg in messages:\n if msg.get(\"role\") in [\"user\", \"assistant\"]:\n formatted_msg = process_message(\n msg.get(\"role\"), msg.get(\"content\", \"\")\n )\n if formatted_msg:\n formatted_messages.append(formatted_msg)\n except json.JSONDecodeError:\n pass\n\n # If JSON parsing fails or no message array is found, try to extract conversation content directly\n if not formatted_messages:\n user_pattern = re.compile(r\"\\{'role': 'user', 'content': '(.*?)'\\}\")\n assistant_pattern = re.compile(\n r\"\\{'role': 'assistant', 'content': '(.*?)'\\}\"\n )\n\n for content in user_pattern.findall(log):\n formatted_msg = process_message(\"user\", content)\n if formatted_msg:\n formatted_messages.append(formatted_msg)\n\n for content in assistant_pattern.findall(log):\n formatted_msg = process_message(\"assistant\", content)\n if formatted_msg:\n formatted_messages.append(formatted_msg)\n\n if formatted_messages:\n simplified_logs.append(\"\\n\\n\".join(formatted_messages))\n\n # Format log output, ensure appropriate separation between each conversation record\n formatted_logs = []\n for i, log in enumerate(simplified_logs):\n # Remove excess whitespace characters from beginning and end\n log = log.strip()\n\n formatted_logs.append(log)\n\n # Ensure each conversation record ends with a newline\n if not log.endswith(\"\\n\"):\n formatted_logs.append(\"\\n\")\n\n return \"\\n\".join(formatted_logs)", "creation_date": 
"2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def validate_input(question: str) -> bool:\n \"\"\"Validate if user input is valid\n\n Args:\n question: User question\n\n Returns:\n bool: Whether the input is valid\n \"\"\"\n # Check if input is empty or contains only spaces\n if not question or question.strip() == \"\":\n return False\n return True", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:\n \"\"\"Run the OWL system and return results\n\n Args:\n question: User question\n example_module: Example module name to import (e.g., \"run_terminal_zh\" or \"run_deep\")\n\n Returns:\n Tuple[...]: Answer, token count, status\n \"\"\"\n global CURRENT_PROCESS\n\n # Validate input\n if not validate_input(question):\n logging.warning(\"User submitted invalid input\")\n return (\n \"Please enter a valid question\",\n \"0\",\n \"\u274c Error: Invalid input question\",\n )\n\n try:\n # Ensure environment variables are loaded\n load_dotenv(find_dotenv(), override=True)\n logging.info(\n f\"Processing question: '{question}', using module: {example_module}\"\n )\n\n # Check if the module is in MODULE_DESCRIPTIONS\n if example_module not in MODULE_DESCRIPTIONS:\n logging.error(f\"User selected an unsupported module: {example_module}\")\n return (\n f\"Selected module '{example_module}' is not supported\",\n \"0\",\n \"\u274c Error: Unsupported module\",\n )\n\n # Dynamically import target module\n module_path = f\"examples.{example_module}\"\n try:\n logging.info(f\"Importing module: {module_path}\")\n module = importlib.import_module(module_path)\n except ImportError as ie:\n logging.error(f\"Unable to import module {module_path}: {str(ie)}\")\n return (\n f\"Unable to import module: {module_path}\",\n \"0\",\n f\"\u274c Error: Module {example_module} does not exist or cannot be loaded - {str(ie)}\",\n )\n except Exception as e:\n logging.error(\n f\"Error occurred while importing module {module_path}: {str(e)}\"\n )\n return (\n f\"Error occurred while importing module: {module_path}\",\n \"0\",\n f\"\u274c Error: {str(e)}\",\n )\n\n # Check if it contains the construct_society function\n if not hasattr(module, \"construct_society\"):\n logging.error(\n f\"construct_society function not found in module {module_path}\"\n )\n return (\n f\"construct_society function not found in module {module_path}\",\n \"0\",\n \"\u274c Error: Module interface incompatible\",\n )\n\n # Build society simulation\n try:\n logging.info(\"Building society simulation...\")\n society = module.construct_society(question)\n\n except Exception as e:\n logging.error(f\"Error occurred while building society simulation: {str(e)}\")\n return (\n f\"Error occurred while building society simulation: {str(e)}\",\n \"0\",\n f\"\u274c Error: Build failed - {str(e)}\",\n )\n\n # Run society simulation\n try:\n logging.info(\"Running society simulation...\")\n answer, chat_history, token_info = run_society(society)\n logging.info(\"Society simulation completed\")\n except Exception as e:\n logging.error(f\"Error occurred while running society simulation: {str(e)}\")\n return (\n f\"Error occurred while running society simulation: {str(e)}\",\n \"0\",\n f\"\u274c Error: Run failed - {str(e)}\",\n )\n\n # Safely get token count\n if not isinstance(token_info, dict):\n token_info = {}\n\n completion_tokens = 
token_info.get(\"completion_token_count\", 0)\n prompt_tokens = token_info.get(\"prompt_token_count\", 0)\n total_tokens = completion_tokens + prompt_tokens\n\n logging.info(\n f\"Processing completed, token usage: completion={completion_tokens}, prompt={prompt_tokens}, total={total_tokens}\"\n )\n\n return (\n answer,\n f\"Completion tokens: {completion_tokens:,} | Prompt tokens: {prompt_tokens:,} | Total: {total_tokens:,}\",\n \"\u2705 Successfully completed\",\n )\n\n except Exception as e:\n logging.error(\n f\"Uncaught error occurred while processing the question: {str(e)}\"\n )\n return (f\"Error occurred: {str(e)}\", \"0\", f\"\u274c Error: {str(e)}\")", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def update_module_description(module_name: str) -> str:\n \"\"\"Return the description of the selected module\"\"\"\n return MODULE_DESCRIPTIONS.get(module_name, \"No description available\")", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def init_env_file():\n \"\"\"Initialize .env file if it doesn't exist\"\"\"\n dotenv_path = find_dotenv()\n if not dotenv_path:\n with open(\".env\", \"w\") as f:\n f.write(DEFAULT_ENV_TEMPLATE)\n dotenv_path = find_dotenv()\n return dotenv_path", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def load_env_vars():\n \"\"\"Load environment variables and return as dictionary format\n\n Returns:\n dict: Environment variable dictionary, each value is a tuple containing value and source (value, source)\n \"\"\"\n dotenv_path = init_env_file()\n load_dotenv(dotenv_path, override=True)\n\n # Read environment variables from .env file\n env_file_vars = {}\n with open(dotenv_path, \"r\") as f:\n for line in f:\n line = line.strip()\n if line and not line.startswith(\"#\"):\n if \"=\" in line:\n key, value = line.split(\"=\", 1)\n env_file_vars[key.strip()] = value.strip().strip(\"\\\"'\")\n\n # Get from system environment variables\n system_env_vars = {\n k: v\n for k, v in os.environ.items()\n if k not in env_file_vars and k not in WEB_FRONTEND_ENV_VARS\n }\n\n # Merge environment variables and mark sources\n env_vars = {}\n\n # Add system environment variables (lowest priority)\n for key, value in system_env_vars.items():\n env_vars[key] = (value, \"System\")\n\n # Add .env file environment variables (medium priority)\n for key, value in env_file_vars.items():\n env_vars[key] = (value, \".env file\")\n\n # Add frontend configured environment variables (highest priority)\n for key, value in WEB_FRONTEND_ENV_VARS.items():\n env_vars[key] = (value, \"Frontend configuration\")\n # Ensure operating system environment variables are also updated\n os.environ[key] = value\n\n return env_vars", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def save_env_vars(env_vars):\n \"\"\"Save environment variables to .env file\n\n Args:\n env_vars: Dictionary, keys are environment variable names, values can be strings or (value, source) tuples\n \"\"\"\n try:\n dotenv_path = init_env_file()\n\n # Save each environment variable\n for key, value_data in env_vars.items():\n if key and key.strip(): # Ensure key is not empty\n # Handle case where value might be a tuple\n if isinstance(value_data, tuple):\n value = 
value_data[0]\n else:\n value = value_data\n\n set_key(dotenv_path, key.strip(), value.strip())\n\n # Reload environment variables to ensure they take effect\n load_dotenv(dotenv_path, override=True)\n\n return True, \"Environment variables have been successfully saved!\"\n except Exception as e:\n return False, f\"Error saving environment variables: {str(e)}\"", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def add_env_var(key, value, from_frontend=True):\n \"\"\"Add or update a single environment variable\n\n Args:\n key: Environment variable name\n value: Environment variable value\n from_frontend: Whether it's from frontend configuration, default is True\n \"\"\"\n try:\n if not key or not key.strip():\n return False, \"Variable name cannot be empty\"\n\n key = key.strip()\n value = value.strip()\n\n # If from frontend, add to frontend environment variable dictionary\n if from_frontend:\n WEB_FRONTEND_ENV_VARS[key] = value\n # Directly update system environment variables\n os.environ[key] = value\n\n # Also update .env file\n dotenv_path = init_env_file()\n set_key(dotenv_path, key, value)\n load_dotenv(dotenv_path, override=True)\n\n return True, f\"Environment variable {key} has been successfully added/updated!\"\n except Exception as e:\n return False, f\"Error adding environment variable: {str(e)}\"", "creation_date": "2025-03-13T04:22:12Z", "repo": "camel-ai/owl", "file_path": "owl/webapp.py", "stars": 17445, "label": 0} +{"function": "def set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "app.py", "stars": 17445, "label": 0} +{"function": "def run_inference(\n text_input: str,\n audio_prompt_text_input: str,\n audio_prompt_input: Optional[Tuple[int, np.ndarray]],\n max_new_tokens: int,\n cfg_scale: float,\n temperature: float,\n top_p: float,\n cfg_filter_top_k: int,\n speed_factor: float,\n seed: Optional[int] = None,\n):\n \"\"\"\n Runs Nari inference using the globally loaded model and provided inputs.\n Uses temporary files for text and audio prompt compatibility with inference.generate.\n \"\"\"\n global model, device # Access global model, config, device\n console_output_buffer = io.StringIO()\n\n with contextlib.redirect_stdout(console_output_buffer):\n # Prepend transcript text if audio_prompt provided\n if audio_prompt_input and audio_prompt_text_input and not audio_prompt_text_input.isspace():\n text_input = audio_prompt_text_input + \"\\n\" + text_input\n text_input = text_input.strip()\n\n if audio_prompt_input and (not audio_prompt_text_input or audio_prompt_text_input.isspace()):\n raise gr.Error(\"Audio Prompt Text input cannot be empty.\")\n\n if not text_input or text_input.isspace():\n raise gr.Error(\"Text input cannot be empty.\")\n\n # Preprocess Audio\n temp_txt_file_path = None\n temp_audio_prompt_path = None\n output_audio = (44100, np.zeros(1, dtype=np.float32))\n\n try:\n prompt_path_for_generate = None\n if audio_prompt_input is not None:\n sr, audio_data = audio_prompt_input\n # Check if audio_data is valid\n if audio_data is None or audio_data.size == 0 or audio_data.max() == 0: # Check for silence/empty\n 
gr.Warning(\"Audio prompt seems empty or silent, ignoring prompt.\")\n else:\n # Save prompt audio to a temporary WAV file\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\", delete=False) as f_audio:\n temp_audio_prompt_path = f_audio.name # Store path for cleanup\n\n # Basic audio preprocessing for consistency\n # Convert to float32 in [-1, 1] range if integer type\n if np.issubdtype(audio_data.dtype, np.integer):\n max_val = np.iinfo(audio_data.dtype).max\n audio_data = audio_data.astype(np.float32) / max_val\n elif not np.issubdtype(audio_data.dtype, np.floating):\n gr.Warning(f\"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.\")\n # Attempt conversion, might fail for complex types\n try:\n audio_data = audio_data.astype(np.float32)\n except Exception as conv_e:\n raise gr.Error(f\"Failed to convert audio prompt to float32: {conv_e}\")\n\n # Ensure mono (average channels if stereo)\n if audio_data.ndim > 1:\n if audio_data.shape[0] == 2: # Assume (2, N)\n audio_data = np.mean(audio_data, axis=0)\n elif audio_data.shape[1] == 2: # Assume (N, 2)\n audio_data = np.mean(audio_data, axis=1)\n else:\n gr.Warning(\n f\"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis.\"\n )\n audio_data = (\n audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]\n )\n audio_data = np.ascontiguousarray(audio_data) # Ensure contiguous after slicing/mean\n\n # Write using soundfile\n try:\n sf.write(\n temp_audio_prompt_path, audio_data, sr, subtype=\"FLOAT\"\n ) # Explicitly use FLOAT subtype\n prompt_path_for_generate = temp_audio_prompt_path\n print(f\"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})\")\n except Exception as write_e:\n print(f\"Error writing temporary audio file: {write_e}\")\n raise gr.Error(f\"Failed to save audio prompt: {write_e}\")\n\n # Set and Display Generation Seed\n if seed is None or seed < 0:\n seed = random.randint(0, 2**32 - 1)\n print(f\"\\nNo seed provided, generated random seed: {seed}\\n\")\n else:\n print(f\"\\nUsing user-selected seed: {seed}\\n\")\n set_seed(seed)\n\n # Run Generation\n print(f'Generating speech: \\n\"{text_input}\"\\n')\n\n start_time = time.time()\n\n # Use torch.inference_mode() context manager for the generation call\n with torch.inference_mode():\n output_audio_np = model.generate(\n text_input,\n max_tokens=max_new_tokens,\n cfg_scale=cfg_scale,\n temperature=temperature,\n top_p=top_p,\n cfg_filter_top_k=cfg_filter_top_k, # Pass the value here\n use_torch_compile=False, # Keep False for Gradio stability\n audio_prompt=prompt_path_for_generate,\n verbose=True,\n )\n\n end_time = time.time()\n print(f\"Generation finished in {end_time - start_time:.2f} seconds.\\n\")\n\n # 4. 
Convert Codes to Audio\n if output_audio_np is not None:\n # Get sample rate from the loaded DAC model\n output_sr = 44100\n\n # --- Slow down audio ---\n original_len = len(output_audio_np)\n # Ensure speed_factor is positive and not excessively small/large to avoid issues\n speed_factor = max(0.1, min(speed_factor, 5.0))\n target_len = int(original_len / speed_factor) # Target length based on speed_factor\n if target_len != original_len and target_len > 0: # Only interpolate if length changes and is valid\n x_original = np.arange(original_len)\n x_resampled = np.linspace(0, original_len - 1, target_len)\n resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)\n output_audio = (\n output_sr,\n resampled_audio_np.astype(np.float32),\n ) # Use resampled audio\n print(\n f\"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.\"\n )\n else:\n output_audio = (\n output_sr,\n output_audio_np,\n ) # Keep original if calculation fails or no change\n print(f\"Skipping audio speed adjustment (factor: {speed_factor:.2f}).\")\n # --- End slowdown ---\n\n print(f\"Audio conversion successful. Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}\")\n\n # Explicitly convert to int16 to prevent Gradio warning\n if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:\n audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)\n audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)\n output_audio = (output_sr, audio_for_gradio)\n print(\"Converted audio to int16 for Gradio output.\")\n\n else:\n print(\"\\nGeneration finished, but no valid tokens were produced.\")\n # Return default silence\n gr.Warning(\"Generation produced no output.\")\n\n except Exception as e:\n print(f\"Error during inference: {e}\")\n import traceback\n\n traceback.print_exc()\n # Re-raise as Gradio error to display nicely in the UI\n raise gr.Error(f\"Inference failed: {e}\")\n\n finally:\n # Cleanup Temporary Files defensively\n if temp_txt_file_path and Path(temp_txt_file_path).exists():\n try:\n Path(temp_txt_file_path).unlink()\n print(f\"Deleted temporary text file: {temp_txt_file_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}\")\n if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():\n try:\n Path(temp_audio_prompt_path).unlink()\n print(f\"Deleted temporary audio prompt file: {temp_audio_prompt_path}\")\n except OSError as e:\n print(f\"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}\")\n\n # After generation, capture the printed output\n console_output = console_output_buffer.getvalue()\n\n return output_audio, seed, console_output", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "app.py", "stars": 17445, "label": 0} +{"function": "def set_seed(seed: int):\n \"\"\"Sets the random seed for reproducibility.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # Ensure deterministic behavior for cuDNN (if used)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "cli.py", "stars": 17445, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser(description=\"Generate audio using the Dia model.\")\n\n parser.add_argument(\"text\", type=str, 
help=\"Input text for speech generation.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"Path to save the generated audio file (e.g., output.wav).\"\n )\n\n parser.add_argument(\n \"--repo-id\",\n type=str,\n default=\"nari-labs/Dia-1.6B-0626\",\n help=\"Hugging Face repository ID (e.g., nari-labs/Dia-1.6B-0626).\",\n )\n parser.add_argument(\n \"--local-paths\", action=\"store_true\", help=\"Load model from local config and checkpoint files.\"\n )\n\n parser.add_argument(\n \"--config\", type=str, help=\"Path to local config.json file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, help=\"Path to local model checkpoint .pth file (required if --local-paths is set).\"\n )\n parser.add_argument(\n \"--audio-prompt\", type=str, default=None, help=\"Path to an optional audio prompt WAV file for voice cloning.\"\n )\n\n gen_group = parser.add_argument_group(\"Generation Parameters\")\n gen_group.add_argument(\n \"--max-tokens\",\n type=int,\n default=None,\n help=\"Maximum number of audio tokens to generate (defaults to config value).\",\n )\n gen_group.add_argument(\n \"--cfg-scale\", type=float, default=3.0, help=\"Classifier-Free Guidance scale (default: 3.0).\"\n )\n gen_group.add_argument(\n \"--temperature\", type=float, default=1.3, help=\"Sampling temperature (higher is more random, default: 0.7).\"\n )\n gen_group.add_argument(\"--top-p\", type=float, default=0.95, help=\"Nucleus sampling probability (default: 0.95).\")\n\n infra_group = parser.add_argument_group(\"Infrastructure\")\n infra_group.add_argument(\"--seed\", type=int, default=None, help=\"Random seed for reproducibility.\")\n infra_group.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to run inference on (e.g., 'cuda', 'cpu', default: auto).\",\n )\n\n args = parser.parse_args()\n\n # Validation for local paths\n if args.local_paths:\n if not args.config:\n parser.error(\"--config is required when --local-paths is set.\")\n if not args.checkpoint:\n parser.error(\"--checkpoint is required when --local-paths is set.\")\n if not os.path.exists(args.config):\n parser.error(f\"Config file not found: {args.config}\")\n if not os.path.exists(args.checkpoint):\n parser.error(f\"Checkpoint file not found: {args.checkpoint}\")\n\n # Set seed if provided\n if args.seed is not None:\n set_seed(args.seed)\n print(f\"Using user-selected seed: {args.seed}\")\n\n # Determine device\n device = torch.device(args.device)\n print(f\"Using device: {device}\")\n\n # Load model\n print(\"Loading model...\")\n if args.local_paths:\n print(f\"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'\")\n try:\n model = Dia.from_local(args.config, args.checkpoint, device=device)\n except Exception as e:\n print(f\"Error loading local model: {e}\")\n exit(1)\n else:\n print(f\"Loading from Hugging Face Hub: repo_id='{args.repo_id}'\")\n try:\n model = Dia.from_pretrained(args.repo_id, device=device)\n except Exception as e:\n print(f\"Error loading model from Hub: {e}\")\n exit(1)\n print(\"Model loaded.\")\n\n # Generate audio\n print(\"Generating audio...\")\n try:\n sample_rate = 44100 # Default assumption\n\n output_audio = model.generate(\n text=args.text,\n audio_prompt=args.audio_prompt,\n max_tokens=args.max_tokens,\n cfg_scale=args.cfg_scale,\n temperature=args.temperature,\n top_p=args.top_p,\n )\n print(\"Audio generation complete.\")\n\n print(f\"Saving audio to 
{args.output}...\")\n os.makedirs(os.path.dirname(args.output) or \".\", exist_ok=True)\n\n sf.write(args.output, output_audio, sample_rate)\n print(f\"Audio successfully saved to {args.output}\")\n\n except Exception as e:\n print(f\"Error during audio generation or saving: {e}\")\n exit(1)", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "cli.py", "stars": 17445, "label": 0} +{"function": "def build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute (t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].\n Negative t_idx => BOS; t_idx >= T => PAD.\n \"\"\"\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)\n\n t_idx_BxT = torch.broadcast_to(\n torch.arange(T, dtype=torch.int32)[None, :],\n [B, T],\n )\n t_idx_BxTx1 = t_idx_BxT[..., None]\n t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)\n\n b_idx_BxTxC = torch.broadcast_to(\n torch.arange(B, dtype=torch.int32).view(B, 1, 1),\n [B, T, C],\n )\n c_idx_BxTxC = torch.broadcast_to(\n torch.arange(C, dtype=torch.int32).view(1, 1, C),\n [B, T, C],\n )\n\n # We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail\n t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_clamped_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n dim=1,\n ).long() # Ensure indices are long type for indexing\n\n return t_idx_BxTxC, indices_BTCx3", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "dia/audio.py", "stars": 17445, "label": 0} +{"function": "def apply_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n bos_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n) -> torch.Tensor:\n \"\"\"\n Applies the delay pattern to batched audio tokens using precomputed indices,\n inserting BOS where t_idx < 0 and PAD where t_idx >= T.\n\n Args:\n audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)\n pad_value: the padding token\n bos_value: the BOS token\n precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices\n\n Returns:\n result_BxTxC: [B, T, C] delayed audio tokens\n \"\"\"\n device = audio_BxTxC.device # Get device from input tensor\n t_idx_BxTxC, indices_BTCx3 = precomp\n t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Equivalent of tf.gather_nd using advanced indexing\n # Ensure indices are long type if not already (build_delay_indices should handle this)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)\n\n # Create masks on the correct device\n mask_bos = t_idx_BxTxC < 0 # => place bos_value\n mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value\n\n # Create scalar tensors on the correct device\n bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n\n # If mask_bos, BOS; else if mask_pad, PAD; else original gather\n # All tensors should now be on the same device\n result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))\n\n return result_BxTxC", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "dia/audio.py", "stars": 17445, "label": 0} +{"function": "def build_revert_indices(B: int, T: int, C: int, delay_pattern: 
tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Precompute indices for the revert operation using PyTorch.\n\n Returns:\n A tuple (t_idx_BxTxC, indices_BTCx3) where:\n - t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.\n - indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:\n batch indices, clamped time indices, and channel indices.\n \"\"\"\n # Use default device unless specified otherwise; assumes inputs might define device later\n device = None # Or determine dynamically if needed, e.g., from a model parameter\n\n delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)\n\n t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])\n t_idx_BT1 = t_idx_BT1.unsqueeze(-1)\n\n t_idx_BxTxC = torch.minimum(\n t_idx_BT1 + delay_arr.view(1, 1, C),\n torch.tensor(T - 1, device=device),\n )\n b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])\n c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])\n\n indices_BTCx3 = torch.stack(\n [\n b_idx_BxTxC.reshape(-1),\n t_idx_BxTxC.reshape(-1),\n c_idx_BxTxC.reshape(-1),\n ],\n axis=1,\n ).long() # Ensure indices are long type\n\n return t_idx_BxTxC, indices_BTCx3", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "dia/audio.py", "stars": 17445, "label": 0} +{"function": "def revert_audio_delay(\n audio_BxTxC: torch.Tensor,\n pad_value: int,\n precomp: tp.Tuple[torch.Tensor, torch.Tensor],\n T: int,\n) -> torch.Tensor:\n \"\"\"\n Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).\n\n Args:\n audio_BxTxC: Input delayed audio tensor\n pad_value: Padding value for out-of-bounds indices\n precomp: Precomputed revert indices tuple containing:\n - t_idx_BxTxC: Time offset indices tensor\n - indices_BTCx3: Gather indices tensor for original audio\n T: Original sequence length before padding\n\n Returns:\n Reverted audio tensor with same shape as input\n \"\"\"\n t_idx_BxTxC, indices_BTCx3 = precomp\n device = audio_BxTxC.device # Get device from input tensor\n\n # Move precomputed indices to the same device as audio_BxTxC if they aren't already\n t_idx_BxTxC = t_idx_BxTxC.to(device)\n indices_BTCx3 = indices_BTCx3.to(device)\n\n # Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)\n gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]\n gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping\n\n # Create pad_tensor on the correct device\n pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)\n # Create T tensor on the correct device for comparison\n T_tensor = torch.tensor(T, device=device)\n\n result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where\n\n return result_BxTxC", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "dia/audio.py", "stars": 17445, "label": 0} +{"function": " def save(self, path: str) -> None:\n \"\"\"Save the current configuration instance to a JSON file.\n\n Ensures the parent directory exists and the file has a .json extension.\n\n Args:\n path: The target file path to save the configuration.\n\n Raises:\n ValueError: If the path is not a file with a .json extension.\n \"\"\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n config_json = 
self.model_dump_json(indent=2)\n with open(path, \"w\") as f:\n f.write(config_json)", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "dia/config.py", "stars": 17445, "label": 0} +{"function": " def load(cls, path: str) -> \"DiaConfig | None\":\n \"\"\"Load and validate a Dia configuration from a JSON file.\n\n Args:\n path: The path to the configuration file.\n\n Returns:\n A validated DiaConfig instance if the file exists and is valid,\n otherwise None if the file is not found.\n\n Raises:\n ValueError: If the path does not point to an existing .json file.\n pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.\n \"\"\"\n try:\n with open(path, \"r\") as f:\n content = f.read()\n return cls.model_validate_json(content)\n except FileNotFoundError:\n return None", "creation_date": "2025-04-21T15:13:40Z", "repo": "nari-labs/dia", "file_path": "dia/config.py", "stars": 17445, "label": 0} +{"function": "def _update_previous_errors(\n previous_errors: List[Flake8Error], replacement_window: Tuple[int, int], replacement_n_lines: int\n) -> List[Flake8Error]:\n \"\"\"Update the line numbers of the previous errors to what they would be after the edit window.\n This is a helper function for `_filter_previous_errors`.\n\n All previous errors that are inside of the edit window should not be ignored,\n so they are removed from the previous errors list.\n\n Args:\n previous_errors: list of errors with old line numbers\n replacement_window: the window of the edit/lines that will be replaced\n replacement_n_lines: the number of lines that will be used to replace the text\n\n Returns:\n list of errors with updated line numbers\n \"\"\"\n updated = []\n lines_added = replacement_n_lines - (replacement_window[1] - replacement_window[0] + 1)\n for error in previous_errors:\n if error.line_number < replacement_window[0]:\n # no need to adjust the line number\n updated.append(error)\n continue\n if replacement_window[0] <= error.line_number <= replacement_window[1]:\n # The error is within the edit window, so let's not ignore it\n # either way (we wouldn't know how to adjust the line number anyway)\n continue\n # We're out of the edit window, so we need to adjust the line number\n updated.append(Flake8Error(error.filename, error.line_number + lines_added, error.col_number, error.problem))\n return updated", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": "def format_flake8_output(\n input_string: str,\n show_line_numbers: bool = False,\n *,\n previous_errors_string: str = \"\",\n replacement_window: Optional[Tuple[int, int]] = None,\n replacement_n_lines: Optional[int] = None,\n) -> str:\n \"\"\"Filter flake8 output for previous errors and print it for a given file.\n\n Args:\n input_string: The flake8 output as a string\n show_line_numbers: Whether to show line numbers in the output\n previous_errors_string: The previous errors as a string\n replacement_window: The window of the edit (lines that will be replaced)\n replacement_n_lines: The number of lines used to replace the text\n\n Returns:\n The filtered flake8 output as a string\n \"\"\"\n errors = [Flake8Error.from_line(line.strip()) for line in input_string.split(\"\\n\") if line.strip()]\n # print(f\"New errors before filtering: {errors=}\")\n lines = []\n if previous_errors_string:\n assert replacement_window is not None\n assert replacement_n_lines is not None\n 
previous_errors = [\n Flake8Error.from_line(line.strip()) for line in previous_errors_string.split(\"\\n\") if line.strip()\n ]\n # print(f\"Previous errors before updating: {previous_errors=}\")\n previous_errors = _update_previous_errors(previous_errors, replacement_window, replacement_n_lines)\n # print(f\"Previous errors after updating: {previous_errors=}\")\n errors = [error for error in errors if error not in previous_errors]\n # Sometimes new errors appear above the replacement window that were 'shadowed' by the previous errors\n # they still clearly aren't caused by the edit.\n errors = [error for error in errors if error.line_number >= replacement_window[0]]\n # print(f\"New errors after filtering: {errors=}\")\n for error in errors:\n if not show_line_numbers:\n lines.append(f\"- {error.problem}\")\n else:\n lines.append(f\"- line {error.line_number} col {error.col_number}: {error.problem}\")\n return \"\\n\".join(lines)", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": "def flake8(file_path: str) -> str:\n \"\"\"Run flake8 on a given file and return the output as a string\"\"\"\n if Path(file_path).suffix != \".py\":\n return \"\"\n cmd = registry.get(\"LINT_COMMAND\", \"flake8 --isolated --select=F821,F822,F831,E111,E112,E113,E999,E902 {file_path}\")\n # don't use capture_output because it's not compatible with python3.6\n out = subprocess.run(cmd.format(file_path=file_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return out.stdout.decode()", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": " def __init__(self, filename: str, line_number: int, col_number: int, problem: str):\n self.filename = filename\n self.line_number = line_number\n self.col_number = col_number\n self.problem = problem", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": " def from_line(cls, line: str):\n try:\n prefix, _sep, problem = line.partition(\": \")\n filename, line_number, col_number = prefix.split(\":\")\n except (ValueError, IndexError) as e:\n msg = f\"Invalid flake8 error line: {line}\"\n raise ValueError(msg) from e\n return cls(filename, int(line_number), int(col_number), problem)", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": " def __eq__(self, other):\n if not isinstance(other, Flake8Error):\n return NotImplemented\n return (\n self.filename == other.filename\n and self.line_number == other.line_number\n and self.col_number == other.col_number\n and self.problem == other.problem\n )", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": " def __repr__(self):\n return f\"Flake8Error(filename={self.filename}, line_number={self.line_number}, col_number={self.col_number}, problem={self.problem})\"", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/flake8_utils.py", "stars": 16639, "label": 0} +{"function": "def _find_all(a_str: str, sub: str):\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1:\n return\n yield start\n start 
+= len(sub)", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/windowed_file.py", "stars": 16639, "label": 0} +{"function": " def __init__(self, first_replaced_line: int, n_search_lines: int, n_replace_lines: int, n_replacements: int):\n self.first_replaced_line = first_replaced_line\n self.n_search_lines = n_search_lines\n self.n_replace_lines = n_replace_lines\n self.n_replacements = n_replacements", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/windowed_file.py", "stars": 16639, "label": 0} +{"function": " def __repr__(self):\n return f\"ReplacementInfo(first_replaced_line={self.first_replaced_line}, n_search_lines={self.n_search_lines}, n_replace_lines={self.n_replace_lines}, n_replacements={self.n_replacements})\"", "creation_date": "2025-05-22T15:54:45Z", "repo": "SWE-agent/SWE-agent", "file_path": "tools/windowed/lib/windowed_file.py", "stars": 16639, "label": 0} +{"function": "def partial_fields(target_class, kwargs):\n return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app.py", "stars": 16573, "label": 0} +{"function": "def fast_check_ffmpeg():\n try:\n subprocess.run([\"ffmpeg\", \"-version\"], capture_output=True, check=True)\n return True\n except:\n return False", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app.py", "stars": 16573, "label": 0} +{"function": "def gpu_wrapped_execute_video(*args, **kwargs):\n return gradio_pipeline.execute_video(*args, **kwargs)", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app.py", "stars": 16573, "label": 0} +{"function": "def gpu_wrapped_execute_image_retargeting(*args, **kwargs):\n return gradio_pipeline.execute_image_retargeting(*args, **kwargs)", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app.py", "stars": 16573, "label": 0} +{"function": "def gpu_wrapped_execute_video_retargeting(*args, **kwargs):\n return gradio_pipeline.execute_video_retargeting(*args, **kwargs)", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app.py", "stars": 16573, "label": 0} +{"function": "def reset_sliders(*args, **kwargs):\n return 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.5, True, True", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app.py", "stars": 16573, "label": 0} +{"function": "def partial_fields(target_class, kwargs):\n return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})", "creation_date": "2024-08-02T14:39:05Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app_animals.py", "stars": 16573, "label": 0} +{"function": "def fast_check_ffmpeg():\n try:\n subprocess.run([\"ffmpeg\", \"-version\"], capture_output=True, check=True)\n return True\n except:\n return False", "creation_date": "2024-08-02T14:39:05Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app_animals.py", "stars": 16573, "label": 0} +{"function": "def gpu_wrapped_execute_video(*args, **kwargs):\n return gradio_pipeline_animal.execute_video(*args, **kwargs)", "creation_date": "2024-08-02T14:39:05Z", "repo": "KwaiVGI/LivePortrait", "file_path": "app_animals.py", "stars": 16573, "label": 0} +{"function": "def partial_fields(target_class, kwargs):\n return target_class(**{k: v for k, v in 
kwargs.items() if hasattr(target_class, k)})", "creation_date": "2024-07-03T20:32:47Z", "repo": "KwaiVGI/LivePortrait", "file_path": "inference.py", "stars": 16573, "label": 0} +{"function": " def __init__(self):\n self.update_progress = None\n self.run_ui = False\n self.tasks = []\n self.channel_items: CategoryChannelData = {}\n self.hotel_fofa_result = {}\n self.hotel_foodie_result = {}\n self.multicast_result = {}\n self.subscribe_result = {}\n self.online_search_result = {}\n self.epg_result = {}\n self.channel_data: CategoryChannelData = {}\n self.pbar = None\n self.total = 0\n self.start_time = None\n self.stop_event = None\n self.ipv6_support = False\n self.now = None", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " async def visit_page(self, channel_names: list[str] = None):\n tasks_config = [\n (\"hotel_fofa\", get_channels_by_fofa, \"hotel_fofa_result\"),\n (\"multicast\", get_channels_by_multicast, \"multicast_result\"),\n (\"hotel_foodie\", get_channels_by_hotel, \"hotel_foodie_result\"),\n (\"subscribe\", get_channels_by_subscribe_urls, \"subscribe_result\"),\n (\n \"online_search\",\n get_channels_by_online_search,\n \"online_search_result\",\n ),\n (\"epg\", get_epg, \"epg_result\"),\n ]\n\n for setting, task_func, result_attr in tasks_config:\n if (\n setting == \"hotel_foodie\" or setting == \"hotel_fofa\"\n ) and config.open_hotel == False:\n continue\n if config.open_method[setting]:\n if setting == \"subscribe\":\n subscribe_urls = get_urls_from_file(constants.subscribe_path)\n whitelist_urls = get_urls_from_file(constants.whitelist_path)\n if not os.getenv(\"GITHUB_ACTIONS\") and config.cdn_url:\n subscribe_urls = [join_url(config.cdn_url, url) if \"raw.githubusercontent.com\" in url else url\n for url in subscribe_urls]\n task = asyncio.create_task(\n task_func(subscribe_urls,\n names=channel_names,\n whitelist=whitelist_urls,\n callback=self.update_progress\n )\n )\n elif setting == \"hotel_foodie\" or setting == \"hotel_fofa\":\n task = asyncio.create_task(task_func(callback=self.update_progress))\n else:\n task = asyncio.create_task(\n task_func(channel_names, callback=self.update_progress)\n )\n self.tasks.append(task)\n setattr(self, result_attr, await task)", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " def pbar_update(self, name: str = \"\", item_name: str = \"\"):\n if self.pbar.n < self.total:\n self.pbar.update()\n self.update_progress(\n f\"\u6b63\u5728\u8fdb\u884c{name}, \u5269\u4f59{self.total - self.pbar.n}\u4e2a{item_name}, \u9884\u8ba1\u5269\u4f59\u65f6\u95f4: {get_pbar_remaining(n=self.pbar.n, total=self.total, start_time=self.start_time)}\",\n int((self.pbar.n / self.total) * 100),\n )", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " async def main(self):\n try:\n main_start_time = time()\n if config.open_update:\n self.channel_items = get_channel_items()\n channel_names = [\n name\n for channel_obj in self.channel_items.values()\n for name in channel_obj.keys()\n ]\n if not channel_names:\n print(f\"\u274c No channel names found! 
Please check the {config.source_file}!\")\n return\n await self.visit_page(channel_names)\n self.tasks = []\n append_total_data(\n self.channel_items.items(),\n self.channel_data,\n self.hotel_fofa_result,\n self.multicast_result,\n self.hotel_foodie_result,\n self.subscribe_result,\n self.online_search_result,\n )\n cache_result = self.channel_data\n test_result = {}\n if config.open_speed_test:\n urls_total = get_urls_len(self.channel_data)\n test_data = copy.deepcopy(self.channel_data)\n process_nested_dict(\n test_data,\n seen=set(),\n filter_host=config.speed_test_filter_host,\n ipv6_support=self.ipv6_support\n )\n self.total = get_urls_len(test_data)\n print(f\"Total urls: {urls_total}, need to test speed: {self.total}\")\n self.update_progress(\n f\"\u6b63\u5728\u8fdb\u884c\u6d4b\u901f, \u5171{urls_total}\u4e2a\u63a5\u53e3, {self.total}\u4e2a\u63a5\u53e3\u9700\u8981\u8fdb\u884c\u6d4b\u901f\",\n 0,\n )\n self.start_time = time()\n self.pbar = tqdm(total=self.total, desc=\"Speed test\")\n test_result = await test_speed(\n test_data,\n ipv6=self.ipv6_support,\n callback=lambda: self.pbar_update(name=\"\u6d4b\u901f\", item_name=\"\u63a5\u53e3\"),\n )\n cache_result = merge_objects(cache_result, test_result, match_key=\"url\")\n self.pbar.close()\n self.channel_data = sort_channel_result(\n self.channel_data,\n result=test_result,\n filter_host=config.speed_test_filter_host,\n ipv6_support=self.ipv6_support\n )\n self.update_progress(f\"\u6b63\u5728\u751f\u6210\u7ed3\u679c\u6587\u4ef6\", 0)\n write_channel_to_file(\n self.channel_data,\n epg=self.epg_result,\n ipv6=self.ipv6_support,\n first_channel_name=channel_names[0],\n )\n if config.open_history:\n if os.path.exists(constants.cache_path):\n with gzip.open(constants.cache_path, \"rb\") as file:\n try:\n cache = pickle.load(file)\n except EOFError:\n cache = {}\n cache_result = merge_objects(cache, cache_result, match_key=\"url\")\n with gzip.open(constants.cache_path, \"wb\") as file:\n pickle.dump(cache_result, file)\n print(\n f\"\ud83e\udd73 Update completed! 
Total time spent: {format_interval(time() - main_start_time)}.\"\n )\n if self.run_ui:\n open_service = config.open_service\n service_tip = \", \u53ef\u4f7f\u7528\u4ee5\u4e0b\u5730\u5740\u8fdb\u884c\u89c2\u770b\" if open_service else \"\"\n tip = (\n f\"\u2705 \u670d\u52a1\u542f\u52a8\u6210\u529f{service_tip}\"\n if open_service and config.open_update == False\n else f\"\ud83e\udd73\u66f4\u65b0\u5b8c\u6210, \u8017\u65f6: {format_interval(time() - main_start_time)}{service_tip}\"\n )\n self.update_progress(\n tip,\n 100,\n finished=True,\n url=f\"{get_ip_address()}\" if open_service else None,\n now=self.now\n )\n except asyncio.exceptions.CancelledError:\n print(\"Update cancelled!\")", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " async def start(self, callback=None):\n def default_callback(self, *args, **kwargs):\n pass\n\n self.update_progress = callback or default_callback\n self.run_ui = True if callback else False\n if self.run_ui:\n self.update_progress(f\"\u6b63\u5728\u68c0\u67e5\u7f51\u7edc\u662f\u5426\u652f\u6301IPv6\", 0)\n self.ipv6_support = config.ipv6_support or check_ipv6_support()\n if not os.getenv(\"GITHUB_ACTIONS\") and config.update_interval:\n await self.scheduler(asyncio.Event())\n else:\n await self.main()", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " def stop(self):\n for task in self.tasks:\n task.cancel()\n self.tasks = []\n if self.pbar:\n self.pbar.close()\n if self.stop_event:\n self.stop_event.set()", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " async def scheduler(self, stop_event):\n self.stop_event = stop_event\n while not stop_event.is_set():\n self.now = datetime.datetime.now(pytz.timezone(config.time_zone))\n await self.main()\n next_time = self.now + datetime.timedelta(hours=config.update_interval)\n print(f\"\ud83d\udd52 Next update time: {next_time:%Y-%m-%d %H:%M:%S}\")\n try:\n await asyncio.wait_for(stop_event.wait(), timeout=config.update_interval * 3600)\n except asyncio.TimeoutError:\n continue", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " def default_callback(self, *args, **kwargs):\n pass", "creation_date": "2024-07-09T09:03:35Z", "repo": "Guovin/iptv-api", "file_path": "main.py", "stars": 16557, "label": 0} +{"function": " def __init__(self):\n self.primary_to_aliases: dict[str, set[str]] = {}\n self.alias_to_primary: dict[str, str] = {}\n\n real_path = get_real_path(resource_path(constants.alias_path))\n if os.path.exists(real_path):\n with open(real_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n if line.strip() and not line.startswith(\"#\") and \",\" in line:\n parts = [p.strip() for p in line.split(\",\")]\n primary = parts[0]\n aliases = set(parts[1:])\n aliases.add(format_name(primary))\n self.primary_to_aliases[primary] = aliases\n for alias in aliases:\n self.alias_to_primary[alias] = primary\n self.alias_to_primary[primary] = primary", "creation_date": "2025-04-18T05:57:20Z", "repo": "Guovin/iptv-api", "file_path": "utils/alias.py", "stars": 16557, "label": 0} +{"function": " def get(self, name: str):\n \"\"\"\n Get the alias by name\n \"\"\"\n return self.primary_to_aliases.get(name, set())", "creation_date": "2025-04-18T05:57:20Z", "repo": "Guovin/iptv-api", 
"file_path": "utils/alias.py", "stars": 16557, "label": 0} +{"function": "def parse_pytest_output(output: str) -> List[Dict[str, any]]:\n # Match lines like:\n # all_llm_provider/test_all_llm_provider.py .......... [55%]\n # llamaindex_examples/legal_research_rag/test_legal_rag.py F [94%]\n test_result_pattern = re.compile(r\"^(.*\\.py)\\s+([.EF]+)\")\n results = []\n for line in output.splitlines():\n match = test_result_pattern.match(line.strip())\n if match:\n module = match.group(1)\n result_str = match.group(2)\n passed = result_str.count(\".\")\n failed = result_str.count(\"F\")\n errors = result_str.count(\"E\")\n total = len(result_str)\n results.append({\n \"module\": module,\n \"count\": total,\n \"passed\": passed,\n \"failed\": failed,\n \"errors\": errors\n })\n return results", "creation_date": "2025-04-07T13:11:16Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/run_pytest_and_print_and_save_results.py", "stars": 16176, "label": 0} +{"function": "def generate_test_report(test_results, duration):\n total_tests = sum(item[\"count\"] for item in test_results)\n total_passed = sum(item[\"passed\"] for item in test_results)\n total_failed = sum(item[\"failed\"] for item in test_results)\n total_errors = sum(item[\"errors\"] for item in test_results)\n summary = f\"\"\"\nTEST EXECUTION REPORT\n=====================\nDate: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSummary:\n- Duration: {duration}\n- Total Tests: {total_tests}\n- Passed: {total_passed} ({total_passed / total_tests * 100:.1f}%)\n- Failed: {total_failed} ({total_failed / total_tests * 100:.1f}%)\n- Errors: {total_errors} ({total_errors / total_tests * 100:.1f}%)\n\"\"\"\n # Create rows for tabulate\n table_data = []\n for result in test_results:\n if result[\"errors\"] > 0:\n status = \"\ud83d\udca5\" # Error symbol\n elif result[\"failed\"] > 0:\n status = \"\u274c\" # Failed symbol\n else:\n status = \"\u2705\" # Passed symbol\n \n table_data.append([\n result[\"module\"],\n result[\"count\"],\n result[\"passed\"],\n result[\"failed\"],\n result[\"errors\"],\n status\n ])\n headers = [\"Test Module\", \"Tests\", \"Passed\", \"Failed\", \"Errors\", \"Status\"]\n table = tabulate(table_data, headers=headers, tablefmt=\"fancy_grid\", \n colalign=(\"left\", \"right\", \"right\", \"right\", \"right\", \"center\"))\n report = summary + \"\\nDetailed Test Results:\\n\" + table\n \n if total_failed > 0 or total_errors > 0:\n problematic_tests = [r for r in test_results if r[\"failed\"] > 0 or r[\"errors\"] > 0]\n report += \"\\n\\nProblematic Tests:\\n\"\n for test in problematic_tests:\n issues = []\n if test[\"failed\"] > 0:\n issues.append(f\"{test['failed']} failed\")\n if test[\"errors\"] > 0:\n issues.append(f\"{test['errors']} errors\")\n report += f\"- {test['module']}: {', '.join(issues)}\\n\"\n report += f\"{'-'*50}\\n\"\n report += \" (Investigation needed - check test logs for specific issues)\\n\"\n report += f\"{'-'*50}\"\n return report", "creation_date": "2025-04-07T13:11:16Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/run_pytest_and_print_and_save_results.py", "stars": 16176, "label": 0} +{"function": "def save_report(report, filename=None):\n \"\"\"Save the report to a file.\"\"\"\n if filename is None:\n filename = f\"test_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt\"\n with open(filename, 'w') as file:\n file.write(report)\n print(f\"Report saved to {os.path.abspath(filename)}\")", "creation_date": "2025-04-07T13:11:16Z", "repo": 
"raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/run_pytest_and_print_and_save_results.py", "stars": 16176, "label": 0} +{"function": "def run_pytest_and_generate_report():\n start_time = datetime.now()\n\n # Run pytest\n output = subprocess.run(\n \"python -m pytest\",\n shell=True,\n capture_output=True,\n text=True\n ).stdout\n\n # duration\n end_time = datetime.now()\n duration = f\"{(end_time - start_time).total_seconds() / 60:.2f} minutes\"\n\n\n # Parse test results from output\n test_results = parse_pytest_output(output)\n # Generate report\n report = generate_test_report(test_results, duration)\n # Print and save\n print(report)\n save_report(report)", "creation_date": "2025-04-07T13:11:16Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/run_pytest_and_print_and_save_results.py", "stars": 16176, "label": 0} +{"function": "def tracer():\n user_details = {\n \"project_name\": \"test_project\",\n \"dataset_name\": \"test_dataset\",\n \"project_id\": \"test_id\",\n \"trace_name\": \"test_trace\",\n \"interval_time\": 1\n }\n tracer = BaseTracer(user_details)\n tracer.trace_metrics = []\n tracer.visited_metrics = []\n return tracer", "creation_date": "2025-03-14T10:44:45Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/test_catalyst/test_base_tracer_add_metrics.py", "stars": 16176, "label": 0} +{"function": "def test_add_metrics_individual_params(tracer):\n \"\"\"Test adding metrics using individual parameters\"\"\"\n tracer.trace = {} # Initialize trace\n tracer.add_metrics(\n name=\"test_metric\",\n score=0.95,\n reasoning=\"Good performance\",\n cost=0.01,\n latency=100,\n metadata={\"key\": \"value\"},\n config={\"threshold\": 0.8}\n )\n \n assert len(tracer.trace_metrics) == 1\n metric = tracer.trace_metrics[0]\n assert metric[\"name\"] == \"test_metric\"\n assert metric[\"score\"] == 0.95\n assert metric[\"reason\"] == \"Good performance\"\n assert metric[\"source\"] == \"user\"\n assert metric[\"cost\"] == 0.01\n assert metric[\"latency\"] == 100\n assert metric[\"metadata\"] == {\"key\": \"value\"}\n assert metric[\"config\"] == {\"threshold\": 0.8}", "creation_date": "2025-03-14T10:44:45Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/test_catalyst/test_base_tracer_add_metrics.py", "stars": 16176, "label": 0} +{"function": "def test_add_metrics_dict_input(tracer):\n \"\"\"Test adding metrics using dictionary input\"\"\"\n tracer.trace = {}\n metric_dict = {\n \"name\": \"test_metric\",\n \"score\": 0.95,\n \"reasoning\": \"Good performance\"\n }\n tracer.add_metrics(metric_dict)\n \n assert len(tracer.trace_metrics) == 1\n metric = tracer.trace_metrics[0]\n assert metric[\"name\"] == \"test_metric\"\n assert metric[\"score\"] == 0.95\n assert metric[\"reason\"] == \"Good performance\"", "creation_date": "2025-03-14T10:44:45Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/test_catalyst/test_base_tracer_add_metrics.py", "stars": 16176, "label": 0} +{"function": "def test_add_metrics_list_input(tracer):\n \"\"\"Test adding multiple metrics using list input\"\"\"\n tracer.trace = {}\n metrics = [\n {\"name\": \"metric1\", \"score\": 0.95},\n {\"name\": \"metric2\", \"score\": 0.85}\n ]\n tracer.add_metrics(metrics)\n \n assert len(tracer.trace_metrics) == 2\n assert tracer.trace_metrics[0][\"name\"] == \"metric1\"\n assert tracer.trace_metrics[1][\"name\"] == \"metric2\"", "creation_date": "2025-03-14T10:44:45Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/test_catalyst/test_base_tracer_add_metrics.py", 
"stars": 16176, "label": 0} +{"function": "def test_add_metrics_duplicate_names(tracer):\n \"\"\"Test handling of duplicate metric names\"\"\"\n tracer.trace = {}\n metrics = [\n {\"name\": \"metric1\", \"score\": 0.95},\n {\"name\": \"metric1\", \"score\": 0.85}\n ]\n tracer.add_metrics(metrics)\n \n assert len(tracer.trace_metrics) == 2\n assert tracer.trace_metrics[0][\"name\"] == \"metric1\"\n assert tracer.trace_metrics[1][\"name\"] == \"metric1_2\"", "creation_date": "2025-03-14T10:44:45Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/test_catalyst/test_base_tracer_add_metrics.py", "stars": 16176, "label": 0} +{"function": "def test_add_metrics_missing_required_fields(tracer):\n \"\"\"Test validation of required fields\"\"\"\n tracer.trace = {}\n metrics = [{\"name\": \"metric1\"}] # Missing score\n \n with patch('ragaai_catalyst.tracers.agentic_tracing.tracers.base.logger') as mock_logger:\n tracer.add_metrics(metrics)\n mock_logger.error.assert_called_once_with(\n \"Validation Error: Metric must contain 'name' and 'score' fields\"\n )\n assert len(tracer.trace_metrics) == 0", "creation_date": "2025-03-14T10:44:45Z", "repo": "raga-ai-hub/RagaAI-Catalyst", "file_path": "tests/test_catalyst/test_base_tracer_add_metrics.py", "stars": 16176, "label": 0} +{"function": "def anyio_backend():\n return \"asyncio\"", "creation_date": "2024-11-05T23:42:41Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/conftest.py", "stars": 16050, "label": 0} +{"function": "async def test_simple_echo():\n \"\"\"Test the simple echo server\"\"\"\n from examples.fastmcp.simple_echo import mcp\n\n async with client_session(mcp._mcp_server) as client:\n result = await client.call_tool(\"echo\", {\"text\": \"hello\"})\n assert len(result.content) == 1\n content = result.content[0]\n assert isinstance(content, TextContent)\n assert content.text == \"hello\"", "creation_date": "2024-12-21T00:41:39Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/test_examples.py", "stars": 16050, "label": 0} +{"function": "async def test_complex_inputs():\n \"\"\"Test the complex inputs server\"\"\"\n from examples.fastmcp.complex_inputs import mcp\n\n async with client_session(mcp._mcp_server) as client:\n tank = {\"shrimp\": [{\"name\": \"bob\"}, {\"name\": \"alice\"}]}\n result = await client.call_tool(\"name_shrimp\", {\"tank\": tank, \"extra_names\": [\"charlie\"]})\n assert len(result.content) == 3\n assert isinstance(result.content[0], TextContent)\n assert isinstance(result.content[1], TextContent)\n assert isinstance(result.content[2], TextContent)\n assert result.content[0].text == \"bob\"\n assert result.content[1].text == \"alice\"\n assert result.content[2].text == \"charlie\"", "creation_date": "2024-12-21T00:41:39Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/test_examples.py", "stars": 16050, "label": 0} +{"function": "async def test_desktop(monkeypatch):\n \"\"\"Test the desktop server\"\"\"\n from pathlib import Path\n\n from pydantic import AnyUrl\n\n from examples.fastmcp.desktop import mcp\n\n # Mock desktop directory listing\n mock_files = [Path(\"/fake/path/file1.txt\"), Path(\"/fake/path/file2.txt\")]\n monkeypatch.setattr(Path, \"iterdir\", lambda self: mock_files)\n monkeypatch.setattr(Path, \"home\", lambda: Path(\"/fake/home\"))\n\n async with client_session(mcp._mcp_server) as client:\n # Test the sum function\n result = await client.call_tool(\"sum\", {\"a\": 1, \"b\": 2})\n assert len(result.content) == 1\n content = 
result.content[0]\n assert isinstance(content, TextContent)\n assert content.text == \"3\"\n\n # Test the desktop resource\n result = await client.read_resource(AnyUrl(\"dir://desktop\"))\n assert len(result.contents) == 1\n content = result.contents[0]\n assert isinstance(content, TextResourceContents)\n assert isinstance(content.text, str)\n if sys.platform == \"win32\":\n file_1 = \"/fake/path/file1.txt\".replace(\"/\", \"\\\\\\\\\") # might be a bug\n file_2 = \"/fake/path/file2.txt\".replace(\"/\", \"\\\\\\\\\") # might be a bug\n assert file_1 in content.text\n assert file_2 in content.text\n # might be a bug, but the test is passing\n else:\n assert \"/fake/path/file1.txt\" in content.text\n assert \"/fake/path/file2.txt\" in content.text", "creation_date": "2024-12-21T00:41:39Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/test_examples.py", "stars": 16050, "label": 0} +{"function": "def test_docs_examples(example: CodeExample, eval_example: EvalExample):\n ruff_ignore: list[str] = [\"F841\", \"I001\", \"F821\"] # F821: undefined names (snippets lack imports)\n\n # Use project's actual line length of 120\n eval_example.set_config(ruff_ignore=ruff_ignore, target_version=\"py310\", line_length=120)\n\n # Use Ruff for both formatting and linting (skip Black)\n if eval_example.update_examples: # pragma: no cover\n eval_example.format_ruff(example)\n else:\n eval_example.lint_ruff(example)", "creation_date": "2024-12-21T00:41:39Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/test_examples.py", "stars": 16050, "label": 0} +{"function": "async def test_jsonrpc_request():\n json_data = {\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"initialize\",\n \"params\": {\n \"protocolVersion\": LATEST_PROTOCOL_VERSION,\n \"capabilities\": {\"batch\": None, \"sampling\": None},\n \"clientInfo\": {\"name\": \"mcp\", \"version\": \"0.1.0\"},\n },\n }\n\n request = JSONRPCMessage.model_validate(json_data)\n assert isinstance(request.root, JSONRPCRequest)\n ClientRequest.model_validate(request.model_dump(by_alias=True, exclude_none=True))\n\n assert request.root.jsonrpc == \"2.0\"\n assert request.root.id == 1\n assert request.root.method == \"initialize\"\n assert request.root.params is not None\n assert request.root.params[\"protocolVersion\"] == LATEST_PROTOCOL_VERSION", "creation_date": "2024-09-24T21:04:19Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/test_types.py", "stars": 16050, "label": 0} +{"function": " def test_removes_fragment(self):\n \"\"\"Fragment should be removed per RFC 8707.\"\"\"\n assert resource_url_from_server_url(\"https://example.com/path#fragment\") == \"https://example.com/path\"\n assert resource_url_from_server_url(\"https://example.com/#fragment\") == \"https://example.com/\"", "creation_date": "2025-06-23T16:10:20Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/shared/test_auth_utils.py", "stars": 16050, "label": 0} +{"function": " def test_preserves_path(self):\n \"\"\"Path should be preserved.\"\"\"\n assert (\n resource_url_from_server_url(\"https://example.com/path/to/resource\")\n == \"https://example.com/path/to/resource\"\n )\n assert resource_url_from_server_url(\"https://example.com/\") == \"https://example.com/\"\n assert resource_url_from_server_url(\"https://example.com\") == \"https://example.com\"", "creation_date": "2025-06-23T16:10:20Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/shared/test_auth_utils.py", "stars": 16050, "label": 0} +{"function": " def 
test_preserves_query(self):\n \"\"\"Query parameters should be preserved.\"\"\"\n assert resource_url_from_server_url(\"https://example.com/path?foo=bar\") == \"https://example.com/path?foo=bar\"\n assert resource_url_from_server_url(\"https://example.com/?key=value\") == \"https://example.com/?key=value\"", "creation_date": "2025-06-23T16:10:20Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/shared/test_auth_utils.py", "stars": 16050, "label": 0} +{"function": " def test_preserves_port(self):\n \"\"\"Non-default ports should be preserved.\"\"\"\n assert resource_url_from_server_url(\"https://example.com:8443/path\") == \"https://example.com:8443/path\"\n assert resource_url_from_server_url(\"http://example.com:8080/\") == \"http://example.com:8080/\"", "creation_date": "2025-06-23T16:10:20Z", "repo": "modelcontextprotocol/python-sdk", "file_path": "tests/shared/test_auth_utils.py", "stars": 16050, "label": 0} +{"function": "def parse_key_info(output: str) -> str:\n \"\"\"Extract and format key information from the output\"\"\"\n key_info = []\n\n # Extract Args namespace\n args_match = re.search(r\"Namespace\\(.*?\\)\", output, re.DOTALL)\n if args_match:\n key_info.append(args_match.group(0))\n\n # Extract input/output token counts\n token_matches = re.findall(r\"#(Input|Output) tokens: \\d+\", output)\n key_info.extend(token_matches)\n\n # Extract benchmark result section\n result_match = re.search(\n r\"============ Serving Benchmark Result ============.*?={50,}\",\n output,\n re.DOTALL,\n )\n if result_match:\n key_info.append(result_match.group(0))\n\n return \"\\n\\n\".join(key_info)", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def extract_port_from_command(cmd: str, server_type: str) -> int:\n port_match = re.search(r\"--port[= ](\\d+)\", cmd)\n if port_match:\n return int(port_match.group(1))\n return SERVER_DEFAULTS.get(server_type, ServerConfig(\"\", [], 8000)).default_port", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def detect_server_type(cmd: str) -> str:\n for server_type, config in SERVER_DEFAULTS.items():\n if config.command in cmd:\n return server_type\n return \"unknown\"", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def stream_output(\n process: subprocess.Popen, prefix: str, logger: logging.Logger\n) -> queue.Queue:\n output_queue = queue.Queue()\n\n def stream_pipe(pipe, prefix):\n for line in iter(pipe.readline, \"\"):\n if prefix == \"CLIENT\":\n output_queue.put(line.rstrip())\n logger.debug(f\"{prefix} | {line.rstrip()}\")\n\n stdout_thread = threading.Thread(\n target=stream_pipe, args=(process.stdout, prefix), daemon=True\n )\n stderr_thread = threading.Thread(\n target=stream_pipe, args=(process.stderr, prefix), daemon=True\n )\n\n stdout_thread.start()\n stderr_thread.start()\n return output_queue, (stdout_thread, stderr_thread)", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def load_config(config_path: str) -> List[TaskConfig]:\n with open(config_path, \"r\") as f:\n config_data = yaml.safe_load(f)\n\n configs = []\n for idx, entry in enumerate(config_data.get(\"tasks\", [])):\n if not 
isinstance(entry, dict):\n raise ValueError(f\"Invalid entry at index {idx}\")\n\n config = TaskConfig(\n server_cmd=entry.get(\"server_cmd\"),\n client_cmd=entry.get(\"client_cmd\"),\n name=entry.get(\"name\", f\"task-{idx+1}\"),\n server_type=entry.get(\"server_type\"),\n )\n\n if not config.server_cmd or not config.client_cmd:\n raise ValueError(f\"Missing commands in {config.name}\")\n\n configs.append(config)\n\n return configs", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def setup_logging(debug: bool = False):\n level = logging.DEBUG if debug else logging.INFO\n logging.basicConfig(\n level=level,\n format=\"%(asctime)s - %(levelname)s - %(message)s\",\n handlers=[logging.StreamHandler(), logging.FileHandler(\"experiment.log\")],\n )", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def format_results(results: List[TaskResult]) -> str:\n \"\"\"Format experiment results in Markdown for GitHub step summary.\"\"\"\n output = [\"# Experiment Results\\n\"]\n\n for result in results:\n output.append(f\"## {result.name}\")\n output.append(f\"**Status**: {'\u2705 Success' if result.success else '\u274c Failed'}\")\n output.append(f\"**Runtime**: {result.runtime:.2f} seconds\")\n output.append(f\"**Timestamp**: {result.timestamp}\")\n output.append(\"\\n**Output**:\\n```\")\n output.append(result.output)\n output.append(\"```\\n\")\n\n return \"\\n\".join(output)", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def get_bool_env_var(name: str, default: str = \"false\") -> bool:\n value = os.getenv(name, default)\n return value.lower() in (\"true\", \"1\")", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def write_in_github_step_summary(results: List[TaskResult]):\n \"\"\"Write formatted results to GitHub step summary.\"\"\"\n if not os.environ.get(\"GITHUB_STEP_SUMMARY\"):\n logging.warning(\"GITHUB_STEP_SUMMARY environment variable not set\")\n return\n\n formatted_content = format_results(results)\n with open(os.environ[\"GITHUB_STEP_SUMMARY\"], \"a\") as f:\n f.write(formatted_content)", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser(description=\"Experiment Runner\")\n parser.add_argument(\n \"--config\", type=str, required=True, help=\"Path to YAML config file\"\n )\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Enable debug output\")\n args = parser.parse_args()\n\n setup_logging(args.debug)\n logger = logging.getLogger(__name__)\n results = []\n\n try:\n configs = load_config(args.config)\n runner = ExperimentRunner()\n\n for config in configs:\n logger.info(f\"Running {config.name}\")\n result = runner.run_task(config)\n results.append(result)\n\n if get_bool_env_var(\"SGLANG_IS_IN_CI\"):\n write_in_github_step_summary(results)\n except Exception as e:\n logger.error(f\"Error: {e}\")\n raise", "creation_date": "2024-12-08T18:29:55Z", "repo": "sgl-project/sglang", "file_path": "test/srt/experiment_runner.py", "stars": 15950, "label": 0} +{"function": "def 
pytest_collection_finish():\n print(f\"\\nTesting with {DEVICE=}\")", "creation_date": "2024-04-25T12:58:39Z", "repo": "huggingface/lerobot", "file_path": "tests/conftest.py", "stars": 15749, "label": 0} +{"function": "def _check_component_availability(component_type, available_components, make_component):\n \"\"\"Generic helper to check if a hardware component is available\"\"\"\n if component_type not in available_components:\n raise ValueError(\n f\"The {component_type} type is not valid. Expected one of these '{available_components}'\"\n )\n\n try:\n component = make_component(component_type)\n component.connect()\n del component\n return True\n\n except Exception as e:\n print(f\"\\nA {component_type} is not available.\")\n\n if isinstance(e, ModuleNotFoundError):\n print(f\"\\nInstall module '{e.name}'\")\n elif isinstance(e, SerialException):\n print(\"\\nNo physical device detected.\")\n elif isinstance(e, ValueError) and \"camera_index\" in str(e):\n print(\"\\nNo physical camera detected.\")\n else:\n traceback.print_exc()\n\n return False", "creation_date": "2024-04-25T12:58:39Z", "repo": "huggingface/lerobot", "file_path": "tests/conftest.py", "stars": 15749, "label": 0} +{"function": "def patch_builtins_input(monkeypatch):\n def print_text(text=None):\n if text is not None:\n print(text)\n\n monkeypatch.setattr(\"builtins.input\", print_text)", "creation_date": "2024-04-25T12:58:39Z", "repo": "huggingface/lerobot", "file_path": "tests/conftest.py", "stars": 15749, "label": 0} +{"function": " def print_text(text=None):\n if text is not None:\n print(text)", "creation_date": "2024-04-25T12:58:39Z", "repo": "huggingface/lerobot", "file_path": "tests/conftest.py", "stars": 15749, "label": 0} +{"function": "def test_available_env_task(env_name: str, task_name: list):\n \"\"\"\n This test verifies that all environments listed in `lerobot/__init__.py` can\n be successfully imported \u2014 if they're installed \u2014 and that their\n `available_tasks_per_env` are valid.\n \"\"\"\n package_name = f\"gym_{env_name}\"\n importlib.import_module(package_name)\n gym_handle = f\"{package_name}/{task_name}\"\n assert gym_handle in gym.envs.registry, gym_handle", "creation_date": "2024-03-26T10:10:43Z", "repo": "huggingface/lerobot", "file_path": "tests/test_available.py", "stars": 15749, "label": 0} +{"function": "def test_available_policies():\n \"\"\"\n This test verifies that the class attribute `name` for all policies is\n consistent with those listed in `lerobot/__init__.py`.\n \"\"\"\n policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy]\n policies = [pol_cls.name for pol_cls in policy_classes]\n assert set(policies) == set(lerobot.available_policies), policies", "creation_date": "2024-03-26T10:10:43Z", "repo": "huggingface/lerobot", "file_path": "tests/test_available.py", "stars": 15749, "label": 0} +{"function": "def test_print():\n print(lerobot.available_envs)\n print(lerobot.available_tasks_per_env)\n print(lerobot.available_datasets)\n print(lerobot.available_datasets_per_env)\n print(lerobot.available_real_world_datasets)\n print(lerobot.available_policies)\n print(lerobot.available_policies_per_env)", "creation_date": "2024-03-26T10:10:43Z", "repo": "huggingface/lerobot", "file_path": "tests/test_available.py", "stars": 15749, "label": 0} +{"function": "def test_calibrate():\n robot_cfg = MockRobotConfig()\n cfg = CalibrateConfig(robot=robot_cfg)\n calibrate(cfg)", "creation_date": "2024-07-15T15:43:10Z", "repo": "huggingface/lerobot", "file_path": 
"tests/test_control_robot.py", "stars": 15749, "label": 0} +{"function": "def test_teleoperate():\n robot_cfg = MockRobotConfig()\n teleop_cfg = MockTeleopConfig()\n cfg = TeleoperateConfig(\n robot=robot_cfg,\n teleop=teleop_cfg,\n teleop_time_s=0.1,\n )\n teleoperate(cfg)", "creation_date": "2024-07-15T15:43:10Z", "repo": "huggingface/lerobot", "file_path": "tests/test_control_robot.py", "stars": 15749, "label": 0} +{"function": "def test_record_and_resume(tmp_path):\n robot_cfg = MockRobotConfig()\n teleop_cfg = MockTeleopConfig()\n dataset_cfg = DatasetRecordConfig(\n repo_id=DUMMY_REPO_ID,\n single_task=\"Dummy task\",\n root=tmp_path / \"record\",\n num_episodes=1,\n episode_time_s=0.1,\n reset_time_s=0,\n push_to_hub=False,\n )\n cfg = RecordConfig(\n robot=robot_cfg,\n dataset=dataset_cfg,\n teleop=teleop_cfg,\n play_sounds=False,\n )\n\n dataset = record(cfg)\n\n assert dataset.fps == 30\n assert dataset.meta.total_episodes == dataset.num_episodes == 1\n assert dataset.meta.total_frames == dataset.num_frames == 3\n assert dataset.meta.total_tasks == 1\n\n cfg.resume = True\n dataset = record(cfg)\n\n assert dataset.meta.total_episodes == dataset.num_episodes == 2\n assert dataset.meta.total_frames == dataset.num_frames == 6\n assert dataset.meta.total_tasks == 1", "creation_date": "2024-07-15T15:43:10Z", "repo": "huggingface/lerobot", "file_path": "tests/test_control_robot.py", "stars": 15749, "label": 0} +{"function": "def main():\n cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B', load_jit=True, load_trt=True, load_vllm=True, fp16=True)\n prompt_speech_16k = load_wav('./asset/zero_shot_prompt.wav', 16000)\n for i in tqdm(range(100)):\n set_all_random_seed(i)\n for _, _ in enumerate(cosyvoice.inference_zero_shot('\u6536\u5230\u597d\u53cb\u4ece\u8fdc\u65b9\u5bc4\u6765\u7684\u751f\u65e5\u793c\u7269\uff0c\u90a3\u4efd\u610f\u5916\u7684\u60ca\u559c\u4e0e\u6df1\u6df1\u7684\u795d\u798f\u8ba9\u6211\u5fc3\u4e2d\u5145\u6ee1\u4e86\u751c\u871c\u7684\u5feb\u4e50\uff0c\u7b11\u5bb9\u5982\u82b1\u513f\u822c\u7efd\u653e\u3002', '\u5e0c\u671b\u4f60\u4ee5\u540e\u80fd\u591f\u505a\u7684\u6bd4\u6211\u8fd8\u597d\u5466\u3002', prompt_speech_16k, stream=False)):\n continue", "creation_date": "2025-05-30T10:06:14Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "vllm_example.py", "stars": 15127, "label": 0} +{"function": "def generate_seed():\n seed = random.randint(1, 100000000)\n return {\n \"__type__\": \"update\",\n \"value\": seed\n }", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "webui.py", "stars": 15127, "label": 0} +{"function": "def postprocess(speech, top_db=60, hop_length=220, win_length=440):\n speech, _ = librosa.effects.trim(\n speech, top_db=top_db,\n frame_length=win_length,\n hop_length=hop_length\n )\n if speech.abs().max() > max_val:\n speech = speech / speech.abs().max() * max_val\n speech = torch.concat([speech, torch.zeros(1, int(cosyvoice.sample_rate * 0.2))], dim=1)\n return speech", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "webui.py", "stars": 15127, "label": 0} +{"function": "def change_instruction(mode_checkbox_group):\n return instruct_dict[mode_checkbox_group]", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "webui.py", "stars": 15127, "label": 0} +{"function": "def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,\n seed, stream, speed):\n if prompt_wav_upload is 
not None:\n prompt_wav = prompt_wav_upload\n elif prompt_wav_record is not None:\n prompt_wav = prompt_wav_record\n else:\n prompt_wav = None\n # if instruct mode, please make sure that model is iic/CosyVoice-300M-Instruct and not cross_lingual mode\n if mode_checkbox_group in ['\u81ea\u7136\u8bed\u8a00\u63a7\u5236']:\n if cosyvoice.instruct is False:\n gr.Warning('\u60a8\u6b63\u5728\u4f7f\u7528\u81ea\u7136\u8bed\u8a00\u63a7\u5236\u6a21\u5f0f, {}\u6a21\u578b\u4e0d\u652f\u6301\u6b64\u6a21\u5f0f, \u8bf7\u4f7f\u7528iic/CosyVoice-300M-Instruct\u6a21\u578b'.format(args.model_dir))\n yield (cosyvoice.sample_rate, default_data)\n if instruct_text == '':\n gr.Warning('\u60a8\u6b63\u5728\u4f7f\u7528\u81ea\u7136\u8bed\u8a00\u63a7\u5236\u6a21\u5f0f, \u8bf7\u8f93\u5165instruct\u6587\u672c')\n yield (cosyvoice.sample_rate, default_data)\n if prompt_wav is not None or prompt_text != '':\n gr.Info('\u60a8\u6b63\u5728\u4f7f\u7528\u81ea\u7136\u8bed\u8a00\u63a7\u5236\u6a21\u5f0f, prompt\u97f3\u9891/prompt\u6587\u672c\u4f1a\u88ab\u5ffd\u7565')\n # if cross_lingual mode, please make sure that model is iic/CosyVoice-300M and tts_text prompt_text are different language\n if mode_checkbox_group in ['\u8de8\u8bed\u79cd\u590d\u523b']:\n if cosyvoice.instruct is True:\n gr.Warning('\u60a8\u6b63\u5728\u4f7f\u7528\u8de8\u8bed\u79cd\u590d\u523b\u6a21\u5f0f, {}\u6a21\u578b\u4e0d\u652f\u6301\u6b64\u6a21\u5f0f, \u8bf7\u4f7f\u7528iic/CosyVoice-300M\u6a21\u578b'.format(args.model_dir))\n yield (cosyvoice.sample_rate, default_data)\n if instruct_text != '':\n gr.Info('\u60a8\u6b63\u5728\u4f7f\u7528\u8de8\u8bed\u79cd\u590d\u523b\u6a21\u5f0f, instruct\u6587\u672c\u4f1a\u88ab\u5ffd\u7565')\n if prompt_wav is None:\n gr.Warning('\u60a8\u6b63\u5728\u4f7f\u7528\u8de8\u8bed\u79cd\u590d\u523b\u6a21\u5f0f, \u8bf7\u63d0\u4f9bprompt\u97f3\u9891')\n yield (cosyvoice.sample_rate, default_data)\n gr.Info('\u60a8\u6b63\u5728\u4f7f\u7528\u8de8\u8bed\u79cd\u590d\u523b\u6a21\u5f0f, \u8bf7\u786e\u4fdd\u5408\u6210\u6587\u672c\u548cprompt\u6587\u672c\u4e3a\u4e0d\u540c\u8bed\u8a00')\n # if in zero_shot cross_lingual, please make sure that prompt_text and prompt_wav meets requirements\n if mode_checkbox_group in ['3s\u6781\u901f\u590d\u523b', '\u8de8\u8bed\u79cd\u590d\u523b']:\n if prompt_wav is None:\n gr.Warning('prompt\u97f3\u9891\u4e3a\u7a7a\uff0c\u60a8\u662f\u5426\u5fd8\u8bb0\u8f93\u5165prompt\u97f3\u9891\uff1f')\n yield (cosyvoice.sample_rate, default_data)\n if torchaudio.info(prompt_wav).sample_rate < prompt_sr:\n gr.Warning('prompt\u97f3\u9891\u91c7\u6837\u7387{}\u4f4e\u4e8e{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))\n yield (cosyvoice.sample_rate, default_data)\n # sft mode only use sft_dropdown\n if mode_checkbox_group in ['\u9884\u8bad\u7ec3\u97f3\u8272']:\n if instruct_text != '' or prompt_wav is not None or prompt_text != '':\n gr.Info('\u60a8\u6b63\u5728\u4f7f\u7528\u9884\u8bad\u7ec3\u97f3\u8272\u6a21\u5f0f\uff0cprompt\u6587\u672c/prompt\u97f3\u9891/instruct\u6587\u672c\u4f1a\u88ab\u5ffd\u7565\uff01')\n if sft_dropdown == '':\n gr.Warning('\u6ca1\u6709\u53ef\u7528\u7684\u9884\u8bad\u7ec3\u97f3\u8272\uff01')\n yield (cosyvoice.sample_rate, default_data)\n # zero_shot mode only use prompt_wav prompt text\n if mode_checkbox_group in ['3s\u6781\u901f\u590d\u523b']:\n if prompt_text == '':\n gr.Warning('prompt\u6587\u672c\u4e3a\u7a7a\uff0c\u60a8\u662f\u5426\u5fd8\u8bb0\u8f93\u5165prompt\u6587\u672c\uff1f')\n yield (cosyvoice.sample_rate, default_data)\n if instruct_text != '':\n 
gr.Info('\u60a8\u6b63\u5728\u4f7f\u75283s\u6781\u901f\u590d\u523b\u6a21\u5f0f\uff0c\u9884\u8bad\u7ec3\u97f3\u8272/instruct\u6587\u672c\u4f1a\u88ab\u5ffd\u7565\uff01')\n\n if mode_checkbox_group == '\u9884\u8bad\u7ec3\u97f3\u8272':\n logging.info('get sft inference request')\n set_all_random_seed(seed)\n for i in cosyvoice.inference_sft(tts_text, sft_dropdown, stream=stream, speed=speed):\n yield (cosyvoice.sample_rate, i['tts_speech'].numpy().flatten())\n elif mode_checkbox_group == '3s\u6781\u901f\u590d\u523b':\n logging.info('get zero_shot inference request')\n prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))\n set_all_random_seed(seed)\n for i in cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=stream, speed=speed):\n yield (cosyvoice.sample_rate, i['tts_speech'].numpy().flatten())\n elif mode_checkbox_group == '\u8de8\u8bed\u79cd\u590d\u523b':\n logging.info('get cross_lingual inference request')\n prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))\n set_all_random_seed(seed)\n for i in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=stream, speed=speed):\n yield (cosyvoice.sample_rate, i['tts_speech'].numpy().flatten())\n else:\n logging.info('get instruct inference request')\n set_all_random_seed(seed)\n for i in cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text, stream=stream, speed=speed):\n yield (cosyvoice.sample_rate, i['tts_speech'].numpy().flatten())", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "webui.py", "stars": 15127, "label": 0} +{"function": "def main():\n with gr.Blocks() as demo:\n gr.Markdown(\"### \u4ee3\u7801\u5e93 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) \\\n \u9884\u8bad\u7ec3\u6a21\u578b [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) \\\n [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) \\\n [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)\")\n gr.Markdown(\"#### \u8bf7\u8f93\u5165\u9700\u8981\u5408\u6210\u7684\u6587\u672c\uff0c\u9009\u62e9\u63a8\u7406\u6a21\u5f0f\uff0c\u5e76\u6309\u7167\u63d0\u793a\u6b65\u9aa4\u8fdb\u884c\u64cd\u4f5c\")\n\n tts_text = gr.Textbox(label=\"\u8f93\u5165\u5408\u6210\u6587\u672c\", lines=1, value=\"\u6211\u662f\u901a\u4e49\u5b9e\u9a8c\u5ba4\u8bed\u97f3\u56e2\u961f\u5168\u65b0\u63a8\u51fa\u7684\u751f\u6210\u5f0f\u8bed\u97f3\u5927\u6a21\u578b\uff0c\u63d0\u4f9b\u8212\u9002\u81ea\u7136\u7684\u8bed\u97f3\u5408\u6210\u80fd\u529b\u3002\")\n with gr.Row():\n mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='\u9009\u62e9\u63a8\u7406\u6a21\u5f0f', value=inference_mode_list[0])\n instruction_text = gr.Text(label=\"\u64cd\u4f5c\u6b65\u9aa4\", value=instruct_dict[inference_mode_list[0]], scale=0.5)\n sft_dropdown = gr.Dropdown(choices=sft_spk, label='\u9009\u62e9\u9884\u8bad\u7ec3\u97f3\u8272', value=sft_spk[0], scale=0.25)\n stream = gr.Radio(choices=stream_mode_list, label='\u662f\u5426\u6d41\u5f0f\u63a8\u7406', value=stream_mode_list[0][1])\n speed = gr.Number(value=1, label=\"\u901f\u5ea6\u8c03\u8282(\u4ec5\u652f\u6301\u975e\u6d41\u5f0f\u63a8\u7406)\", minimum=0.5, maximum=2.0, step=0.1)\n with gr.Column(scale=0.25):\n seed_button = gr.Button(value=\"\\U0001F3B2\")\n seed = gr.Number(value=0, label=\"\u968f\u673a\u63a8\u7406\u79cd\u5b50\")\n\n with gr.Row():\n prompt_wav_upload = gr.Audio(sources='upload', type='filepath', 
label='\u9009\u62e9prompt\u97f3\u9891\u6587\u4ef6\uff0c\u6ce8\u610f\u91c7\u6837\u7387\u4e0d\u4f4e\u4e8e16khz')\n prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='\u5f55\u5236prompt\u97f3\u9891\u6587\u4ef6')\n prompt_text = gr.Textbox(label=\"\u8f93\u5165prompt\u6587\u672c\", lines=1, placeholder=\"\u8bf7\u8f93\u5165prompt\u6587\u672c\uff0c\u9700\u4e0eprompt\u97f3\u9891\u5185\u5bb9\u4e00\u81f4\uff0c\u6682\u65f6\u4e0d\u652f\u6301\u81ea\u52a8\u8bc6\u522b...\", value='')\n instruct_text = gr.Textbox(label=\"\u8f93\u5165instruct\u6587\u672c\", lines=1, placeholder=\"\u8bf7\u8f93\u5165instruct\u6587\u672c.\", value='')\n\n generate_button = gr.Button(\"\u751f\u6210\u97f3\u9891\")\n\n audio_output = gr.Audio(label=\"\u5408\u6210\u97f3\u9891\", autoplay=True, streaming=True)\n\n seed_button.click(generate_seed, inputs=[], outputs=seed)\n generate_button.click(generate_audio,\n inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,\n seed, stream, speed],\n outputs=[audio_output])\n mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])\n demo.queue(max_size=4, default_concurrency_limit=2)\n demo.launch(server_name='0.0.0.0', server_port=args.port)", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "webui.py", "stars": 15127, "label": 0} +{"function": "def single_job(utt):\n audio, sample_rate = torchaudio.load(utt2wav[utt])\n if sample_rate != 16000:\n audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)\n feat = kaldi.fbank(audio,\n num_mel_bins=80,\n dither=0,\n sample_frequency=16000)\n feat = feat - feat.mean(dim=0, keepdim=True)\n embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()\n return utt, embedding", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "tools/extract_embedding.py", "stars": 15127, "label": 0} +{"function": "def main(args):\n all_task = [executor.submit(single_job, utt) for utt in utt2wav.keys()]\n utt2embedding, spk2embedding = {}, {}\n for future in tqdm(as_completed(all_task)):\n utt, embedding = future.result()\n utt2embedding[utt] = embedding\n spk = utt2spk[utt]\n if spk not in spk2embedding:\n spk2embedding[spk] = []\n spk2embedding[spk].append(embedding)\n for k, v in spk2embedding.items():\n spk2embedding[k] = torch.tensor(v).mean(dim=0).tolist()\n torch.save(utt2embedding, \"{}/utt2embedding.pt\".format(args.dir))\n torch.save(spk2embedding, \"{}/spk2embedding.pt\".format(args.dir))", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "tools/extract_embedding.py", "stars": 15127, "label": 0} +{"function": "def single_job(utt):\n audio, sample_rate = torchaudio.load(utt2wav[utt], backend='soundfile')\n if sample_rate != 16000:\n audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)\n # Convert audio to mono\n if audio.shape[0] > 1:\n audio = audio.mean(dim=0, keepdim=True)\n if audio.shape[1] / 16000 > 30:\n logging.warning('do not support extract speech token for audio longer than 30s')\n speech_token = []\n else:\n feat = whisper.log_mel_spectrogram(audio, n_mels=128)\n speech_token = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.detach().cpu().numpy(),\n ort_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()\n 
return utt, speech_token", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "tools/extract_speech_token.py", "stars": 15127, "label": 0} +{"function": "def main(args):\n all_task = [executor.submit(single_job, utt) for utt in utt2wav.keys()]\n utt2speech_token = {}\n for future in tqdm(as_completed(all_task)):\n utt, speech_token = future.result()\n utt2speech_token[utt] = speech_token\n torch.save(utt2speech_token, '{}/utt2speech_token.pt'.format(args.dir))", "creation_date": "2024-07-04T13:15:12Z", "repo": "FunAudioLLM/CosyVoice", "file_path": "tools/extract_speech_token.py", "stars": 15127, "label": 0} +{"function": "def ask(\n question,\n debug=False,\n max_plan_iterations=1,\n max_step_num=3,\n enable_background_investigation=True,\n):\n \"\"\"Run the agent workflow with the given question.\n\n Args:\n question: The user's query or request\n debug: If True, enables debug level logging\n max_plan_iterations: Maximum number of plan iterations\n max_step_num: Maximum number of steps in a plan\n enable_background_investigation: If True, performs web search before planning to enhance context\n \"\"\"\n asyncio.run(\n run_agent_workflow_async(\n user_input=question,\n debug=debug,\n max_plan_iterations=max_plan_iterations,\n max_step_num=max_step_num,\n enable_background_investigation=enable_background_investigation,\n )\n )", "creation_date": "2025-04-09T12:32:16Z", "repo": "bytedance/deer-flow", "file_path": "main.py", "stars": 15119, "label": 0} +{"function": "def main(\n debug=False,\n max_plan_iterations=1,\n max_step_num=3,\n enable_background_investigation=True,\n):\n \"\"\"Interactive mode with built-in questions.\n\n Args:\n enable_background_investigation: If True, performs web search before planning to enhance context\n debug: If True, enables debug level logging\n max_plan_iterations: Maximum number of plan iterations\n max_step_num: Maximum number of steps in a plan\n \"\"\"\n # First select language\n language = inquirer.select(\n message=\"Select language / \u9009\u62e9\u8bed\u8a00:\",\n choices=[\"English\", \"\u4e2d\u6587\"],\n ).execute()\n\n # Choose questions based on language\n questions = (\n BUILT_IN_QUESTIONS if language == \"English\" else BUILT_IN_QUESTIONS_ZH_CN\n )\n ask_own_option = (\n \"[Ask my own question]\" if language == \"English\" else \"[\u81ea\u5b9a\u4e49\u95ee\u9898]\"\n )\n\n # Select a question\n initial_question = inquirer.select(\n message=(\n \"What do you want to know?\" if language == \"English\" else \"\u60a8\u60f3\u4e86\u89e3\u4ec0\u4e48?\"\n ),\n choices=[ask_own_option] + questions,\n ).execute()\n\n if initial_question == ask_own_option:\n initial_question = inquirer.text(\n message=(\n \"What do you want to know?\"\n if language == \"English\"\n else \"\u60a8\u60f3\u4e86\u89e3\u4ec0\u4e48?\"\n ),\n ).execute()\n\n # Pass all parameters to ask function\n ask(\n question=initial_question,\n debug=debug,\n max_plan_iterations=max_plan_iterations,\n max_step_num=max_step_num,\n enable_background_investigation=enable_background_investigation,\n )", "creation_date": "2025-04-09T12:32:16Z", "repo": "bytedance/deer-flow", "file_path": "main.py", "stars": 15119, "label": 0} +{"function": "def handle_shutdown(signum, frame):\n \"\"\"Handle graceful shutdown on SIGTERM/SIGINT\"\"\"\n logger.info(\"Received shutdown signal. 
Starting graceful shutdown...\")\n sys.exit(0)", "creation_date": "2025-04-13T13:14:31Z", "repo": "bytedance/deer-flow", "file_path": "server.py", "stars": 15119, "label": 0} +{"function": "def load_state_class():\n # Get the absolute path to the types.py file\n src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"src\"))\n types_path = os.path.join(src_dir, \"graph\", \"types.py\")\n\n # Create a namespace for the module\n import types\n\n module_name = \"src.graph.types_direct\"\n spec = types.ModuleType(module_name)\n\n # Add the module to sys.modules to avoid import loops\n sys.modules[module_name] = spec\n\n # Set up the namespace with required imports\n spec.__dict__[\"operator\"] = __import__(\"operator\")\n spec.__dict__[\"Annotated\"] = Annotated\n spec.__dict__[\"MessagesState\"] = MessagesState\n spec.__dict__[\"Plan\"] = Plan\n\n # Execute the module code\n with open(types_path, \"r\") as f:\n module_code = f.read()\n\n exec(module_code, spec.__dict__)\n\n # Return the State class\n return spec.State", "creation_date": "2025-05-15T15:56:13Z", "repo": "bytedance/deer-flow", "file_path": "tests/test_state.py", "stars": 15119, "label": 0} +{"function": "def test_state_initialization():\n \"\"\"Test that State class has correct default attribute definitions.\"\"\"\n # Test that the class has the expected attribute definitions\n assert State.locale == \"en-US\"\n assert State.observations == []\n assert State.plan_iterations == 0\n assert State.current_plan is None\n assert State.final_report == \"\"\n assert State.auto_accepted_plan is False\n assert State.enable_background_investigation is True\n assert State.background_investigation_results is None\n\n # Verify state initialization\n state = State(messages=[])\n assert \"messages\" in state\n\n # Without explicitly passing attributes, they're not in the state\n assert \"locale\" not in state\n assert \"observations\" not in state", "creation_date": "2025-05-15T15:56:13Z", "repo": "bytedance/deer-flow", "file_path": "tests/test_state.py", "stars": 15119, "label": 0} +{"function": "def test_state_with_custom_values():\n \"\"\"Test that State can be initialized with custom values.\"\"\"\n test_step = Step(\n need_search=True,\n title=\"Test Step\",\n description=\"Step description\",\n step_type=StepType.RESEARCH,\n )\n\n test_plan = Plan(\n locale=\"en-US\",\n has_enough_context=False,\n thought=\"Test thought\",\n title=\"Test Plan\",\n steps=[test_step],\n )\n\n # Initialize state with custom values and required messages field\n state = State(\n messages=[],\n locale=\"fr-FR\",\n observations=[\"Observation 1\"],\n plan_iterations=2,\n current_plan=test_plan,\n final_report=\"Test report\",\n auto_accepted_plan=True,\n enable_background_investigation=False,\n background_investigation_results=\"Test results\",\n )\n\n # Access state keys - these are explicitly initialized\n assert state[\"locale\"] == \"fr-FR\"\n assert state[\"observations\"] == [\"Observation 1\"]\n assert state[\"plan_iterations\"] == 2\n assert state[\"current_plan\"].title == \"Test Plan\"\n assert state[\"current_plan\"].thought == \"Test thought\"\n assert len(state[\"current_plan\"].steps) == 1\n assert state[\"current_plan\"].steps[0].title == \"Test Step\"\n assert state[\"final_report\"] == \"Test report\"\n assert state[\"auto_accepted_plan\"] is True\n assert state[\"enable_background_investigation\"] is False\n assert state[\"background_investigation_results\"] == \"Test results\"", "creation_date": "2025-05-15T15:56:13Z", 
"repo": "bytedance/deer-flow", "file_path": "tests/test_state.py", "stars": 15119, "label": 0} +{"function": " def __init__(self, need_search, title, description, step_type):\n self.need_search = need_search\n self.title = title\n self.description = description\n self.step_type = step_type", "creation_date": "2025-05-15T15:56:13Z", "repo": "bytedance/deer-flow", "file_path": "tests/test_state.py", "stars": 15119, "label": 0} +{"function": " def __init__(self, locale, has_enough_context, thought, title, steps):\n self.locale = locale\n self.has_enough_context = has_enough_context\n self.thought = thought\n self.title = title\n self.steps = steps", "creation_date": "2025-05-15T15:56:13Z", "repo": "bytedance/deer-flow", "file_path": "tests/test_state.py", "stars": 15119, "label": 0} +{"function": " def test_valid_json_object(self):\n \"\"\"Test with valid JSON object\"\"\"\n content = '{\"key\": \"value\", \"number\": 123}'\n result = repair_json_output(content)\n expected = json.dumps({\"key\": \"value\", \"number\": 123}, ensure_ascii=False)\n assert result == expected", "creation_date": "2025-06-18T02:04:46Z", "repo": "bytedance/deer-flow", "file_path": "tests/unit/utils/test_json_utils.py", "stars": 15119, "label": 0} +{"function": " def test_valid_json_array(self):\n \"\"\"Test with valid JSON array\"\"\"\n content = '[1, 2, 3, \"test\"]'\n result = repair_json_output(content)\n expected = json.dumps([1, 2, 3, \"test\"], ensure_ascii=False)\n assert result == expected", "creation_date": "2025-06-18T02:04:46Z", "repo": "bytedance/deer-flow", "file_path": "tests/unit/utils/test_json_utils.py", "stars": 15119, "label": 0} +{"function": "def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):\n total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)\n total_latent_sections = int(max(round(total_latent_sections), 1))\n\n job_id = generate_timestamp()\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))\n\n try:\n # Clean GPU\n if not high_vram:\n unload_complete_models(\n text_encoder, text_encoder_2, image_encoder, vae, transformer\n )\n\n # Text encoding\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))\n\n if not high_vram:\n fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.\n load_model_as_complete(text_encoder_2, target_device=gpu)\n\n llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)\n\n if cfg == 1:\n llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)\n else:\n llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)\n\n llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)\n llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)\n\n # Processing input image\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))\n\n H, W, C = input_image.shape\n height, width = find_nearest_bucket(H, W, resolution=640)\n input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)\n\n 
Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))\n\n input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1\n input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]\n\n # VAE encoding\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))\n\n if not high_vram:\n load_model_as_complete(vae, target_device=gpu)\n\n start_latent = vae_encode(input_image_pt, vae)\n\n # CLIP Vision\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))\n\n if not high_vram:\n load_model_as_complete(image_encoder, target_device=gpu)\n\n image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)\n image_encoder_last_hidden_state = image_encoder_output.last_hidden_state\n\n # Dtype\n\n llama_vec = llama_vec.to(transformer.dtype)\n llama_vec_n = llama_vec_n.to(transformer.dtype)\n clip_l_pooler = clip_l_pooler.to(transformer.dtype)\n clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)\n image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)\n\n # Sampling\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))\n\n rnd = torch.Generator(\"cpu\").manual_seed(seed)\n num_frames = latent_window_size * 4 - 3\n\n history_latents = torch.zeros(size=(1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32).cpu()\n history_pixels = None\n total_generated_latent_frames = 0\n\n latent_paddings = reversed(range(total_latent_sections))\n\n if total_latent_sections > 4:\n # In theory the latent_paddings should follow the above sequence, but it seems that duplicating some\n # items looks better than expanding it when total_latent_sections > 4\n # One can try to remove below trick and just\n # use `latent_paddings = list(reversed(range(total_latent_sections)))` to compare\n latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]\n\n for latent_padding in latent_paddings:\n is_last_section = latent_padding == 0\n latent_padding_size = latent_padding * latent_window_size\n\n if stream.input_queue.top() == 'end':\n stream.output_queue.push(('end', None))\n return\n\n print(f'latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}')\n\n indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)\n clean_latent_indices_pre, blank_indices, latent_indices, clean_latent_indices_post, clean_latent_2x_indices, clean_latent_4x_indices = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)\n clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)\n\n clean_latents_pre = start_latent.to(history_latents)\n clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, :1 + 2 + 16, :, :].split([1, 2, 16], dim=2)\n clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)\n\n if not high_vram:\n unload_complete_models()\n move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)\n\n if use_teacache:\n transformer.initialize_teacache(enable_teacache=True, num_steps=steps)\n else:\n transformer.initialize_teacache(enable_teacache=False)\n\n def callback(d):\n preview = d['denoised']\n preview = vae_decode_fake(preview)\n\n preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)\n preview = 
einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')\n\n if stream.input_queue.top() == 'end':\n stream.output_queue.push(('end', None))\n raise KeyboardInterrupt('User ends the task.')\n\n current_step = d['i'] + 1\n percentage = int(100.0 * current_step / steps)\n hint = f'Sampling {current_step}/{steps}'\n desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'\n stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))\n return\n\n generated_latents = sample_hunyuan(\n transformer=transformer,\n sampler='unipc',\n width=width,\n height=height,\n frames=num_frames,\n real_guidance_scale=cfg,\n distilled_guidance_scale=gs,\n guidance_rescale=rs,\n # shift=3.0,\n num_inference_steps=steps,\n generator=rnd,\n prompt_embeds=llama_vec,\n prompt_embeds_mask=llama_attention_mask,\n prompt_poolers=clip_l_pooler,\n negative_prompt_embeds=llama_vec_n,\n negative_prompt_embeds_mask=llama_attention_mask_n,\n negative_prompt_poolers=clip_l_pooler_n,\n device=gpu,\n dtype=torch.bfloat16,\n image_embeddings=image_encoder_last_hidden_state,\n latent_indices=latent_indices,\n clean_latents=clean_latents,\n clean_latent_indices=clean_latent_indices,\n clean_latents_2x=clean_latents_2x,\n clean_latent_2x_indices=clean_latent_2x_indices,\n clean_latents_4x=clean_latents_4x,\n clean_latent_4x_indices=clean_latent_4x_indices,\n callback=callback,\n )\n\n if is_last_section:\n generated_latents = torch.cat([start_latent.to(generated_latents), generated_latents], dim=2)\n\n total_generated_latent_frames += int(generated_latents.shape[2])\n history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)\n\n if not high_vram:\n offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)\n load_model_as_complete(vae, target_device=gpu)\n\n real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]\n\n if history_pixels is None:\n history_pixels = vae_decode(real_history_latents, vae).cpu()\n else:\n section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2)\n overlapped_frames = latent_window_size * 4 - 3\n\n current_pixels = vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu()\n history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)\n\n if not high_vram:\n unload_complete_models()\n\n output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')\n\n save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)\n\n print(f'Decoded. 
Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')\n\n stream.output_queue.push(('file', output_filename))\n\n if is_last_section:\n break\n except:\n traceback.print_exc()\n\n if not high_vram:\n unload_complete_models(\n text_encoder, text_encoder_2, image_encoder, vae, transformer\n )\n\n stream.output_queue.push(('end', None))\n return", "creation_date": "2025-04-17T02:51:46Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio.py", "stars": 15026, "label": 0} +{"function": "def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):\n global stream\n assert input_image is not None, 'No input image!'\n\n yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)\n\n stream = AsyncStream()\n\n async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)\n\n output_filename = None\n\n while True:\n flag, data = stream.output_queue.next()\n\n if flag == 'file':\n output_filename = data\n yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)\n\n if flag == 'progress':\n preview, desc, html = data\n yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)\n\n if flag == 'end':\n yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)\n break", "creation_date": "2025-04-17T02:51:46Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio.py", "stars": 15026, "label": 0} +{"function": "def end_process():\n stream.input_queue.push('end')", "creation_date": "2025-04-17T02:51:46Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio.py", "stars": 15026, "label": 0} +{"function": " def callback(d):\n preview = d['denoised']\n preview = vae_decode_fake(preview)\n\n preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)\n preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')\n\n if stream.input_queue.top() == 'end':\n stream.output_queue.push(('end', None))\n raise KeyboardInterrupt('User ends the task.')\n\n current_step = d['i'] + 1\n percentage = int(100.0 * current_step / steps)\n hint = f'Sampling {current_step}/{steps}'\n desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). 
The video is being extended now ...'\n stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))\n return", "creation_date": "2025-04-17T02:51:46Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio.py", "stars": 15026, "label": 0} +{"function": "def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):\n total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)\n total_latent_sections = int(max(round(total_latent_sections), 1))\n\n job_id = generate_timestamp()\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))\n\n try:\n # Clean GPU\n if not high_vram:\n unload_complete_models(\n text_encoder, text_encoder_2, image_encoder, vae, transformer\n )\n\n # Text encoding\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))\n\n if not high_vram:\n fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.\n load_model_as_complete(text_encoder_2, target_device=gpu)\n\n llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)\n\n if cfg == 1:\n llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)\n else:\n llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)\n\n llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)\n llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)\n\n # Processing input image\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))\n\n H, W, C = input_image.shape\n height, width = find_nearest_bucket(H, W, resolution=640)\n input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)\n\n Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))\n\n input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1\n input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]\n\n # VAE encoding\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))\n\n if not high_vram:\n load_model_as_complete(vae, target_device=gpu)\n\n start_latent = vae_encode(input_image_pt, vae)\n\n # CLIP Vision\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))\n\n if not high_vram:\n load_model_as_complete(image_encoder, target_device=gpu)\n\n image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)\n image_encoder_last_hidden_state = image_encoder_output.last_hidden_state\n\n # Dtype\n\n llama_vec = llama_vec.to(transformer.dtype)\n llama_vec_n = llama_vec_n.to(transformer.dtype)\n clip_l_pooler = clip_l_pooler.to(transformer.dtype)\n clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)\n image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)\n\n # Sampling\n\n stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))\n\n rnd = torch.Generator(\"cpu\").manual_seed(seed)\n\n history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), 
dtype=torch.float32).cpu()\n history_pixels = None\n\n history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)\n total_generated_latent_frames = 1\n\n for section_index in range(total_latent_sections):\n if stream.input_queue.top() == 'end':\n stream.output_queue.push(('end', None))\n return\n\n print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')\n\n if not high_vram:\n unload_complete_models()\n move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)\n\n if use_teacache:\n transformer.initialize_teacache(enable_teacache=True, num_steps=steps)\n else:\n transformer.initialize_teacache(enable_teacache=False)\n\n def callback(d):\n preview = d['denoised']\n preview = vae_decode_fake(preview)\n\n preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)\n preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')\n\n if stream.input_queue.top() == 'end':\n stream.output_queue.push(('end', None))\n raise KeyboardInterrupt('User ends the task.')\n\n current_step = d['i'] + 1\n percentage = int(100.0 * current_step / steps)\n hint = f'Sampling {current_step}/{steps}'\n desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'\n stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))\n return\n\n indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)\n clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)\n clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)\n\n clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)\n clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)\n\n generated_latents = sample_hunyuan(\n transformer=transformer,\n sampler='unipc',\n width=width,\n height=height,\n frames=latent_window_size * 4 - 3,\n real_guidance_scale=cfg,\n distilled_guidance_scale=gs,\n guidance_rescale=rs,\n # shift=3.0,\n num_inference_steps=steps,\n generator=rnd,\n prompt_embeds=llama_vec,\n prompt_embeds_mask=llama_attention_mask,\n prompt_poolers=clip_l_pooler,\n negative_prompt_embeds=llama_vec_n,\n negative_prompt_embeds_mask=llama_attention_mask_n,\n negative_prompt_poolers=clip_l_pooler_n,\n device=gpu,\n dtype=torch.bfloat16,\n image_embeddings=image_encoder_last_hidden_state,\n latent_indices=latent_indices,\n clean_latents=clean_latents,\n clean_latent_indices=clean_latent_indices,\n clean_latents_2x=clean_latents_2x,\n clean_latent_2x_indices=clean_latent_2x_indices,\n clean_latents_4x=clean_latents_4x,\n clean_latent_4x_indices=clean_latent_4x_indices,\n callback=callback,\n )\n\n total_generated_latent_frames += int(generated_latents.shape[2])\n history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)\n\n if not high_vram:\n offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)\n load_model_as_complete(vae, target_device=gpu)\n\n real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]\n\n if history_pixels is None:\n 
history_pixels = vae_decode(real_history_latents, vae).cpu()\n else:\n section_latent_frames = latent_window_size * 2\n overlapped_frames = latent_window_size * 4 - 3\n\n current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()\n history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)\n\n if not high_vram:\n unload_complete_models()\n\n output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')\n\n save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)\n\n print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')\n\n stream.output_queue.push(('file', output_filename))\n except:\n traceback.print_exc()\n\n if not high_vram:\n unload_complete_models(\n text_encoder, text_encoder_2, image_encoder, vae, transformer\n )\n\n stream.output_queue.push(('end', None))\n return", "creation_date": "2025-05-03T08:40:00Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio_f1.py", "stars": 15026, "label": 0} +{"function": "def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):\n global stream\n assert input_image is not None, 'No input image!'\n\n yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)\n\n stream = AsyncStream()\n\n async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)\n\n output_filename = None\n\n while True:\n flag, data = stream.output_queue.next()\n\n if flag == 'file':\n output_filename = data\n yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)\n\n if flag == 'progress':\n preview, desc, html = data\n yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)\n\n if flag == 'end':\n yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)\n break", "creation_date": "2025-05-03T08:40:00Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio_f1.py", "stars": 15026, "label": 0} +{"function": "def end_process():\n stream.input_queue.push('end')", "creation_date": "2025-05-03T08:40:00Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio_f1.py", "stars": 15026, "label": 0} +{"function": " def callback(d):\n preview = d['denoised']\n preview = vae_decode_fake(preview)\n\n preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)\n preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')\n\n if stream.input_queue.top() == 'end':\n stream.output_queue.push(('end', None))\n raise KeyboardInterrupt('User ends the task.')\n\n current_step = d['i'] + 1\n percentage = int(100.0 * current_step / steps)\n hint = f'Sampling {current_step}/{steps}'\n desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). 
The video is being extended now ...'\n stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))\n return", "creation_date": "2025-05-03T08:40:00Z", "repo": "lllyasviel/FramePack", "file_path": "demo_gradio_f1.py", "stars": 15026, "label": 0} +{"function": "def find_nearest_bucket(h, w, resolution=640):\n min_metric = float('inf')\n best_bucket = None\n for (bucket_h, bucket_w) in bucket_options[resolution]:\n metric = abs(h * bucket_w - w * bucket_h)\n if metric <= min_metric:\n min_metric = metric\n best_bucket = (bucket_h, bucket_w)\n return best_bucket", "creation_date": "2025-04-17T02:51:46Z", "repo": "lllyasviel/FramePack", "file_path": "diffusers_helper/bucket_tools.py", "stars": 15026, "label": 0} +{"function": "def hf_clip_vision_encode(image, feature_extractor, image_encoder):\n assert isinstance(image, np.ndarray)\n assert image.ndim == 3 and image.shape[2] == 3\n assert image.dtype == np.uint8\n\n preprocessed = feature_extractor.preprocess(images=image, return_tensors=\"pt\").to(device=image_encoder.device, dtype=image_encoder.dtype)\n image_encoder_output = image_encoder(**preprocessed)\n\n return image_encoder_output", "creation_date": "2025-04-17T02:51:46Z", "repo": "lllyasviel/FramePack", "file_path": "diffusers_helper/clip_vision.py", "stars": 15026, "label": 0} +{"function": "def clear_argv(func):\n \"\"\"\n Decorator: Clear sys.argv before calling the decorated function, keeping only the script name. Restore original sys.argv after calling.\n Used to prevent arguments from being parsed by Hugging Face HfArgumentParser causing ValueError.\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n original_argv = sys.argv.copy()\n sys.argv = [original_argv[0]] # Keep only script name\n try:\n return func(*args, **kwargs)\n finally:\n sys.argv = original_argv # Restore original sys.argv\n\n return wrapper", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def with_community_info(func):\n \"\"\"\n Decorator: Show community info before executing the command\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n show_community_info()\n return func(*args, **kwargs)\n\n return wrapper", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def apply_common_decorators(capture_output_enabled=False):\n \"\"\"\n A unified decorator for applications\n \"\"\"\n\n def decorator(original_cmd_func):\n @functools.wraps(original_cmd_func)\n def new_runtime_wrapper(*args, **kwargs):\n if cli_config and cli_config.full_log:\n return capture_output(original_cmd_func)(*args, **kwargs)\n else:\n return original_cmd_func(*args, **kwargs)\n\n func_with_clear_argv = clear_argv(new_runtime_wrapper)\n\n return functools.wraps(original_cmd_func)(func_with_clear_argv)\n\n return decorator", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def cli(ctx, config_path):\n \"\"\"WeClone: One-stop solution for creating digital avatars from chat history\"\"\"\n # Only show community info when no subcommand is invoked\n if ctx.invoked_subcommand is None:\n show_community_info()\n click.echo(ctx.get_help())\n return\n\n if config_path:\n os.environ[\"WECLONE_CONFIG_PATH\"] = config_path\n logger.info(f\"Config file path set to: {config_path}\")\n\n _check_project_root()\n 
_check_versions()\n global cli_config\n cli_config = cast(CliArgs, load_config(arg_type=\"cli_args\"))\n\n configure_log_level_from_config()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def qa_generator():\n \"\"\"Process chat history CSV files to generate Q&A pair datasets.\"\"\"\n from weclone.data.qa_generator import DataProcessor\n\n processor = DataProcessor()\n processor.main()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def train_sft():\n \"\"\"Fine-tune the model using prepared datasets.\"\"\"\n from weclone.train.train_sft import main as train_sft_main\n\n train_sft_main()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def web_demo():\n \"\"\"Launch Web UI for interactive testing with fine-tuned model.\"\"\"\n from weclone.eval.web_demo import main as web_demo_main\n\n web_demo_main()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def eval_model():\n \"\"\"Evaluate using validation set split from training data.\"\"\"\n from weclone.eval.eval_model import main as evaluate_main\n\n evaluate_main()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def test_model():\n \"\"\"Test model with common chat questions.\"\"\"\n from weclone.eval.test_model import main as test_main\n\n test_main()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": "def server():\n \"\"\"Start API service providing model inference interface.\"\"\"\n from weclone.server.api_service import main as server_main\n\n server_main()", "creation_date": "2025-04-26T11:08:32Z", "repo": "xming521/WeClone", "file_path": "weclone/cli.py", "stars": 14848, "label": 0} +{"function": " def __init__(self, max_debate_rounds=1, max_risk_discuss_rounds=1):\n \"\"\"Initialize with configuration parameters.\"\"\"\n self.max_debate_rounds = max_debate_rounds\n self.max_risk_discuss_rounds = max_risk_discuss_rounds", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def should_continue_market(self, state: AgentState):\n \"\"\"Determine if market analysis should continue.\"\"\"\n messages = state[\"messages\"]\n last_message = messages[-1]\n if last_message.tool_calls:\n return \"tools_market\"\n return \"Msg Clear Market\"", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def should_continue_social(self, state: AgentState):\n \"\"\"Determine if social media analysis should continue.\"\"\"\n messages = state[\"messages\"]\n last_message = messages[-1]\n if last_message.tool_calls:\n return \"tools_social\"\n return \"Msg Clear Social\"", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def should_continue_news(self, state: AgentState):\n \"\"\"Determine if news analysis 
should continue.\"\"\"\n messages = state[\"messages\"]\n last_message = messages[-1]\n if last_message.tool_calls:\n return \"tools_news\"\n return \"Msg Clear News\"", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def should_continue_fundamentals(self, state: AgentState):\n \"\"\"Determine if fundamentals analysis should continue.\"\"\"\n messages = state[\"messages\"]\n last_message = messages[-1]\n if last_message.tool_calls:\n return \"tools_fundamentals\"\n return \"Msg Clear Fundamentals\"", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def should_continue_debate(self, state: AgentState) -> str:\n \"\"\"Determine if debate should continue.\"\"\"\n\n if (\n state[\"investment_debate_state\"][\"count\"] >= 2 * self.max_debate_rounds\n ): # 3 rounds of back-and-forth between 2 agents\n return \"Research Manager\"\n if state[\"investment_debate_state\"][\"current_response\"].startswith(\"Bull\"):\n return \"Bear Researcher\"\n return \"Bull Researcher\"", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def should_continue_risk_analysis(self, state: AgentState) -> str:\n \"\"\"Determine if risk analysis should continue.\"\"\"\n if (\n state[\"risk_debate_state\"][\"count\"] >= 3 * self.max_risk_discuss_rounds\n ): # 3 rounds of back-and-forth between 3 agents\n return \"Risk Judge\"\n if state[\"risk_debate_state\"][\"latest_speaker\"].startswith(\"Risky\"):\n return \"Safe Analyst\"\n if state[\"risk_debate_state\"][\"latest_speaker\"].startswith(\"Safe\"):\n return \"Neutral Analyst\"\n return \"Risky Analyst\"", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/conditional_logic.py", "stars": 14794, "label": 0} +{"function": " def __init__(self, max_recur_limit=100):\n \"\"\"Initialize with configuration parameters.\"\"\"\n self.max_recur_limit = max_recur_limit", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/propagation.py", "stars": 14794, "label": 0} +{"function": " def create_initial_state(\n self, company_name: str, trade_date: str\n ) -> Dict[str, Any]:\n \"\"\"Create the initial state for the agent graph.\"\"\"\n return {\n \"messages\": [(\"human\", company_name)],\n \"company_of_interest\": company_name,\n \"trade_date\": str(trade_date),\n \"investment_debate_state\": InvestDebateState(\n {\"history\": \"\", \"current_response\": \"\", \"count\": 0}\n ),\n \"risk_debate_state\": RiskDebateState(\n {\n \"history\": \"\",\n \"current_risky_response\": \"\",\n \"current_safe_response\": \"\",\n \"current_neutral_response\": \"\",\n \"count\": 0,\n }\n ),\n \"market_report\": \"\",\n \"fundamentals_report\": \"\",\n \"sentiment_report\": \"\",\n \"news_report\": \"\",\n }", "creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/propagation.py", "stars": 14794, "label": 0} +{"function": " def get_graph_args(self) -> Dict[str, Any]:\n \"\"\"Get arguments for the graph invocation.\"\"\"\n return {\n \"stream_mode\": \"values\",\n \"config\": {\"recursion_limit\": self.max_recur_limit},\n }", 
"creation_date": "2025-06-05T11:27:57Z", "repo": "TauricResearch/TradingAgents", "file_path": "tradingagents/graph/propagation.py", "stars": 14794, "label": 0} +{"function": "def colored(text, color=None, bold=False):\n fmt = []\n if color== 'red':\n fmt.append('31')\n elif color == 'green':\n fmt.append('32')\n if bold:\n fmt.append('1')\n\n return f\"\\033[{';'.join(fmt)}m{text}\\033[0m\"", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": "def split_line(text: str) -> List[str]:\n \"\"\"Split text into lines based on terminal width.\"\"\"\n term_width = shutil.get_terminal_size().columns or 80\n if not text.strip():\n return []\n # Split by explicit newlines and wrap long lines\n lines = []\n for line in text.split('\\n'):\n while len(line) > term_width:\n lines.append(line[:term_width])\n line = line[term_width:]\n if line:\n lines.append(line)\n return lines", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": "def colored(text, color=None, bold=False):\n fmt = []\n if color== 'red':\n fmt.append('31')\n elif color == 'green':\n fmt.append('32')\n if bold:\n fmt.append('1')\n\n return f\"\\033[{';'.join(fmt)}m{text}\\033[0m\"", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": "def split_line(text: str) -> List[str]:\n \"\"\"Split text into lines based on terminal width.\"\"\"\n term_width = shutil.get_terminal_size().columns or 80\n if not text.strip():\n return []\n # Split by explicit newlines and wrap long lines\n lines = []\n for line in text.split('\\n'):\n while len(line) > term_width:\n lines.append(line[:term_width])\n line = line[term_width:]\n if line:\n lines.append(line)\n return lines", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": "def run_command_with_live_tail(ext: str, command: List[str], output_lines: int = 20,\n refresh_rate: float = 0.1, cwd: Optional[str] = None):\n \"\"\"\n Execute a script-like command with real-time output of the last `output_lines` lines.\n\n - during execution: displays the last `output_lines` lines of output in real-time.\n - On success: Clears the displayed output.\n - On failure: Prints the full command output.\n\n Args:\n ext (str): the name of the native extension currently building.\n command (List[str]): The command to execute, as a list of arguments.\n output_lines (int, optional): Number of terminal lines to display during live output. Defaults to 20.\n refresh_rate (float, optional): Time in seconds between output refreshes. Defaults to 0.1.\n cwd (Optional[str], optional): Working directory to run the command in. 
Defaults to current directory.\n \"\"\"\n # Dump all subprocess output without any buffering if stdout is not a terminal\n if not sys.stdout.isatty():\n return subprocess.run(command, cwd=cwd, check=True)\n # Start time for elapsed time calculation\n start = time.time()\n # Buffer for all output\n all_output = []\n write_buffer = deque(maxlen=output_lines)\n # Current number of lines from sub process displayed\n current_lines = 0\n\n # ANSI escape codes for terminal control\n CLEAR_LINE = '\\033[K'\n MOVE_UP = '\\033[1A'\n SAVE_CURSOR = '\\0337'\n RESTORE_CURSOR = '\\0338'\n CLEAR_REMAINING = '\\033[J'\n\n def write_progress(status: Literal['RUNNING', 'SUCCEED', 'FAILED'] = 'RUNNING',\n new_line: Optional[str] = None):\n \"\"\"Update terminal display with latest output\"\"\"\n nonlocal current_lines, process\n sys.stdout.write(SAVE_CURSOR)\n sys.stdout.write(MOVE_UP * current_lines)\n banner = f\"ext={ext} pid={process.pid} status={status.upper()} elapsed=({time.time()-start:.2f}S)\\n\"\n if status != 'FAILED':\n banner = colored(banner, 'green', bold=True)\n else:\n banner = colored(banner, 'red', bold=True)\n sys.stdout.write(CLEAR_LINE + banner)\n if new_line is not None:\n all_output.append(new_line)\n write_buffer.extend(split_line(ANSI_ESCAPE.sub('', new_line).rstrip()))\n elif status == 'RUNNING':\n sys.stdout.write(RESTORE_CURSOR)\n sys.stdout.flush()\n return\n\n sys.stdout.write(CLEAR_REMAINING)\n if status == 'RUNNING':\n current_lines = 1 + len(write_buffer)\n for text in write_buffer:\n sys.stdout.write(text + '\\n')\n elif status == 'FAILED':\n for text in all_output:\n sys.stdout.write(text)\n sys.stdout.flush()\n\n # Start subprocess\n sys.stdout.write(colored(f'ext={ext} command={\" \".join(str(c) for c in command)}\\n', bold=True))\n sys.stdout.flush()\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=cwd,\n text=True,\n bufsize=1\n )\n\n try:\n write_progress()\n poll_obj = select.poll()\n poll_obj.register(process.stdout, select.POLLIN)\n while process.poll() is None:\n poll_result = poll_obj.poll(refresh_rate * 1000)\n if poll_result:\n write_progress(new_line=process.stdout.readline())\n else:\n write_progress()\n\n # Get any remaining output\n while True:\n line = process.stdout.readline()\n if not line:\n break\n write_progress(new_line=line)\n except BaseException as e:\n process.terminate()\n raise e\n finally:\n exit_code = process.wait()\n write_progress(status='SUCCEED' if exit_code == 0 else 'FAILED')", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": "def get_cmake_abi_args(cmake_args):\n if torch.compiled_with_cxx11_abi():\n cmake_args.append(\"-D_GLIBCXX_USE_CXX11_ABI=1\")\n else:\n cmake_args.append(\"-D_GLIBCXX_USE_CXX11_ABI=0\")\n return cmake_args", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": " def get_musa_bare_metal_version(self, musa_dir):\n raw_output = subprocess.run(\n [musa_dir + \"/bin/mcc\", \"-v\"], check=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode(\"utf-8\")\n output = raw_output.split()\n release_idx = output.index(\"version\") + 1\n bare_metal_version = parse(output[release_idx].split(\",\")[0])\n musa_version = f\"{bare_metal_version.major}{bare_metal_version.minor}\"\n return musa_version", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", 
"file_path": "setup.py", "stars": 14577, "label": 0} +{"function": " def get_rocm_bare_metal_version(self, rocm_dir):\n \"\"\"\n Get the ROCm version from the ROCm installation directory.\n\n Args:\n rocm_dir: Path to the ROCm installation directory\n\n Returns:\n A string representation of the ROCm version (e.g., \"63\" for ROCm 6.3)\n \"\"\"\n try:\n # Try using rocm_agent_enumerator to get version info\n raw_output = subprocess.check_output(\n [rocm_dir + \"/bin/rocminfo\", \"--version\"],\n universal_newlines=True,\n stderr=subprocess.STDOUT)\n # Extract version number from output\n match = re.search(r'(\\d+\\.\\d+)', raw_output)\n if match:\n version_str = match.group(1)\n version = parse(version_str)\n rocm_version = f\"{version.major}{version.minor}\"\n return rocm_version\n except (subprocess.CalledProcessError, FileNotFoundError):\n # If rocminfo --version fails, try alternative methods\n pass\n\n try:\n # Try reading version from release file\n with open(os.path.join(rocm_dir, \"share/doc/hip/version.txt\"), \"r\") as f:\n version_str = f.read().strip()\n version = parse(version_str)\n rocm_version = f\"{version.major}{version.minor}\"\n return rocm_version\n except (FileNotFoundError, IOError):\n pass\n\n # If all else fails, try to extract from directory name\n dir_name = os.path.basename(os.path.normpath(rocm_dir))\n match = re.search(r'rocm-(\\d+\\.\\d+)', dir_name)\n if match:\n version_str = match.group(1)\n version = parse(version_str)\n rocm_version = f\"{version.major}{version.minor}\"\n return rocm_version\n\n # Fallback to extracting from hipcc version\n try:\n raw_output = subprocess.check_output(\n [rocm_dir + \"/bin/hipcc\", \"--version\"],\n universal_newlines=True,\n stderr=subprocess.STDOUT)\n match = re.search(r'HIP version: (\\d+\\.\\d+)', raw_output)\n if match:\n version_str = match.group(1)\n version = parse(version_str)\n rocm_version = f\"{version.major}{version.minor}\"\n return rocm_version\n except (subprocess.CalledProcessError, FileNotFoundError):\n pass\n\n # If we still can't determine the version, raise an error\n raise ValueError(f\"Could not determine ROCm version from directory: {rocm_dir}\")", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": " def get_cuda_bare_metal_version(self, cuda_dir):\n raw_output = subprocess.check_output(\n [cuda_dir + \"/bin/nvcc\", \"-V\"], universal_newlines=True)\n output = raw_output.split()\n release_idx = output.index(\"release\") + 1\n bare_metal_version = parse(output[release_idx].split(\",\")[0])\n cuda_version = f\"{bare_metal_version.major}{bare_metal_version.minor}\"\n return cuda_version", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": " def get_cuda_version_of_torch(self):\n torch_cuda_version = parse(torch.version.cuda)\n cuda_version = f\"{torch_cuda_version.major}{torch_cuda_version.minor}\"\n return cuda_version", "creation_date": "2024-07-27T08:06:58Z", "repo": "kvcache-ai/ktransformers", "file_path": "setup.py", "stars": 14577, "label": 0} +{"function": "async def test_simple_echo():\n \"\"\"Test the simple echo server\"\"\"\n from examples.simple_echo import mcp\n\n async with Client(mcp) as client:\n result = await client.call_tool_mcp(\"echo\", {\"text\": \"hello\"})\n assert len(result.content) == 1\n assert result.content[0].text == \"hello\" # type: ignore[attr-defined]", "creation_date": 
"2025-05-03T13:57:23Z", "repo": "jlowin/fastmcp", "file_path": "tests/test_examples.py", "stars": 14416, "label": 0} +{"function": "async def test_complex_inputs():\n \"\"\"Test the complex inputs server\"\"\"\n from examples.complex_inputs import mcp\n\n async with Client(mcp) as client:\n tank = {\"shrimp\": [{\"name\": \"bob\"}, {\"name\": \"alice\"}]}\n result = await client.call_tool_mcp(\n \"name_shrimp\", {\"tank\": tank, \"extra_names\": [\"charlie\"]}\n )\n assert len(result.content) == 1\n assert result.content[0].text == '[\"bob\",\"alice\",\"charlie\"]' # type: ignore[attr-defined]", "creation_date": "2025-05-03T13:57:23Z", "repo": "jlowin/fastmcp", "file_path": "tests/test_examples.py", "stars": 14416, "label": 0} +{"function": "async def test_desktop(monkeypatch):\n \"\"\"Test the desktop server\"\"\"\n from examples.desktop import mcp\n\n async with Client(mcp) as client:\n # Test the add function\n result = await client.call_tool_mcp(\"add\", {\"a\": 1, \"b\": 2})\n assert len(result.content) == 1\n assert result.content[0].text == \"3\" # type: ignore[attr-defined]\n\n async with Client(mcp) as client:\n result = await client.read_resource(AnyUrl(\"greeting://rooter12\"))\n assert len(result) == 1\n assert result[0].text == \"Hello, rooter12!\" # type: ignore[attr-defined]", "creation_date": "2025-05-03T13:57:23Z", "repo": "jlowin/fastmcp", "file_path": "tests/test_examples.py", "stars": 14416, "label": 0} +{"function": "async def test_echo():\n \"\"\"Test the echo server\"\"\"\n from examples.echo import mcp\n\n async with Client(mcp) as client:\n result = await client.call_tool_mcp(\"echo_tool\", {\"text\": \"hello\"})\n assert len(result.content) == 1\n assert result.content[0].text == \"hello\" # type: ignore[attr-defined]\n\n async with Client(mcp) as client:\n result = await client.read_resource(AnyUrl(\"echo://static\"))\n assert len(result) == 1\n assert result[0].text == \"Echo!\" # type: ignore[attr-defined]\n\n async with Client(mcp) as client:\n result = await client.read_resource(AnyUrl(\"echo://server42\"))\n assert len(result) == 1\n assert result[0].text == \"Echo: server42\" # type: ignore[attr-defined]\n\n async with Client(mcp) as client:\n result = await client.get_prompt(\"echo\", {\"text\": \"hello\"})\n assert len(result.messages) == 1\n assert result.messages[0].content.text == \"hello\" # type: ignore[attr-defined]", "creation_date": "2025-05-03T13:57:23Z", "repo": "jlowin/fastmcp", "file_path": "tests/test_examples.py", "stars": 14416, "label": 0} +{"function": " def test_init(self):\n \"\"\"Test that a TimedCache can be initialized with an expiration.\"\"\"\n expiration = datetime.timedelta(seconds=10)\n cache = TimedCache(expiration)\n assert cache.expiration == expiration\n assert isinstance(cache.cache, dict)\n assert len(cache.cache) == 0", "creation_date": "2025-05-09T16:15:47Z", "repo": "jlowin/fastmcp", "file_path": "tests/utilities/test_cache.py", "stars": 14416, "label": 0} +{"function": " def test_set(self):\n \"\"\"Test that values can be set in the cache.\"\"\"\n cache = TimedCache(datetime.timedelta(seconds=10))\n key, value = \"test_key\", \"test_value\"\n\n with patch(\"datetime.datetime\") as mock_datetime:\n now = datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc)\n mock_datetime.now.return_value = now\n\n cache.set(key, value)\n\n # Check that the value is stored with the correct expiration\n assert key in cache.cache\n stored_value, expiration = cache.cache[key]\n assert stored_value == value\n assert expiration == now 
+ datetime.timedelta(seconds=10)", "creation_date": "2025-05-09T16:15:47Z", "repo": "jlowin/fastmcp", "file_path": "tests/utilities/test_cache.py", "stars": 14416, "label": 0} +{"function": " def test_get_found(self):\n \"\"\"Test retrieving a value that exists and has not expired.\"\"\"\n cache = TimedCache(datetime.timedelta(seconds=10))\n key, value = \"test_key\", \"test_value\"\n\n # Set a future expiration time\n future = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(\n seconds=30\n )\n cache.cache[key] = (value, future)\n\n # The value should be returned\n assert cache.get(key) == value", "creation_date": "2025-05-09T16:15:47Z", "repo": "jlowin/fastmcp", "file_path": "tests/utilities/test_cache.py", "stars": 14416, "label": 0} +{"function": " def test_get_expired(self):\n \"\"\"Test retrieving a value that exists but has expired.\"\"\"\n cache = TimedCache(datetime.timedelta(seconds=10))\n key, value = \"test_key\", \"test_value\"\n\n # Set a past expiration time\n past = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(\n seconds=1\n )\n cache.cache[key] = (value, past)\n\n # Should return NOT_FOUND\n assert cache.get(key) is TimedCache.NOT_FOUND", "creation_date": "2025-05-09T16:15:47Z", "repo": "jlowin/fastmcp", "file_path": "tests/utilities/test_cache.py", "stars": 14416, "label": 0} +{"function": " def test_get_not_found(self):\n \"\"\"Test retrieving a value that doesn't exist in the cache.\"\"\"\n cache = TimedCache(datetime.timedelta(seconds=10))\n\n # Key doesn't exist\n assert cache.get(\"nonexistent_key\") is TimedCache.NOT_FOUND", "creation_date": "2025-05-09T16:15:47Z", "repo": "jlowin/fastmcp", "file_path": "tests/utilities/test_cache.py", "stars": 14416, "label": 0} +{"function": " def test_clear(self):\n \"\"\"Test that the cache can be cleared.\"\"\"\n cache = TimedCache(datetime.timedelta(seconds=10))\n\n # Add some items\n cache.set(\"key1\", \"value1\")\n cache.set(\"key2\", \"value2\")\n assert len(cache.cache) == 2\n\n # Clear the cache\n cache.clear()\n assert len(cache.cache) == 0", "creation_date": "2025-05-09T16:15:47Z", "repo": "jlowin/fastmcp", "file_path": "tests/utilities/test_cache.py", "stars": 14416, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser(description=\"Gradio WebUI for Browser Agent\")\n parser.add_argument(\"--ip\", type=str, default=\"127.0.0.1\", help=\"IP address to bind to\")\n parser.add_argument(\"--port\", type=int, default=7788, help=\"Port to listen on\")\n parser.add_argument(\"--theme\", type=str, default=\"Ocean\", choices=theme_map.keys(), help=\"Theme to use for the UI\")\n args = parser.parse_args()\n\n demo = create_ui(theme_name=args.theme)\n demo.queue().launch(server_name=args.ip, server_port=args.port)", "creation_date": "2025-01-08T15:35:09Z", "repo": "browser-use/web-ui", "file_path": "webui.py", "stars": 14100, "label": 0} +{"function": "async def test_browser_use_agent():\n from browser_use.browser.browser import Browser, BrowserConfig\n from browser_use.browser.context import (\n BrowserContextConfig\n )\n from browser_use.agent.service import Agent\n\n from src.browser.custom_browser import CustomBrowser\n from src.controller.custom_controller import CustomController\n from src.utils import llm_provider\n from src.agent.browser_use.browser_use_agent import BrowserUseAgent\n\n llm = llm_provider.get_llm_model(\n provider=\"openai\",\n model_name=\"gpt-4o\",\n temperature=0.8,\n )\n\n # llm = llm_provider.get_llm_model(\n # provider=\"google\",\n # 
model_name=\"gemini-2.0-flash\",\n # temperature=0.6,\n # api_key=os.getenv(\"GOOGLE_API_KEY\", \"\")\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"deepseek\",\n # model_name=\"deepseek-reasoner\",\n # temperature=0.8\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"deepseek\",\n # model_name=\"deepseek-chat\",\n # temperature=0.8\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"ollama\", model_name=\"qwen2.5:7b\", temperature=0.5\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"ollama\", model_name=\"deepseek-r1:14b\", temperature=0.5\n # )\n\n window_w, window_h = 1280, 1100\n\n # llm = llm_provider.get_llm_model(\n # provider=\"azure_openai\",\n # model_name=\"gpt-4o\",\n # temperature=0.5,\n # base_url=os.getenv(\"AZURE_OPENAI_ENDPOINT\", \"\"),\n # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\", \"\"),\n # )\n\n mcp_server_config = {\n \"mcpServers\": {\n # \"markitdown\": {\n # \"command\": \"docker\",\n # \"args\": [\n # \"run\",\n # \"--rm\",\n # \"-i\",\n # \"markitdown-mcp:latest\"\n # ]\n # },\n \"desktop-commander\": {\n \"command\": \"npx\",\n \"args\": [\n \"-y\",\n \"@wonderwhy-er/desktop-commander\"\n ]\n },\n }\n }\n controller = CustomController()\n await controller.setup_mcp_client(mcp_server_config)\n use_own_browser = True\n use_vision = True # Set to False when using DeepSeek\n\n max_actions_per_step = 10\n browser = None\n browser_context = None\n\n try:\n extra_browser_args = []\n if use_own_browser:\n browser_binary_path = os.getenv(\"BROWSER_PATH\", None)\n if browser_binary_path == \"\":\n browser_binary_path = None\n browser_user_data = os.getenv(\"BROWSER_USER_DATA\", None)\n if browser_user_data:\n extra_browser_args += [f\"--user-data-dir={browser_user_data}\"]\n else:\n browser_binary_path = None\n browser = CustomBrowser(\n config=BrowserConfig(\n headless=False,\n browser_binary_path=browser_binary_path,\n extra_browser_args=extra_browser_args,\n new_context_config=BrowserContextConfig(\n window_width=window_w,\n window_height=window_h,\n )\n )\n )\n browser_context = await browser.new_context(\n config=BrowserContextConfig(\n trace_path=None,\n save_recording_path=None,\n save_downloads_path=\"./tmp/downloads\",\n window_height=window_h,\n window_width=window_w,\n )\n )\n agent = BrowserUseAgent(\n # task=\"download pdf from https://arxiv.org/pdf/2311.16498 and rename this pdf to 'mcp-test.pdf'\",\n task=\"give me nvidia stock price\",\n llm=llm,\n browser=browser,\n browser_context=browser_context,\n controller=controller,\n use_vision=use_vision,\n max_actions_per_step=max_actions_per_step,\n generate_gif=True\n )\n history: AgentHistoryList = await agent.run(max_steps=100)\n\n print(\"Final Result:\")\n pprint(history.final_result(), indent=4)\n\n print(\"\\nErrors:\")\n pprint(history.errors(), indent=4)\n\n except Exception:\n import traceback\n traceback.print_exc()\n finally:\n if browser_context:\n await browser_context.close()\n if browser:\n await browser.close()\n if controller:\n await controller.close_mcp_client()", "creation_date": "2025-04-27T13:21:56Z", "repo": "browser-use/web-ui", "file_path": "tests/test_agents.py", "stars": 14100, "label": 0} +{"function": "async def test_browser_use_parallel():\n from browser_use.browser.browser import Browser, BrowserConfig\n from browser_use.browser.context import (\n BrowserContextConfig,\n )\n from browser_use.agent.service import Agent\n\n from src.browser.custom_browser import CustomBrowser\n from src.controller.custom_controller import CustomController\n from src.utils import 
llm_provider\n from src.agent.browser_use.browser_use_agent import BrowserUseAgent\n\n # llm = utils.get_llm_model(\n # provider=\"openai\",\n # model_name=\"gpt-4o\",\n # temperature=0.8,\n # base_url=os.getenv(\"OPENAI_ENDPOINT\", \"\"),\n # api_key=os.getenv(\"OPENAI_API_KEY\", \"\"),\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"google\",\n # model_name=\"gemini-2.0-flash\",\n # temperature=0.6,\n # api_key=os.getenv(\"GOOGLE_API_KEY\", \"\")\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"deepseek\",\n # model_name=\"deepseek-reasoner\",\n # temperature=0.8\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"deepseek\",\n # model_name=\"deepseek-chat\",\n # temperature=0.8\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"ollama\", model_name=\"qwen2.5:7b\", temperature=0.5\n # )\n\n # llm = utils.get_llm_model(\n # provider=\"ollama\", model_name=\"deepseek-r1:14b\", temperature=0.5\n # )\n\n window_w, window_h = 1280, 1100\n\n llm = llm_provider.get_llm_model(\n provider=\"azure_openai\",\n model_name=\"gpt-4o\",\n temperature=0.5,\n base_url=os.getenv(\"AZURE_OPENAI_ENDPOINT\", \"\"),\n api_key=os.getenv(\"AZURE_OPENAI_API_KEY\", \"\"),\n )\n\n mcp_server_config = {\n \"mcpServers\": {\n # \"markitdown\": {\n # \"command\": \"docker\",\n # \"args\": [\n # \"run\",\n # \"--rm\",\n # \"-i\",\n # \"markitdown-mcp:latest\"\n # ]\n # },\n \"desktop-commander\": {\n \"command\": \"npx\",\n \"args\": [\n \"-y\",\n \"@wonderwhy-er/desktop-commander\"\n ]\n },\n # \"filesystem\": {\n # \"command\": \"npx\",\n # \"args\": [\n # \"-y\",\n # \"@modelcontextprotocol/server-filesystem\",\n # \"/Users/xxx/ai_workspace\",\n # ]\n # },\n }\n }\n controller = CustomController()\n await controller.setup_mcp_client(mcp_server_config)\n use_own_browser = True\n use_vision = True # Set to False when using DeepSeek\n\n max_actions_per_step = 10\n browser = None\n browser_context = None\n\n try:\n extra_browser_args = []\n if use_own_browser:\n browser_binary_path = os.getenv(\"BROWSER_PATH\", None)\n if browser_binary_path == \"\":\n browser_binary_path = None\n browser_user_data = os.getenv(\"BROWSER_USER_DATA\", None)\n if browser_user_data:\n extra_browser_args += [f\"--user-data-dir={browser_user_data}\"]\n else:\n browser_binary_path = None\n browser = CustomBrowser(\n config=BrowserConfig(\n headless=False,\n browser_binary_path=browser_binary_path,\n extra_browser_args=extra_browser_args,\n new_context_config=BrowserContextConfig(\n window_width=window_w,\n window_height=window_h,\n )\n )\n )\n browser_context = await browser.new_context(\n config=BrowserContextConfig(\n trace_path=None,\n save_recording_path=None,\n save_downloads_path=\"./tmp/downloads\",\n window_height=window_h,\n window_width=window_w,\n force_new_context=True\n )\n )\n agents = [\n BrowserUseAgent(task=task, llm=llm, browser=browser, controller=controller)\n for task in [\n 'Search Google for weather in Tokyo',\n # 'Check Reddit front page title',\n # 'Find NASA image of the day',\n # 'Check top story on CNN',\n # 'Search latest SpaceX launch date',\n # 'Look up population of Paris',\n 'Find current time in Sydney',\n 'Check who won last Super Bowl',\n # 'Search trending topics on Twitter',\n ]\n ]\n\n history = await asyncio.gather(*[agent.run() for agent in agents])\n print(\"Final Result:\")\n pprint(history.final_result(), indent=4)\n\n print(\"\\nErrors:\")\n pprint(history.errors(), indent=4)\n\n pdb.set_trace()\n\n except Exception:\n import traceback\n\n traceback.print_exc()\n finally:\n if 
browser_context:\n await browser_context.close()\n if browser:\n await browser.close()\n if controller:\n await controller.close_mcp_client()", "creation_date": "2025-04-27T13:21:56Z", "repo": "browser-use/web-ui", "file_path": "tests/test_agents.py", "stars": 14100, "label": 0} +{"function": "async def test_deep_research_agent():\n from src.agent.deep_research.deep_research_agent import DeepResearchAgent, PLAN_FILENAME, REPORT_FILENAME\n from src.utils import llm_provider\n\n llm = llm_provider.get_llm_model(\n provider=\"openai\",\n model_name=\"gpt-4o\",\n temperature=0.5\n )\n\n # llm = llm_provider.get_llm_model(\n # provider=\"bedrock\",\n # )\n\n mcp_server_config = {\n \"mcpServers\": {\n \"desktop-commander\": {\n \"command\": \"npx\",\n \"args\": [\n \"-y\",\n \"@wonderwhy-er/desktop-commander\"\n ]\n },\n }\n }\n\n browser_config = {\"headless\": False, \"window_width\": 1280, \"window_height\": 1100, \"use_own_browser\": False}\n agent = DeepResearchAgent(llm=llm, browser_config=browser_config, mcp_server_config=mcp_server_config)\n research_topic = \"Give me investment advices of nvidia and tesla.\"\n task_id_to_resume = \"\" # Set this to resume a previous task ID\n\n print(f\"Starting research on: {research_topic}\")\n\n try:\n # Call run and wait for the final result dictionary\n result = await agent.run(research_topic,\n task_id=task_id_to_resume,\n save_dir=\"./tmp/deep_research\",\n max_parallel_browsers=1,\n )\n\n print(\"\\n--- Research Process Ended ---\")\n print(f\"Status: {result.get('status')}\")\n print(f\"Message: {result.get('message')}\")\n print(f\"Task ID: {result.get('task_id')}\")\n\n # Check the final state for the report\n final_state = result.get('final_state', {})\n if final_state:\n print(\"\\n--- Final State Summary ---\")\n print(\n f\" Plan Steps Completed: {sum(1 for item in final_state.get('research_plan', []) if item.get('status') == 'completed')}\")\n print(f\" Total Search Results Logged: {len(final_state.get('search_results', []))}\")\n if final_state.get(\"final_report\"):\n print(\" Final Report: Generated (content omitted). 
You can find it in the output directory.\")\n # print(\"\\n--- Final Report ---\") # Optionally print report\n # print(final_state[\"final_report\"])\n else:\n print(\" Final Report: Not generated.\")\n else:\n print(\"Final state information not available.\")\n\n\n except Exception as e:\n print(f\"\\n--- An unhandled error occurred outside the agent run ---\")\n print(e)", "creation_date": "2025-04-27T13:21:56Z", "repo": "browser-use/web-ui", "file_path": "tests/test_agents.py", "stars": 14100, "label": 0} +{"function": "async def test_mcp_client():\n from src.utils.mcp_client import setup_mcp_client_and_tools, create_tool_param_model\n\n test_server_config = {\n \"mcpServers\": {\n # \"markitdown\": {\n # \"command\": \"docker\",\n # \"args\": [\n # \"run\",\n # \"--rm\",\n # \"-i\",\n # \"markitdown-mcp:latest\"\n # ]\n # },\n \"desktop-commander\": {\n \"command\": \"npx\",\n \"args\": [\n \"-y\",\n \"@wonderwhy-er/desktop-commander\"\n ]\n },\n # \"filesystem\": {\n # \"command\": \"npx\",\n # \"args\": [\n # \"-y\",\n # \"@modelcontextprotocol/server-filesystem\",\n # \"/Users/xxx/ai_workspace\",\n # ]\n # },\n }\n }\n\n mcp_tools, mcp_client = await setup_mcp_client_and_tools(test_server_config)\n\n for tool in mcp_tools:\n tool_param_model = create_tool_param_model(tool)\n print(tool.name)\n print(tool.description)\n print(tool_param_model.model_json_schema())\n pdb.set_trace()", "creation_date": "2025-04-26T15:14:40Z", "repo": "browser-use/web-ui", "file_path": "tests/test_controller.py", "stars": 14100, "label": 0} +{"function": "async def test_controller_with_mcp():\n import os\n from src.controller.custom_controller import CustomController\n from browser_use.controller.registry.views import ActionModel\n\n mcp_server_config = {\n \"mcpServers\": {\n # \"markitdown\": {\n # \"command\": \"docker\",\n # \"args\": [\n # \"run\",\n # \"--rm\",\n # \"-i\",\n # \"markitdown-mcp:latest\"\n # ]\n # },\n \"desktop-commander\": {\n \"command\": \"npx\",\n \"args\": [\n \"-y\",\n \"@wonderwhy-er/desktop-commander\"\n ]\n },\n # \"filesystem\": {\n # \"command\": \"npx\",\n # \"args\": [\n # \"-y\",\n # \"@modelcontextprotocol/server-filesystem\",\n # \"/Users/xxx/ai_workspace\",\n # ]\n # },\n }\n }\n\n controller = CustomController()\n await controller.setup_mcp_client(mcp_server_config)\n action_name = \"mcp.desktop-commander.execute_command\"\n action_info = controller.registry.registry.actions[action_name]\n param_model = action_info.param_model\n print(param_model.model_json_schema())\n params = {\"command\": f\"python ./tmp/test.py\"\n }\n validated_params = param_model(**params)\n ActionModel_ = controller.registry.create_action_model()\n # Create ActionModel instance with the validated parameters\n action_model = ActionModel_(**{action_name: validated_params})\n result = await controller.act(action_model)\n result = result.extracted_content\n print(result)\n if result and \"Command is still running. 
Use read_output to get more output.\" in result and \"PID\" in \\\n result.split(\"\\n\")[0]:\n pid = int(result.split(\"\\n\")[0].split(\"PID\")[-1].strip())\n action_name = \"mcp.desktop-commander.read_output\"\n action_info = controller.registry.registry.actions[action_name]\n param_model = action_info.param_model\n print(param_model.model_json_schema())\n params = {\"pid\": pid}\n validated_params = param_model(**params)\n action_model = ActionModel_(**{action_name: validated_params})\n output_result = \"\"\n while True:\n time.sleep(1)\n result = await controller.act(action_model)\n result = result.extracted_content\n if result:\n pdb.set_trace()\n output_result = result\n break\n print(output_result)\n pdb.set_trace()\n await controller.close_mcp_client()\n pdb.set_trace()", "creation_date": "2025-04-26T15:14:40Z", "repo": "browser-use/web-ui", "file_path": "tests/test_controller.py", "stars": 14100, "label": 0} +{"function": "def create_message_content(text, image_path=None):\n content = [{\"type\": \"text\", \"text\": text}]\n image_format = \"png\" if image_path and image_path.endswith(\".png\") else \"jpeg\"\n if image_path:\n from src.utils import utils\n image_data = utils.encode_image(image_path)\n content.append({\n \"type\": \"image_url\",\n \"image_url\": {\"url\": f\"data:image/{image_format};base64,{image_data}\"}\n })\n return content", "creation_date": "2025-01-02T01:30:46Z", "repo": "browser-use/web-ui", "file_path": "tests/test_llm_api.py", "stars": 14100, "label": 0} +{"function": "def get_env_value(key, provider):\n env_mappings = {\n \"openai\": {\"api_key\": \"OPENAI_API_KEY\", \"base_url\": \"OPENAI_ENDPOINT\"},\n \"azure_openai\": {\"api_key\": \"AZURE_OPENAI_API_KEY\", \"base_url\": \"AZURE_OPENAI_ENDPOINT\"},\n \"google\": {\"api_key\": \"GOOGLE_API_KEY\"},\n \"deepseek\": {\"api_key\": \"DEEPSEEK_API_KEY\", \"base_url\": \"DEEPSEEK_ENDPOINT\"},\n \"mistral\": {\"api_key\": \"MISTRAL_API_KEY\", \"base_url\": \"MISTRAL_ENDPOINT\"},\n \"alibaba\": {\"api_key\": \"ALIBABA_API_KEY\", \"base_url\": \"ALIBABA_ENDPOINT\"},\n \"moonshot\": {\"api_key\": \"MOONSHOT_API_KEY\", \"base_url\": \"MOONSHOT_ENDPOINT\"},\n \"ibm\": {\"api_key\": \"IBM_API_KEY\", \"base_url\": \"IBM_ENDPOINT\"}\n }\n\n if provider in env_mappings and key in env_mappings[provider]:\n return os.getenv(env_mappings[provider][key], \"\")\n return \"\"", "creation_date": "2025-01-02T01:30:46Z", "repo": "browser-use/web-ui", "file_path": "tests/test_llm_api.py", "stars": 14100, "label": 0} +{"function": "def test_llm(config, query, image_path=None, system_message=None):\n from src.utils import utils, llm_provider\n\n # Special handling for Ollama-based models\n if config.provider == \"ollama\":\n if \"deepseek-r1\" in config.model_name:\n from src.utils.llm_provider import DeepSeekR1ChatOllama\n llm = DeepSeekR1ChatOllama(model=config.model_name)\n else:\n llm = ChatOllama(model=config.model_name)\n\n ai_msg = llm.invoke(query)\n print(ai_msg.content)\n if \"deepseek-r1\" in config.model_name:\n pdb.set_trace()\n return\n\n # For other providers, use the standard configuration\n llm = llm_provider.get_llm_model(\n provider=config.provider,\n model_name=config.model_name,\n temperature=config.temperature,\n base_url=config.base_url or get_env_value(\"base_url\", config.provider),\n api_key=config.api_key or get_env_value(\"api_key\", config.provider)\n )\n\n # Prepare messages for non-Ollama models\n messages = []\n if system_message:\n 
messages.append(SystemMessage(content=create_message_content(system_message)))\n messages.append(HumanMessage(content=create_message_content(query, image_path)))\n ai_msg = llm.invoke(messages)\n\n # Handle different response types\n if hasattr(ai_msg, \"reasoning_content\"):\n print(ai_msg.reasoning_content)\n print(ai_msg.content)", "creation_date": "2025-01-02T01:30:46Z", "repo": "browser-use/web-ui", "file_path": "tests/test_llm_api.py", "stars": 14100, "label": 0} +{"function": "def test_openai_model():\n config = LLMConfig(provider=\"openai\", model_name=\"gpt-4o\")\n test_llm(config, \"Describe this image\", \"assets/examples/test.png\")", "creation_date": "2025-01-02T01:30:46Z", "repo": "browser-use/web-ui", "file_path": "tests/test_llm_api.py", "stars": 14100, "label": 0} +{"function": "def install_package(*packages):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *packages])", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "install.py", "stars": 13903, "label": 0} +{"function": "def check_nvidia_gpu():\n install_package(\"pynvml\")\n import pynvml\n from translations.translations import translate as t\n initialized = False\n try:\n pynvml.nvmlInit()\n initialized = True\n device_count = pynvml.nvmlDeviceGetCount()\n if device_count > 0:\n print(t(\"Detected NVIDIA GPU(s)\"))\n for i in range(device_count):\n handle = pynvml.nvmlDeviceGetHandleByIndex(i)\n name = pynvml.nvmlDeviceGetName(handle)\n print(f\"GPU {i}: {name}\")\n return True\n else:\n print(t(\"No NVIDIA GPU detected\"))\n return False\n except pynvml.NVMLError:\n print(t(\"No NVIDIA GPU detected or NVIDIA drivers not properly installed\"))\n return False\n finally:\n if initialized:\n pynvml.nvmlShutdown()", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "install.py", "stars": 13903, "label": 0} +{"function": "def check_ffmpeg():\n from rich.console import Console\n from rich.panel import Panel\n from translations.translations import translate as t\n console = Console()\n\n try:\n # Check if ffmpeg is installed\n subprocess.run(['ffmpeg', '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n console.print(Panel(t(\"\u2705 FFmpeg is already installed\"), style=\"green\"))\n return True\n except (subprocess.CalledProcessError, FileNotFoundError):\n system = platform.system()\n install_cmd = \"\"\n \n if system == \"Windows\":\n install_cmd = \"choco install ffmpeg\"\n extra_note = t(\"Install Chocolatey first (https://chocolatey.org/)\")\n elif system == \"Darwin\":\n install_cmd = \"brew install ffmpeg\"\n extra_note = t(\"Install Homebrew first (https://brew.sh/)\")\n elif system == \"Linux\":\n install_cmd = \"sudo apt install ffmpeg # Ubuntu/Debian\\nsudo yum install ffmpeg # CentOS/RHEL\"\n extra_note = t(\"Use your distribution's package manager\")\n \n console.print(Panel.fit(\n t(\"\u274c FFmpeg not found\\n\\n\") +\n f\"{t('\ud83d\udee0\ufe0f Install using:')}\\n[bold cyan]{install_cmd}[/bold cyan]\\n\\n\" +\n f\"{t('\ud83d\udca1 Note:')}\\n{extra_note}\\n\\n\" +\n f\"{t('\ud83d\udd04 After installing FFmpeg, please run this installer again:')}\\n[bold cyan]python install.py[/bold cyan]\",\n style=\"red\"\n ))\n raise SystemExit(t(\"FFmpeg is required. 
Please install it and run the installer again.\"))", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "install.py", "stars": 13903, "label": 0} +{"function": "def main():\n install_package(\"requests\", \"rich\", \"ruamel.yaml\", \"InquirerPy\")\n from rich.console import Console\n from rich.panel import Panel\n from rich.box import DOUBLE\n from InquirerPy import inquirer\n from translations.translations import translate as t\n from translations.translations import DISPLAY_LANGUAGES\n from core.utils.config_utils import load_key, update_key\n from core.utils.decorator import except_handler\n\n console = Console()\n \n width = max(len(line) for line in ascii_logo.splitlines()) + 4\n welcome_panel = Panel(\n ascii_logo,\n width=width,\n box=DOUBLE,\n title=\"[bold green]\ud83c\udf0f[/bold green]\",\n border_style=\"bright_blue\"\n )\n console.print(welcome_panel)\n # Language selection\n current_language = load_key(\"display_language\")\n # Find the display name for current language code\n current_display = next((k for k, v in DISPLAY_LANGUAGES.items() if v == current_language), \"\ud83c\uddec\ud83c\udde7 English\")\n selected_language = DISPLAY_LANGUAGES[inquirer.select(\n message=\"Select language / \u9009\u62e9\u8bed\u8a00 / \u9078\u64c7\u8a9e\u8a00 / \u8a00\u8a9e\u3092\u9078\u629e / Seleccionar idioma / S\u00e9lectionner la langue / \u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u044f\u0437\u044b\u043a:\",\n choices=list(DISPLAY_LANGUAGES.keys()),\n default=current_display\n ).execute()]\n update_key(\"display_language\", selected_language)\n\n console.print(Panel.fit(t(\"\ud83d\ude80 Starting Installation\"), style=\"bold magenta\"))\n\n # Configure mirrors\n # add a check to ask user if they want to configure mirrors\n if inquirer.confirm(\n message=t(\"Do you need to auto-configure PyPI mirrors? (Recommended if you have difficulty accessing pypi.org)\"),\n default=True\n ).execute():\n from core.utils.pypi_autochoose import main as choose_mirror\n choose_mirror()\n\n # Detect system and GPU\n has_gpu = platform.system() != 'Darwin' and check_nvidia_gpu()\n if has_gpu:\n console.print(Panel(t(\"\ud83c\udfae NVIDIA GPU detected, installing CUDA version of PyTorch...\"), style=\"cyan\"))\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"torch==2.0.0\", \"torchaudio==2.0.0\", \"--index-url\", \"https://download.pytorch.org/whl/cu118\"])\n else:\n system_name = \"\ud83c\udf4e MacOS\" if platform.system() == 'Darwin' else \"\ud83d\udcbb No NVIDIA GPU\"\n console.print(Panel(t(f\"{system_name} detected, installing CPU version of PyTorch... 
Note: it might be slow during whisperX transcription.\"), style=\"cyan\"))\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"torch==2.1.2\", \"torchaudio==2.1.2\"])\n\n @except_handler(\"Failed to install project\")\n def install_requirements():\n console.print(Panel(t(\"Installing project in editable mode using `pip install -e .`\"), style=\"cyan\"))\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"-e\", \".\"], env={**os.environ, \"PIP_NO_CACHE_DIR\": \"0\", \"PYTHONIOENCODING\": \"utf-8\"})\n\n @except_handler(\"Failed to install Noto fonts\")\n def install_noto_font():\n # Detect Linux distribution type\n if os.path.exists('/etc/debian_version'):\n # Debian/Ubuntu systems\n cmd = ['sudo', 'apt-get', 'install', '-y', 'fonts-noto']\n pkg_manager = \"apt-get\"\n elif os.path.exists('/etc/redhat-release'):\n # RHEL/CentOS/Fedora systems\n cmd = ['sudo', 'yum', 'install', '-y', 'google-noto*']\n pkg_manager = \"yum\"\n else:\n console.print(\"Warning: Unrecognized Linux distribution, please install Noto fonts manually\", style=\"yellow\")\n return\n\n subprocess.run(cmd, check=True)\n console.print(f\"\u2705 Successfully installed Noto fonts using {pkg_manager}\", style=\"green\")\n\n if platform.system() == 'Linux':\n install_noto_font()\n \n install_requirements()\n check_ffmpeg()\n \n # First panel with installation complete and startup command\n panel1_text = (\n t(\"Installation completed\") + \"\\n\\n\" +\n t(\"Now I will run this command to start the application:\") + \"\\n\" +\n \"[bold]streamlit run st.py[/bold]\\n\" +\n t(\"Note: First startup may take up to 1 minute\")\n )\n console.print(Panel(panel1_text, style=\"bold green\"))\n\n # Second panel with troubleshooting tips\n panel2_text = (\n t(\"If the application fails to start:\") + \"\\n\" +\n \"1. \" + t(\"Check your network connection\") + \"\\n\" +\n \"2. \" + t(\"Re-run the installer: [bold]python install.py[/bold]\")\n )\n console.print(Panel(panel2_text, style=\"yellow\"))\n\n # start the application\n subprocess.Popen([\"streamlit\", \"run\", \"st.py\"])", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "install.py", "stars": 13903, "label": 0} +{"function": " def install_requirements():\n console.print(Panel(t(\"Installing project in editable mode using `pip install -e .`\"), style=\"cyan\"))\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"-e\", \".\"], env={**os.environ, \"PIP_NO_CACHE_DIR\": \"0\", \"PYTHONIOENCODING\": \"utf-8\"})", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "install.py", "stars": 13903, "label": 0} +{"function": " def install_noto_font():\n # Detect Linux distribution type\n if os.path.exists('/etc/debian_version'):\n # Debian/Ubuntu systems\n cmd = ['sudo', 'apt-get', 'install', '-y', 'fonts-noto']\n pkg_manager = \"apt-get\"\n elif os.path.exists('/etc/redhat-release'):\n # RHEL/CentOS/Fedora systems\n cmd = ['sudo', 'yum', 'install', '-y', 'google-noto*']\n pkg_manager = \"yum\"\n else:\n console.print(\"Warning: Unrecognized Linux distribution, please install Noto fonts manually\", style=\"yellow\")\n return\n\n subprocess.run(cmd, check=True)\n console.print(f\"\u2705 Successfully installed Noto fonts using {pkg_manager}\", style=\"green\")", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "install.py", "stars": 13903, "label": 0} +{"function": "def text_processing_section():\n st.header(t(\"b. 
Translate and Generate Subtitles\"))\n with st.container(border=True):\n st.markdown(f\"\"\"\n
<p style='font-size: 20px;'>\n {t(\"This stage includes the following steps:\")}\n <p style='font-size: 12px;'>\n 1. {t(\"WhisperX word-level transcription\")}<br>\n 2. {t(\"Sentence segmentation using NLP and LLM\")}<br>\n 3. {t(\"Summarization and multi-step translation\")}<br>\n 4. {t(\"Cutting and aligning long subtitles\")}<br>\n 5. {t(\"Generating timeline and subtitles\")}<br>
\n 6. {t(\"Merging subtitles into the video\")}\n \"\"\", unsafe_allow_html=True)\n\n if not os.path.exists(SUB_VIDEO):\n if st.button(t(\"Start Processing Subtitles\"), key=\"text_processing_button\"):\n process_text()\n st.rerun()\n else:\n if load_key(\"burn_subtitles\"):\n st.video(SUB_VIDEO)\n download_subtitle_zip_button(text=t(\"Download All Srt Files\"))\n \n if st.button(t(\"Archive to 'history'\"), key=\"cleanup_in_text_processing\"):\n cleanup()\n st.rerun()\n return True", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "st.py", "stars": 13903, "label": 0} +{"function": "def process_text():\n with st.spinner(t(\"Using Whisper for transcription...\")):\n _2_asr.transcribe()\n with st.spinner(t(\"Splitting long sentences...\")): \n _3_1_split_nlp.split_by_spacy()\n _3_2_split_meaning.split_sentences_by_meaning()\n with st.spinner(t(\"Summarizing and translating...\")):\n _4_1_summarize.get_summary()\n if load_key(\"pause_before_translate\"):\n input(t(\"\u26a0\ufe0f PAUSE_BEFORE_TRANSLATE. Go to `output/log/terminology.json` to edit terminology. Then press ENTER to continue...\"))\n _4_2_translate.translate_all()\n with st.spinner(t(\"Processing and aligning subtitles...\")): \n _5_split_sub.split_for_sub_main()\n _6_gen_sub.align_timestamp_main()\n with st.spinner(t(\"Merging subtitles to video...\")):\n _7_sub_into_vid.merge_subtitles_to_video()\n \n st.success(t(\"Subtitle processing complete! \ud83c\udf89\"))\n st.balloons()", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "st.py", "stars": 13903, "label": 0} +{"function": "def audio_processing_section():\n st.header(t(\"c. Dubbing\"))\n with st.container(border=True):\n st.markdown(f\"\"\"\n
<p style='font-size: 20px;'>\n {t(\"This stage includes the following steps:\")}\n <p style='font-size: 12px;'>\n 1. {t(\"Generate audio tasks and chunks\")}<br>\n 2. {t(\"Extract reference audio\")}<br>\n 3. {t(\"Generate and merge audio files\")}<br>
\n 4. {t(\"Merge final audio into video\")}\n \"\"\", unsafe_allow_html=True)\n if not os.path.exists(DUB_VIDEO):\n if st.button(t(\"Start Audio Processing\"), key=\"audio_processing_button\"):\n process_audio()\n st.rerun()\n else:\n st.success(t(\"Audio processing is complete! You can check the audio files in the `output` folder.\"))\n if load_key(\"burn_subtitles\"):\n st.video(DUB_VIDEO) \n if st.button(t(\"Delete dubbing files\"), key=\"delete_dubbing_files\"):\n delete_dubbing_files()\n st.rerun()\n if st.button(t(\"Archive to 'history'\"), key=\"cleanup_in_audio_processing\"):\n cleanup()\n st.rerun()", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "st.py", "stars": 13903, "label": 0} +{"function": "def process_audio():\n with st.spinner(t(\"Generate audio tasks\")): \n _8_1_audio_task.gen_audio_task_main()\n _8_2_dub_chunks.gen_dub_chunks()\n with st.spinner(t(\"Extract refer audio\")):\n _9_refer_audio.extract_refer_audio_main()\n with st.spinner(t(\"Generate all audio\")):\n _10_gen_audio.gen_audio()\n with st.spinner(t(\"Merge full audio\")):\n _11_merge_audio.merge_full_audio()\n with st.spinner(t(\"Merge dubbing to the video\")):\n _12_dub_to_vid.merge_video_audio()\n \n st.success(t(\"Audio processing complete! \ud83c\udf87\"))\n st.balloons()", "creation_date": "2024-08-09T11:19:39Z", "repo": "Huanshere/VideoLingo", "file_path": "st.py", "stars": 13903, "label": 0} +{"function": "async def run() -> None:\n file_name = \"skyvern_screenshot.png\"\n png_file_path = f\"{get_skyvern_temp_dir()}/{file_name}\"\n\n while True:\n # run subprocess to take screenshot\n subprocess.run(\n f\"xwd -root | xwdtopnm 2>/dev/null | pnmtopng > {png_file_path}\", shell=True, env={\"DISPLAY\": \":99\"}\n )\n\n # FIXME: upload screenshot to S3 with correct organization id\n try:\n await app.STORAGE.save_streaming_file(\"placeholder_org\", file_name)\n except Exception:\n LOG.info(\"Failed to save screenshot\")\n\n await asyncio.sleep(INTERVAL)", "creation_date": "2024-08-12T16:36:24Z", "repo": "Skyvern-AI/skyvern", "file_path": "run_streaming.py", "stars": 13810, "label": 0} +{"function": "def main() -> None:\n asyncio.run(run())", "creation_date": "2024-08-12T16:36:24Z", "repo": "Skyvern-AI/skyvern", "file_path": "run_streaming.py", "stars": 13810, "label": 0} +{"function": "async def test_openrouter_basic_completion(monkeypatch):\n settings = Settings(\n ENABLE_OPENROUTER=True,\n OPENROUTER_API_KEY=\"key\",\n OPENROUTER_MODEL=\"test-model\",\n LLM_KEY=\"OPENROUTER\",\n )\n SettingsManager.set_settings(settings)\n importlib.reload(config_registry)\n\n monkeypatch.setattr(app, \"ARTIFACT_MANAGER\", DummyArtifactManager())\n\n async_mock = AsyncMock(return_value=DummyResponse('{\"result\": \"ok\"}'))\n monkeypatch.setattr(api_handler_factory.litellm, \"acompletion\", async_mock)\n\n handler = api_handler_factory.LLMAPIHandlerFactory.get_llm_api_handler(\"OPENROUTER\")\n result = await handler(\"hi\", \"test\")\n assert result == {\"result\": \"ok\"}\n async_mock.assert_called_once()", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": "async def test_openrouter_dynamic_model(monkeypatch):\n settings = Settings(\n ENABLE_OPENROUTER=True,\n OPENROUTER_API_KEY=\"key\",\n OPENROUTER_MODEL=\"base-model\",\n LLM_KEY=\"OPENROUTER\",\n )\n SettingsManager.set_settings(settings)\n importlib.reload(config_registry)\n\n monkeypatch.setattr(app, 
\"ARTIFACT_MANAGER\", DummyArtifactManager())\n async_mock = AsyncMock(return_value=DummyResponse('{\"status\": \"ok\"}'))\n monkeypatch.setattr(api_handler_factory.litellm, \"acompletion\", async_mock)\n\n base_handler = api_handler_factory.LLMAPIHandlerFactory.get_llm_api_handler(\"OPENROUTER\")\n override_handler = api_handler_factory.LLMAPIHandlerFactory.get_override_llm_api_handler(\n \"openrouter/other-model\", default=base_handler\n )\n result = await override_handler(\"hi\", \"test\")\n assert result == {\"status\": \"ok\"}\n called_model = async_mock.call_args.kwargs.get(\"model\")\n assert called_model == \"openrouter/other-model\"", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": "async def test_openrouter_error_propagation(monkeypatch):\n class DummyAPIError(Exception):\n pass\n\n settings = Settings(\n ENABLE_OPENROUTER=True,\n OPENROUTER_API_KEY=\"key\",\n OPENROUTER_MODEL=\"test-model\",\n LLM_KEY=\"OPENROUTER\",\n )\n SettingsManager.set_settings(settings)\n importlib.reload(config_registry)\n\n monkeypatch.setattr(app, \"ARTIFACT_MANAGER\", DummyArtifactManager())\n\n async def _raise(*args, **kwargs):\n raise DummyAPIError()\n\n fake_litellm = types.SimpleNamespace(\n acompletion=_raise,\n exceptions=types.SimpleNamespace(APIError=DummyAPIError),\n )\n monkeypatch.setattr(api_handler_factory, \"litellm\", fake_litellm)\n\n handler = api_handler_factory.LLMAPIHandlerFactory.get_llm_api_handler(\"OPENROUTER\")\n with pytest.raises(api_handler_factory.LLMProviderErrorRetryableTask):\n await handler(\"hi\", \"test\")", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": " def __init__(self, content: str):\n super().__init__({\"choices\": [{\"message\": {\"content\": content}}], \"usage\": {}})\n self.choices = [types.SimpleNamespace(message=types.SimpleNamespace(content=content))]", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": " def model_dump_json(self, indent: int = 2):\n return json.dumps(self, indent=indent)", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": " async def create_llm_artifact(self, *args, **kwargs):\n return None", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": " async def _raise(*args, **kwargs):\n raise DummyAPIError()", "creation_date": "2025-07-01T18:02:22Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_openrouter_integration.py", "stars": 13810, "label": 0} +{"function": "def test_encode_url_basic():\n \"\"\"Test basic URL encoding with simple path\"\"\"\n url = \"https://example.com/path with spaces\"\n expected = \"https://example.com/path%20with%20spaces\"\n assert encode_url(url) == expected", "creation_date": "2025-05-29T19:55:48Z", "repo": "Skyvern-AI/skyvern", "file_path": "tests/unit_tests/test_url_validators.py", "stars": 13810, "label": 0} +{"function": "def load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n 
tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": "def load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": " def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": " def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": " def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": " def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = 
self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": " def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "generator.py", "stars": 13729, "label": 0} +{"function": "def llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "models.py", "stars": 13729, "label": 0} +{"function": "def llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )", 
"creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "models.py", "stars": 13729, "label": 0} +{"function": "def _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim", "creation_date": "2025-03-13T20:25:26Z", "repo": "SesameAILabs/csm", "file_path": "models.py", "stars": 13729, "label": 0} +{"function": "def send_request(ques):\n # url = 'https://qanything-test.site.youdao.com/api/local_doc_qa/local_doc_chat'\n url = 'http://localhost:8777/api/local_doc_qa/local_doc_chat'\n headers = {\n 'content-type': 'application/json'\n }\n data = {\n \"user_id\": \"liujx_265\",\n \"kb_ids\": [\"KBf652e9e379c546f1894597dcabdc8e47\"],\n \"question\": ques,\n \"rerank\": False,\n \"history\": []\n }\n try:\n start_time = time.time()\n response = requests.post(url=url, headers=headers, json=data, timeout=60)\n end_time = time.time()\n response_times.append(end_time - start_time)\n res = response.json()\n print(res['response'])\n print(f\"\u54cd\u5e94\u72b6\u6001\u7801: {response.status_code}, \u54cd\u5e94\u65f6\u95f4: {end_time - start_time}\u79d2\")\n except Exception as e:\n print(f\"\u8bf7\u6c42\u53d1\u9001\u5931\u8d25: {e}\")", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def write_to_file_safe(file_name, data):\n # \u83b7\u53d6\u9501\n with lock:\n with open(file_name, 'a') as f:\n f.write(data + '\\n')", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def stream_requests(ques, output_file):\n base_url = \"http://0.0.0.0:8777\"\n URL = base_url + \"/api/local_doc_qa/local_doc_chat\" #\u6d41\u5f0f\n data = {\n \"kb_ids\": [\n \"KBf46828db208c4289a120a34f0fc96147\",\n \"KBc2440f13e98f4736b5ef81cfaebef3a9\",\n \"KBb78af28c73f74fb4ae6ad44b3c53302f\",\n \"KB6c2b097d83be430ab809e361fa8dcc8b\",\n \"KB69331d593f5b4b5bb555a0ea1b145e5b\",\n \"KB3cdc79f8c8d24a14bffd27e6570c33da\"\n ],\n \"question\": ques,\n \"user_id\": \"liujx_265\",\n \"streaming\": False,\n \"rerank\": True,\n \"history\": []\n }\n response = requests.post(\n URL,\n json=data,\n timeout=60,\n stream=True\n )\n print(\"response\", response)\n print(response.iter_lines)\n for line in response.iter_lines(decode_unicode=False, delimiter=b\"\\n\\n\"):\n # for line in response.iter_lines():\n if line:\n yield line", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def no_stream_requests(ques, output_file):\n url = 'https://qanything-local-test-265.site.youdao.com/api/local_doc_qa/local_doc_chat'\n headers = {'content-type': 'application/json'}\n data = {\n \"kb_ids\": [\n \"KBf46828db208c4289a120a34f0fc96147\",\n \"KBc2440f13e98f4736b5ef81cfaebef3a9\",\n \"KBb78af28c73f74fb4ae6ad44b3c53302f\",\n \"KB6c2b097d83be430ab809e361fa8dcc8b\",\n \"KB69331d593f5b4b5bb555a0ea1b145e5b\",\n \"KB3cdc79f8c8d24a14bffd27e6570c33da\"\n ],\n \"question\": ques,\n \"user_id\": \"liujx_265\",\n \"streaming\": False,\n \"rerank\": True,\n \"history\": []\n }\n try:\n response = requests.post(url=url, headers=headers, json=data, timeout=60)\n res = response.json()\n res = data['question'] + '::' + res['response']\n print(res)\n write_to_file_safe(output_file, res)\n except 
Exception as e:\n print(f\"\u8bf7\u6c42\u53d1\u9001\u5931\u8d25: {e}\")", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def test_stream():\n data_raw = {\n \"kb_ids\": [\n \"KBf46828db208c4289a120a34f0fc96147\",\n \"KBc2440f13e98f4736b5ef81cfaebef3a9\",\n \"KBb78af28c73f74fb4ae6ad44b3c53302f\",\n \"KB6c2b097d83be430ab809e361fa8dcc8b\",\n \"KB69331d593f5b4b5bb555a0ea1b145e5b\",\n \"KB3cdc79f8c8d24a14bffd27e6570c33da\"\n ],\n \"question\": \"\u897f\u5357\u4ea4\u901a\u5927\u5b66\u662f211\u9662\u6821\u5417\",\n \"user_id\": \"liujx_265\",\n \"streaming\": True,\n \"rerank\": True,\n \"history\": []\n }\n for i, chunk in enumerate(stream_requests(data_raw)):\n if chunk:\n chunkstr = chunk.decode(\"utf-8\")[6:]\n chunkjs = json.loads(chunkstr)\n print(chunkjs)", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def test():\n data_raw = {\n \"kb_ids\": [\n \"KBf46828db208c4289a120a34f0fc96147\",\n \"KBc2440f13e98f4736b5ef81cfaebef3a9\",\n \"KBb78af28c73f74fb4ae6ad44b3c53302f\",\n \"KB6c2b097d83be430ab809e361fa8dcc8b\",\n \"KB69331d593f5b4b5bb555a0ea1b145e5b\",\n \"KB3cdc79f8c8d24a14bffd27e6570c33da\"\n ],\n \"question\": \"\u897f\u5357\u4ea4\u901a\u5927\u5b66\u662f211\u9662\u6821\u5417\",\n \"user_id\": \"liujx_265\",\n \"rerank\": True,\n \"history\": []\n }\n print(type(no_stream_requests))\n no_stream_requests(data_raw) ", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def measure_latency(ques, output_file, is_stream=False):\n start_time = time.time()\n if is_stream:\n _ = list(stream_requests(ques, output_file))\n else:\n no_stream_requests(ques, output_file)\n end_time = time.time()\n return end_time - start_time", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def perform_load_test(concurrency, total_requests, questions, output_file, is_stream=False):\n latencies = []\n questions = [\"\u4ec0\u4e48\u662f\u4e09\u5927\u4e13\u9879\", \"\u6c5f\u82cf\u9ad8\u4e09\u7269\u751f\u5730\uff0c\u519b\u6821\u80fd\u4e0d\u80fd\u62a5\uff0c\u54ea\u4e9b\u4e13\u4e1a\u4e0d\u80fd\u62a5\", \"\u5c71\u4e1c\u6587\u79d1\u5728\u6c5f\u82cf\u600e\u4e48\u9009\u5b66\u6821\", \"\u4e1c\u5357\u5927\u5b66\u5316\u5b66\u5de5\u7a0b\u4e0e\u5de5\u827a\uff0c\u751f\u7269\u79d1\u5b66\uff0c\u5236\u836f\u5de5\u7a0b\u5206\u6d41\u54ea\u4e2a\u597d\uff1f\", \"\u7537\u751f\u9ad8\u4e09\u7269\u5316\u5730\uff0c\u8fbd\u5b81\uff0c\u5b66\u65e5\u8bed\u597d\u9009\u5b66\u6821\u5417\"]\n #questions = [\"\u4ec0\u4e48\u662f\u4e09\u5927\u4e13\u9879\"] * 5\n with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:\n future_to_request = {executor.submit(measure_latency, random.choice(questions), output_file, is_stream): i for i in range(total_requests)}\n for future in concurrent.futures.as_completed(future_to_request):\n try:\n latency = future.result()\n latencies.append(latency)\n except Exception as e:\n print(f\"\u8bf7\u6c42\u6267\u884c\u5f02\u5e38: {e}\")\n\n # \u8ba1\u7b97\u7edf\u8ba1\u6570\u636e\n p99 = np.percentile(latencies, 99)\n p95 = np.percentile(latencies, 95)\n total_time = sum(latencies)\n qps = total_requests / total_time\n\n return latencies, 
p99, p95, qps", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_local_chat_qa.py", "stars": 13384, "label": 0} +{"function": "def remove_full_width_characters(s):\n # \u5339\u914d\u5168\u89d2\u5b57\u7b26\u7684\u6b63\u5219\u8868\u8fbe\u5f0f\n pattern = re.compile(r'[\\uFF00-\\uFFEF]')\n # \u66ff\u6362\u5b57\u7b26\u4e32\u4e2d\u7684\u5168\u89d2\u5b57\u7b26\u4e3a\u7a7a\u5b57\u7b26\u4e32\n return pattern.sub('', s)", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_upload_files.py", "stars": 13384, "label": 0} +{"function": "async def send_request(round_, files):\n print(len(files))\n url = 'http://0.0.0.0:8777/api/local_doc_qa/upload_files'\n data = aiohttp.FormData()\n data.add_field('user_id', 'default')\n data.add_field('kb_id', kb_id)\n data.add_field('mode', 'soft')\n\n total_size = 0\n for file_path in files:\n # print(file_path)\n file_size = os.path.getsize(file_path)\n total_size += file_size\n data.add_field('files', open(file_path, 'rb'))\n print('size:', total_size / (1024 * 1024))\n for _ in range(1):\n try:\n start_time = time.time()\n async with aiohttp.ClientSession(timeout=timeout) as session:\n async with session.post(url, data=data) as response:\n end_time = time.time()\n response_times.append(end_time - start_time)\n print(f\"round_:{round_}, \u54cd\u5e94\u72b6\u6001\u7801: {response.status}, \u54cd\u5e94\u65f6\u95f4: {end_time - start_time}\u79d2\")\n if response.status != 200:\n continue\n except Exception as e:\n print(f\"\u8bf7\u6c42\u53d1\u9001\u5931\u8d25: {e}\")", "creation_date": "2024-01-03T08:16:26Z", "repo": "netease-youdao/QAnything", "file_path": "scripts/multi_upload_files.py", "stars": 13384, "label": 0} +{"function": " def testExtractText(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"some_ocr1.pdf\")\n reader = PdfReader(local_pdf_path)\n page = reader.pages[0]\n\n def visitor_body(text, cm, tm, font_dict, font_size):\n print(repr(text), cm, tm, font_size)\n\n def visitor_op(op, args, cm, tm):\n # print(op, args, cm, tm)\n pass\n\n page.extract_text(visitor_text=visitor_body, visitor_operand_before=visitor_op)", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testAnchorBase(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"pdftotext_two_column_issue.pdf\")\n\n report = _pdf_report(local_pdf_path, 2)\n\n print(report)\n\n print(get_anchor_text(local_pdf_path, 2, pdf_engine=\"pdfreport\"))", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testAnchorImage(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"some_ocr1.pdf\")\n\n report = _pdf_report(local_pdf_path, 1)\n\n print(report)\n\n print(get_anchor_text(local_pdf_path, 1, pdf_engine=\"pdfreport\"))", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testSmallPage(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"small_page_size.pdf\")\n\n report = _pdf_report(local_pdf_path, 1)\n\n print(report)\n\n print(get_anchor_text(local_pdf_path, 1, pdf_engine=\"pdfreport\"))", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": 
"tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testBadUTFSurrogatePairsGeneration(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"badlines.pdf\")\n\n anchor_text = get_anchor_text(local_pdf_path, 4, pdf_engine=\"pdfreport\")\n\n jsondata = json.dumps({\"text\": anchor_text})\n\n import pyarrow as pa\n import pyarrow.compute as pc\n import pyarrow.json as paj\n\n buffer = io.BytesIO(jsondata.encode(\"utf-8\"))\n paj.read_json(buffer, read_options=paj.ReadOptions(use_threads=False, block_size=len(jsondata)))", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testLargePromptHint1(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"large_prompt_hint1.pdf\")\n\n anchor_text = get_anchor_text(local_pdf_path, 4, pdf_engine=\"pdfreport\")\n\n print(anchor_text)\n print(len(anchor_text))\n self.assertLessEqual(len(anchor_text), 1000)", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testLargePromptHint2(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"large_prompt_hint2.pdf\")\n\n anchor_text = get_anchor_text(local_pdf_path, 2, pdf_engine=\"pdfreport\")\n\n print(anchor_text)\n print(len(anchor_text))\n self.assertLessEqual(len(anchor_text), 4000)", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testLargePromptHint3(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"large_prompt_hint3.pdf\")\n\n anchor_text = get_anchor_text(local_pdf_path, 2, pdf_engine=\"pdfreport\")\n\n print(anchor_text)\n print(len(anchor_text))\n self.assertLessEqual(len(anchor_text), 4000)", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testNewsPaperPromptHint(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"newspaper.pdf\")\n\n anchor_text = get_anchor_text(local_pdf_path, 1, pdf_engine=\"pdfreport\")\n\n print(anchor_text)\n print(len(anchor_text))\n self.assertLessEqual(len(anchor_text), 4000)", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": " def testTobaccoPaperMissingParagraphs(self):\n local_pdf_path = os.path.join(os.path.dirname(__file__), \"gnarly_pdfs\", \"tobacco_missed_tokens_pg1.pdf\")\n\n anchor_text = get_anchor_text(local_pdf_path, 1, pdf_engine=\"pdfreport\")\n\n print(anchor_text)\n print(len(anchor_text))\n self.assertLessEqual(len(anchor_text), 4000)", "creation_date": "2024-10-01T22:10:58Z", "repo": "allenai/olmocr", "file_path": "tests/test_anchor.py", "stars": 13204, "label": 0} +{"function": "def main():\n # Get database path\n config = Config.from_env()\n db_path = config.database.db_file\n \n print(f\"Database path: {db_path}\")\n \n # Create migration manager\n manager = MigrationManager(db_path)\n \n # Get applied migrations\n applied = manager.get_applied_migrations()\n print(f\"Applied migrations: {', '.join(applied) if applied else 'none'}\")\n \n if not applied:\n print(\"No migrations to downgrade\")\n return\n \n # Check command line arguments\n if len(sys.argv) > 1:\n target_version = 
sys.argv[1]\n if target_version not in applied:\n print(f\"Error: Version {target_version} is not applied, cannot downgrade to this version\")\n return\n \n print(f\"Downgrading to version {target_version}...\")\n downgraded = manager.downgrade_to_version(target_version)\n else:\n print(\"Downgrading all migrations...\")\n downgraded = manager.downgrade_to_version()\n \n if downgraded:\n print(f\"Successfully downgraded migrations: {', '.join(downgraded)}\")\n else:\n print(\"No migrations were downgraded\")", "creation_date": "2025-04-24T06:19:23Z", "repo": "mindverse/Second-Me", "file_path": "scripts/downgrade_migration.py", "stars": 13150, "label": 0} +{"function": "def get_db_path():\n \"\"\"Get the database path from environment or use default\"\"\"\n config = Config.from_env()\n db_path = config.get(\"SQLITE_DB_PATH\", os.path.join(project_root, \"data\", \"sqlite\", \"lpm.db\"))\n return db_path", "creation_date": "2025-03-19T16:37:54Z", "repo": "mindverse/Second-Me", "file_path": "scripts/migrate_add_instance_password.py", "stars": 13150, "label": 0} +{"function": "def migrate_database():\n \"\"\"Add instance_password column to tables with instance_id\"\"\"\n db_path = get_db_path()\n \n logger.info(f\"Using database at: {db_path}\")\n \n # Check if database file exists\n if not os.path.exists(db_path):\n logger.error(f\"Database file not found at {db_path}\")\n return False\n \n # Default password to use\n default_password = \"mindverse666\"\n \n try:\n # Connect to the database\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n \n # List all tables\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n tables = cursor.fetchall()\n logger.info(f\"Tables in database: {[table[0] for table in tables]}\")\n \n # Check for any table that might have instance_id but not instance_password\n for table_name in [table[0] for table in tables]:\n # Get columns for this table\n cursor.execute(f\"PRAGMA table_info({table_name})\")\n table_columns = cursor.fetchall()\n table_column_names = [column[1] for column in table_columns]\n \n # If table has instance_id but not instance_password\n if \"instance_id\" in table_column_names and \"instance_password\" not in table_column_names:\n logger.info(f\"Table {table_name} has instance_id but not instance_password\")\n logger.info(f\"Adding instance_password column to {table_name} table\")\n cursor.execute(f\"ALTER TABLE {table_name} ADD COLUMN instance_password VARCHAR(255) DEFAULT '{default_password}'\")\n logger.info(f\"Added instance_password column to {table_name} table with default value '{default_password}'\")\n \n # Update existing rows to set the default password where instance_id is not null\n cursor.execute(f\"UPDATE {table_name} SET instance_password = '{default_password}' WHERE instance_id IS NOT NULL AND instance_password IS NULL\")\n updated_rows = cursor.rowcount\n logger.info(f\"Updated {updated_rows} rows in {table_name} with default password\")\n \n # Commit the changes\n conn.commit()\n logger.info(\"Migration completed successfully\")\n \n # Close the connection\n conn.close()\n return True\n \n except sqlite3.Error as e:\n logger.error(f\"SQLite error: {e}\")\n return False\n except Exception as e:\n logger.error(f\"Error during migration: {e}\")\n return False", "creation_date": "2025-03-19T16:37:54Z", "repo": "mindverse/Second-Me", "file_path": "scripts/migrate_add_instance_password.py", "stars": 13150, "label": 0} +{"function": "def get_db_path():\n \"\"\"Get the database path from environment or use 
default\"\"\"\n config = Config.from_env()\n db_path = config.get(\"SQLITE_DB_PATH\", os.path.join(project_root, \"data\", \"sqlite\", \"lpm.db\"))\n return db_path", "creation_date": "2025-04-24T06:19:23Z", "repo": "mindverse/Second-Me", "file_path": "scripts/run_migrations.py", "stars": 13150, "label": 0} +{"function": "def run_migrations():\n \"\"\"Run all pending database migrations\"\"\"\n db_path = get_db_path()\n \n # logger.info(f\"Using database at: {db_path}\")\n \n # Check if database file exists\n if not os.path.exists(db_path):\n # logger.error(f\"Database file not found at {db_path}\")\n return False\n \n try:\n # Initialize migration manager\n migrations_dir = os.path.join(project_root, \"lpm_kernel\", \"database\", \"migrations\")\n manager = MigrationManager(db_path)\n \n # Apply migrations\n applied = manager.apply_migrations(migrations_dir)\n \n # if applied:\n # logger.info(f\"Successfully applied {len(applied)} migrations\")\n # else:\n # logger.info(\"No new migrations to apply\")\n \n return True\n \n except Exception as e:\n logger.error(f\"Error during migrations: {e}\")\n return False", "creation_date": "2025-04-24T06:19:23Z", "repo": "mindverse/Second-Me", "file_path": "scripts/run_migrations.py", "stars": 13150, "label": 0} +{"function": "def create_migration(description):\n \"\"\"Create a new migration file\"\"\"\n db_path = get_db_path()\n migrations_dir = os.path.join(project_root, \"lpm_kernel\", \"database\", \"migrations\")\n \n manager = MigrationManager(db_path)\n filepath = manager.create_migration(description, migrations_dir)\n \n # logger.info(f\"Created new migration at: {filepath}\")\n return filepath", "creation_date": "2025-04-24T06:19:23Z", "repo": "mindverse/Second-Me", "file_path": "scripts/run_migrations.py", "stars": 13150, "label": 0} +{"function": "async def get_response(query:str) -> str | None | Any:\n \"\"\"\n Received a response based on local secondme model.\n\n Args:\n query (str): Questions raised by users regarding the secondme model\n\n \"\"\"\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"text/event-stream\"\n }\n messages.append({\"role\": \"user\", \"content\": query})\n\n data={\n \"messages\": messages,\n \"stream\": True\n }\n\n conn = http.client.HTTPConnection(url)\n\n # Send the POST request\n conn.request(\"POST\", path, body=json.dumps(data), headers=headers)\n\n # Get the response\n response = conn.getresponse()\n full_content=\"\"\n\n for line in response:\n if line:\n decoded_line = line.decode('utf-8').strip()\n if decoded_line == 'data: [DONE]':\n break\n if decoded_line.startswith('data: '):\n try:\n json_str = decoded_line[6:]\n chunk = json.loads(json_str)\n content = chunk['choices'][0]['delta'].get('content', '')\n if content:\n full_content+=content\n except json.JSONDecodeError:\n pass\n\n conn.close()\n if full_content:\n messages.append({\"role\": \"system\", \"content\": full_content})\n return full_content\n else:\n return None", "creation_date": "2025-04-15T09:44:18Z", "repo": "mindverse/Second-Me", "file_path": "mcp/mcp_local.py", "stars": 13150, "label": 0} +{"function": "async def get_response(query:str, instance_id:str) -> str | None:\n \"\"\"\n Received a response based on public secondme model.\n\n Args:\n query (str): Questions raised by users regarding the secondme model.\n instance_id (str): ID used to identify the secondme model, or url used to identify the secondme model.\n\n \"\"\"\n id = instance_id.split('/')[-1]\n path = f\"/api/chat/{id}\"\n headers = 
{\"Content-Type\": \"application/json\"}\n messages.append({\"role\": \"user\", \"content\": query})\n\n data = {\n \"messages\": messages,\n \"metadata\": {\n \"enable_l0_retrieval\": False,\n \"role_id\": \"default_role\"\n },\n \"temperature\": 0.7,\n \"max_tokens\": 2000,\n \"stream\": True\n }\n\n conn = http.client.HTTPSConnection(url)\n\n # Send the POST request\n conn.request(\"POST\", path, body=json.dumps(data), headers=headers)\n\n # Get the response\n response = conn.getresponse()\n\n full_content = \"\"\n\n for line in response:\n if line:\n decoded_line = line.decode('utf-8').strip()\n if decoded_line == 'data: [DONE]':\n break\n if decoded_line.startswith('data: '):\n try:\n json_str = decoded_line[6:]\n chunk = json.loads(json_str)\n content = chunk['choices'][0]['delta'].get('content', '')\n if content:\n full_content += content\n except json.JSONDecodeError:\n pass\n\n conn.close()\n if full_content:\n messages.append({\"role\": \"system\", \"content\": full_content})\n return full_content\n else:\n return None", "creation_date": "2025-04-15T09:44:18Z", "repo": "mindverse/Second-Me", "file_path": "mcp/mcp_public.py", "stars": 13150, "label": 0} +{"function": "async def get_online_instances():\n \"\"\"\n Check which secondme models are available for chatting online.\n \"\"\"\n url = \"https://app.secondme.io/api/upload/list?page_size=100\"\n response = requests.get(url)\n\n if response.status_code == 200:\n data = response.json()\n items = data.get(\"data\", {}).get(\"items\", [])\n\n online_items = [\n {\n \"upload_name\": item[\"upload_name\"],\n \"instance_id\": item[\"instance_id\"],\n \"description\": item[\"description\"]\n }\n for item in items if item.get(\"status\") == \"online\"\n ]\n\n return json.dumps(online_items, ensure_ascii=False, indent=2)\n else:\n raise Exception(f\"Request failed with status code: {response.status_code}\")", "creation_date": "2025-04-15T09:44:18Z", "repo": "mindverse/Second-Me", "file_path": "mcp/mcp_public.py", "stars": 13150, "label": 0} +{"function": "def create_app():\n app = Flask(__name__)\n\n # Initialize database connection\n try:\n DatabaseSession.initialize()\n logger.info(\"Database connection initialized successfully\")\n \n except Exception as e:\n logger.error(f\"Failed to initialize database connection: {str(e)}\")\n raise\n\n # Add CORS support\n\n @app.after_request\n def after_request(response):\n # Allow all origins in development environment\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.add(\n \"Access-Control-Allow-Headers\", \"Content-Type,Authorization\"\n )\n response.headers.add(\n \"Access-Control-Allow-Methods\", \"GET,PUT,POST,DELETE,OPTIONS\"\n )\n return response\n\n # Create file server handler\n file_handler = FileServerHandler(\n os.path.join(os.getenv(\"APP_ROOT\", \"/app\"), \"resources\", \"raw_content\")\n )\n\n @app.route(\"/raw_content/\", defaults={\"path\": \"\"})\n @app.route(\"/raw_content/\")\n def serve_content(path=\"\"):\n return file_handler.handle_request(path, request.path)\n\n # Register all routes\n init_routes(app)\n\n # Clean up database connection only when the application shuts down\n @app.teardown_appcontext\n def cleanup_db(exception):\n pass\n\n return app", "creation_date": "2025-03-19T16:37:54Z", "repo": "mindverse/Second-Me", "file_path": "lpm_kernel/app.py", "stars": 13150, "label": 0} +{"function": "def _validate_args(args):\n # Basic check\n assert args.ckpt_dir is not None, \"Please specify the checkpoint directory.\"\n assert 
args.task in WAN_CONFIGS, f\"Unsupport task: {args.task}\"\n assert args.task in EXAMPLE_PROMPT, f\"Unsupport task: {args.task}\"\n\n # The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.\n if args.sample_steps is None:\n args.sample_steps = 50\n if \"i2v\" in args.task:\n args.sample_steps = 40\n\n if args.sample_shift is None:\n args.sample_shift = 5.0\n if \"i2v\" in args.task and args.size in [\"832*480\", \"480*832\"]:\n args.sample_shift = 3.0\n elif \"flf2v\" in args.task or \"vace\" in args.task:\n args.sample_shift = 16\n\n # The default number of frames are 1 for text-to-image tasks and 81 for other tasks.\n if args.frame_num is None:\n args.frame_num = 1 if \"t2i\" in args.task else 81\n\n # T2I frame_num check\n if \"t2i\" in args.task:\n assert args.frame_num == 1, f\"Unsupport frame_num {args.frame_num} for task {args.task}\"\n\n args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(\n 0, sys.maxsize)\n # Size check\n assert args.size in SUPPORTED_SIZES[\n args.\n task], f\"Unsupport size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}\"", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "generate.py", "stars": 12952, "label": 0} +{"function": "def _parse_args():\n parser = argparse.ArgumentParser(\n description=\"Generate a image or video from a text prompt or image using Wan\"\n )\n parser.add_argument(\n \"--task\",\n type=str,\n default=\"t2v-14B\",\n choices=list(WAN_CONFIGS.keys()),\n help=\"The task to run.\")\n parser.add_argument(\n \"--size\",\n type=str,\n default=\"1280*720\",\n choices=list(SIZE_CONFIGS.keys()),\n help=\"The area (width*height) of the generated video. For the I2V task, the aspect ratio of the output video will follow that of the input image.\"\n )\n parser.add_argument(\n \"--frame_num\",\n type=int,\n default=None,\n help=\"How many frames to sample from a image or video. The number should be 4n+1\"\n )\n parser.add_argument(\n \"--ckpt_dir\",\n type=str,\n default=None,\n help=\"The path to the checkpoint directory.\")\n parser.add_argument(\n \"--offload_model\",\n type=str2bool,\n default=None,\n help=\"Whether to offload the model to CPU after each model forward, reducing GPU memory usage.\"\n )\n parser.add_argument(\n \"--ulysses_size\",\n type=int,\n default=1,\n help=\"The size of the ulysses parallelism in DiT.\")\n parser.add_argument(\n \"--ring_size\",\n type=int,\n default=1,\n help=\"The size of the ring attention parallelism in DiT.\")\n parser.add_argument(\n \"--t5_fsdp\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use FSDP for T5.\")\n parser.add_argument(\n \"--t5_cpu\",\n action=\"store_true\",\n default=False,\n help=\"Whether to place T5 model on CPU.\")\n parser.add_argument(\n \"--dit_fsdp\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use FSDP for DiT.\")\n parser.add_argument(\n \"--save_file\",\n type=str,\n default=None,\n help=\"The file to save the generated image or video to.\")\n parser.add_argument(\n \"--src_video\",\n type=str,\n default=None,\n help=\"The file of the source video. Default None.\")\n parser.add_argument(\n \"--src_mask\",\n type=str,\n default=None,\n help=\"The file of the source mask. Default None.\")\n parser.add_argument(\n \"--src_ref_images\",\n type=str,\n default=None,\n help=\"The file list of the source reference images. Separated by ','. 
Default None.\"\n )\n parser.add_argument(\n \"--prompt\",\n type=str,\n default=None,\n help=\"The prompt to generate the image or video from.\")\n parser.add_argument(\n \"--use_prompt_extend\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use prompt extend.\")\n parser.add_argument(\n \"--prompt_extend_method\",\n type=str,\n default=\"local_qwen\",\n choices=[\"dashscope\", \"local_qwen\"],\n help=\"The prompt extend method to use.\")\n parser.add_argument(\n \"--prompt_extend_model\",\n type=str,\n default=None,\n help=\"The prompt extend model to use.\")\n parser.add_argument(\n \"--prompt_extend_target_lang\",\n type=str,\n default=\"zh\",\n choices=[\"zh\", \"en\"],\n help=\"The target language of prompt extend.\")\n parser.add_argument(\n \"--base_seed\",\n type=int,\n default=-1,\n help=\"The seed to use for generating the image or video.\")\n parser.add_argument(\n \"--image\",\n type=str,\n default=None,\n help=\"[image to video] The image to generate the video from.\")\n parser.add_argument(\n \"--first_frame\",\n type=str,\n default=None,\n help=\"[first-last frame to video] The image (first frame) to generate the video from.\"\n )\n parser.add_argument(\n \"--last_frame\",\n type=str,\n default=None,\n help=\"[first-last frame to video] The image (last frame) to generate the video from.\"\n )\n parser.add_argument(\n \"--sample_solver\",\n type=str,\n default='unipc',\n choices=['unipc', 'dpm++'],\n help=\"The solver used to sample.\")\n parser.add_argument(\n \"--sample_steps\", type=int, default=None, help=\"The sampling steps.\")\n parser.add_argument(\n \"--sample_shift\",\n type=float,\n default=None,\n help=\"Sampling shift factor for flow matching schedulers.\")\n parser.add_argument(\n \"--sample_guide_scale\",\n type=float,\n default=5.0,\n help=\"Classifier free guidance scale.\")\n\n args = parser.parse_args()\n\n _validate_args(args)\n\n return args", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "generate.py", "stars": 12952, "label": 0} +{"function": "def _init_logging(rank):\n # logging\n if rank == 0:\n # set format\n logging.basicConfig(\n level=logging.INFO,\n format=\"[%(asctime)s] %(levelname)s: %(message)s\",\n handlers=[logging.StreamHandler(stream=sys.stdout)])\n else:\n logging.basicConfig(level=logging.ERROR)", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "generate.py", "stars": 12952, "label": 0} +{"function": "def generate(args):\n rank = int(os.getenv(\"RANK\", 0))\n world_size = int(os.getenv(\"WORLD_SIZE\", 1))\n local_rank = int(os.getenv(\"LOCAL_RANK\", 0))\n device = local_rank\n _init_logging(rank)\n\n if args.offload_model is None:\n args.offload_model = False if world_size > 1 else True\n logging.info(\n f\"offload_model is not specified, set to {args.offload_model}.\")\n if world_size > 1:\n torch.cuda.set_device(local_rank)\n dist.init_process_group(\n backend=\"nccl\",\n init_method=\"env://\",\n rank=rank,\n world_size=world_size)\n else:\n assert not (\n args.t5_fsdp or args.dit_fsdp\n ), f\"t5_fsdp and dit_fsdp are not supported in non-distributed environments.\"\n assert not (\n args.ulysses_size > 1 or args.ring_size > 1\n ), f\"context parallel are not supported in non-distributed environments.\"\n\n if args.ulysses_size > 1 or args.ring_size > 1:\n assert args.ulysses_size * args.ring_size == world_size, f\"The number of ulysses_size and ring_size should be equal to the world size.\"\n from xfuser.core.distributed import (\n 
init_distributed_environment,\n initialize_model_parallel,\n )\n init_distributed_environment(\n rank=dist.get_rank(), world_size=dist.get_world_size())\n\n initialize_model_parallel(\n sequence_parallel_degree=dist.get_world_size(),\n ring_degree=args.ring_size,\n ulysses_degree=args.ulysses_size,\n )\n\n if args.use_prompt_extend:\n if args.prompt_extend_method == \"dashscope\":\n prompt_expander = DashScopePromptExpander(\n model_name=args.prompt_extend_model,\n is_vl=\"i2v\" in args.task or \"flf2v\" in args.task)\n elif args.prompt_extend_method == \"local_qwen\":\n prompt_expander = QwenPromptExpander(\n model_name=args.prompt_extend_model,\n is_vl=\"i2v\" in args.task,\n device=rank)\n else:\n raise NotImplementedError(\n f\"Unsupport prompt_extend_method: {args.prompt_extend_method}\")\n\n cfg = WAN_CONFIGS[args.task]\n if args.ulysses_size > 1:\n assert cfg.num_heads % args.ulysses_size == 0, f\"`{cfg.num_heads=}` cannot be divided evenly by `{args.ulysses_size=}`.\"\n\n logging.info(f\"Generation job args: {args}\")\n logging.info(f\"Generation model config: {cfg}\")\n\n if dist.is_initialized():\n base_seed = [args.base_seed] if rank == 0 else [None]\n dist.broadcast_object_list(base_seed, src=0)\n args.base_seed = base_seed[0]\n\n if \"t2v\" in args.task or \"t2i\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n logging.info(f\"Input prompt: {args.prompt}\")\n if args.use_prompt_extend:\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt_output = prompt_expander(\n args.prompt,\n tar_lang=args.prompt_extend_target_lang,\n seed=args.base_seed)\n if prompt_output.status == False:\n logging.info(\n f\"Extending prompt failed: {prompt_output.message}\")\n logging.info(\"Falling back to original prompt.\")\n input_prompt = args.prompt\n else:\n input_prompt = prompt_output.prompt\n input_prompt = [input_prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating WanT2V pipeline.\")\n wan_t2v = wan.WanT2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n logging.info(\n f\"Generating {'image' if 't2i' in args.task else 'video'} ...\")\n video = wan_t2v.generate(\n args.prompt,\n size=SIZE_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n\n elif \"i2v\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n if args.image is None:\n args.image = EXAMPLE_PROMPT[args.task][\"image\"]\n logging.info(f\"Input prompt: {args.prompt}\")\n logging.info(f\"Input image: {args.image}\")\n\n img = Image.open(args.image).convert(\"RGB\")\n if args.use_prompt_extend:\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt_output = prompt_expander(\n args.prompt,\n tar_lang=args.prompt_extend_target_lang,\n image=img,\n seed=args.base_seed)\n if prompt_output.status == False:\n logging.info(\n f\"Extending prompt failed: {prompt_output.message}\")\n logging.info(\"Falling back to original prompt.\")\n input_prompt = args.prompt\n else:\n input_prompt = 
prompt_output.prompt\n input_prompt = [input_prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating WanI2V pipeline.\")\n wan_i2v = wan.WanI2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n logging.info(\"Generating video ...\")\n video = wan_i2v.generate(\n args.prompt,\n img,\n max_area=MAX_AREA_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n elif \"flf2v\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n if args.first_frame is None or args.last_frame is None:\n args.first_frame = EXAMPLE_PROMPT[args.task][\"first_frame\"]\n args.last_frame = EXAMPLE_PROMPT[args.task][\"last_frame\"]\n logging.info(f\"Input prompt: {args.prompt}\")\n logging.info(f\"Input first frame: {args.first_frame}\")\n logging.info(f\"Input last frame: {args.last_frame}\")\n first_frame = Image.open(args.first_frame).convert(\"RGB\")\n last_frame = Image.open(args.last_frame).convert(\"RGB\")\n if args.use_prompt_extend:\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt_output = prompt_expander(\n args.prompt,\n tar_lang=args.prompt_extend_target_lang,\n image=[first_frame, last_frame],\n seed=args.base_seed)\n if prompt_output.status == False:\n logging.info(\n f\"Extending prompt failed: {prompt_output.message}\")\n logging.info(\"Falling back to original prompt.\")\n input_prompt = args.prompt\n else:\n input_prompt = prompt_output.prompt\n input_prompt = [input_prompt]\n else:\n input_prompt = [None]\n if dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating WanFLF2V pipeline.\")\n wan_flf2v = wan.WanFLF2V(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n logging.info(\"Generating video ...\")\n video = wan_flf2v.generate(\n args.prompt,\n first_frame,\n last_frame,\n max_area=MAX_AREA_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n elif \"vace\" in args.task:\n if args.prompt is None:\n args.prompt = EXAMPLE_PROMPT[args.task][\"prompt\"]\n args.src_video = EXAMPLE_PROMPT[args.task].get(\"src_video\", None)\n args.src_mask = EXAMPLE_PROMPT[args.task].get(\"src_mask\", None)\n args.src_ref_images = EXAMPLE_PROMPT[args.task].get(\n \"src_ref_images\", None)\n\n logging.info(f\"Input prompt: {args.prompt}\")\n if args.use_prompt_extend and args.use_prompt_extend != 'plain':\n logging.info(\"Extending prompt ...\")\n if rank == 0:\n prompt = prompt_expander.forward(args.prompt)\n logging.info(\n f\"Prompt extended from '{args.prompt}' to '{prompt}'\")\n input_prompt = [prompt]\n else:\n input_prompt = [None]\n if 
dist.is_initialized():\n dist.broadcast_object_list(input_prompt, src=0)\n args.prompt = input_prompt[0]\n logging.info(f\"Extended prompt: {args.prompt}\")\n\n logging.info(\"Creating VACE pipeline.\")\n wan_vace = wan.WanVace(\n config=cfg,\n checkpoint_dir=args.ckpt_dir,\n device_id=device,\n rank=rank,\n t5_fsdp=args.t5_fsdp,\n dit_fsdp=args.dit_fsdp,\n use_usp=(args.ulysses_size > 1 or args.ring_size > 1),\n t5_cpu=args.t5_cpu,\n )\n\n src_video, src_mask, src_ref_images = wan_vace.prepare_source(\n [args.src_video], [args.src_mask], [\n None if args.src_ref_images is None else\n args.src_ref_images.split(',')\n ], args.frame_num, SIZE_CONFIGS[args.size], device)\n\n logging.info(f\"Generating video...\")\n video = wan_vace.generate(\n args.prompt,\n src_video,\n src_mask,\n src_ref_images,\n size=SIZE_CONFIGS[args.size],\n frame_num=args.frame_num,\n shift=args.sample_shift,\n sample_solver=args.sample_solver,\n sampling_steps=args.sample_steps,\n guide_scale=args.sample_guide_scale,\n seed=args.base_seed,\n offload_model=args.offload_model)\n else:\n raise ValueError(f\"Unkown task type: {args.task}\")\n\n if rank == 0:\n if args.save_file is None:\n formatted_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n formatted_prompt = args.prompt.replace(\" \", \"_\").replace(\"/\",\n \"_\")[:50]\n suffix = '.png' if \"t2i\" in args.task else '.mp4'\n args.save_file = f\"{args.task}_{args.size.replace('*','x') if sys.platform=='win32' else args.size}_{args.ulysses_size}_{args.ring_size}_{formatted_prompt}_{formatted_time}\" + suffix\n\n if \"t2i\" in args.task:\n logging.info(f\"Saving generated image to {args.save_file}\")\n cache_image(\n tensor=video.squeeze(1)[None],\n save_file=args.save_file,\n nrow=1,\n normalize=True,\n value_range=(-1, 1))\n else:\n logging.info(f\"Saving generated video to {args.save_file}\")\n cache_video(\n tensor=video[None],\n save_file=args.save_file,\n fps=cfg.sample_fps,\n nrow=1,\n normalize=True,\n value_range=(-1, 1))\n logging.info(\"Finished.\")", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "generate.py", "stars": 12952, "label": 0} +{"function": " def __init__(\n self,\n config,\n checkpoint_dir,\n device_id=0,\n rank=0,\n t5_fsdp=False,\n dit_fsdp=False,\n use_usp=False,\n t5_cpu=False,\n init_on_cpu=True,\n ):\n r\"\"\"\n Initializes the image-to-video generation model components.\n\n Args:\n config (EasyDict):\n Object containing model parameters initialized from config.py\n checkpoint_dir (`str`):\n Path to directory containing model checkpoints\n device_id (`int`, *optional*, defaults to 0):\n Id of target GPU device\n rank (`int`, *optional*, defaults to 0):\n Process rank for distributed training\n t5_fsdp (`bool`, *optional*, defaults to False):\n Enable FSDP sharding for T5 model\n dit_fsdp (`bool`, *optional*, defaults to False):\n Enable FSDP sharding for DiT model\n use_usp (`bool`, *optional*, defaults to False):\n Enable distribution strategy of USP.\n t5_cpu (`bool`, *optional*, defaults to False):\n Whether to place T5 model on CPU. Only works without t5_fsdp.\n init_on_cpu (`bool`, *optional*, defaults to True):\n Enable initializing Transformer Model on CPU. 
Only works without FSDP or USP.\n \"\"\"\n self.device = torch.device(f\"cuda:{device_id}\")\n self.config = config\n self.rank = rank\n self.use_usp = use_usp\n self.t5_cpu = t5_cpu\n\n self.num_train_timesteps = config.num_train_timesteps\n self.param_dtype = config.param_dtype\n\n shard_fn = partial(shard_model, device_id=device_id)\n self.text_encoder = T5EncoderModel(\n text_len=config.text_len,\n dtype=config.t5_dtype,\n device=torch.device('cpu'),\n checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),\n tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),\n shard_fn=shard_fn if t5_fsdp else None,\n )\n\n self.vae_stride = config.vae_stride\n self.patch_size = config.patch_size\n self.vae = WanVAE(\n vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),\n device=self.device)\n\n self.clip = CLIPModel(\n dtype=config.clip_dtype,\n device=self.device,\n checkpoint_path=os.path.join(checkpoint_dir,\n config.clip_checkpoint),\n tokenizer_path=os.path.join(checkpoint_dir, config.clip_tokenizer))\n\n logging.info(f\"Creating WanModel from {checkpoint_dir}\")\n self.model = WanModel.from_pretrained(checkpoint_dir)\n self.model.eval().requires_grad_(False)\n\n if t5_fsdp or dit_fsdp or use_usp:\n init_on_cpu = False\n\n if use_usp:\n from xfuser.core.distributed import get_sequence_parallel_world_size\n\n from .distributed.xdit_context_parallel import (\n usp_attn_forward,\n usp_dit_forward,\n )\n for block in self.model.blocks:\n block.self_attn.forward = types.MethodType(\n usp_attn_forward, block.self_attn)\n self.model.forward = types.MethodType(usp_dit_forward, self.model)\n self.sp_size = get_sequence_parallel_world_size()\n else:\n self.sp_size = 1\n\n if dist.is_initialized():\n dist.barrier()\n if dit_fsdp:\n self.model = shard_fn(self.model)\n else:\n if not init_on_cpu:\n self.model.to(self.device)\n\n self.sample_neg_prompt = config.sample_neg_prompt", "creation_date": "2025-04-17T13:56:46Z", "repo": "Wan-Video/Wan2.1", "file_path": "wan/first_last_frame2video.py", "stars": 12952, "label": 0} +{"function": " def generate(self,\n input_prompt,\n first_frame,\n last_frame,\n max_area=720 * 1280,\n frame_num=81,\n shift=16,\n sample_solver='unipc',\n sampling_steps=50,\n guide_scale=5.5,\n n_prompt=\"\",\n seed=-1,\n offload_model=True):\n r\"\"\"\n Generates video frames from input first-last frame and text prompt using diffusion process.\n\n Args:\n input_prompt (`str`):\n Text prompt for content generation.\n first_frame (PIL.Image.Image):\n Input image tensor. Shape: [3, H, W]\n last_frame (PIL.Image.Image):\n Input image tensor. Shape: [3, H, W]\n [NOTE] If the sizes of first_frame and last_frame are mismatched, last_frame will be cropped & resized\n to match first_frame.\n max_area (`int`, *optional*, defaults to 720*1280):\n Maximum pixel area for latent space calculation. Controls video resolution scaling\n frame_num (`int`, *optional*, defaults to 81):\n How many frames to sample from a video. The number should be 4n+1\n shift (`float`, *optional*, defaults to 5.0):\n Noise schedule shift parameter. Affects temporal dynamics\n [NOTE]: If you want to generate a 480p video, it is recommended to set the shift value to 3.0.\n sample_solver (`str`, *optional*, defaults to 'unipc'):\n Solver used to sample the video.\n sampling_steps (`int`, *optional*, defaults to 40):\n Number of diffusion sampling steps. Higher values improve quality but slow generation\n guide_scale (`float`, *optional*, defaults 5.0):\n Classifier-free guidance scale. 
Controls prompt adherence vs. creativity\n n_prompt (`str`, *optional*, defaults to \"\"):\n Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`\n seed (`int`, *optional*, defaults to -1):\n Random seed for noise generation. If -1, use random seed\n offload_model (`bool`, *optional*, defaults to True):\n If True, offloads models to CPU during generation to save VRAM\n\n Returns:\n torch.Tensor:\n Generated video frames tensor. Dimensions: (C, N H, W) where:\n - C: Color channels (3 for RGB)\n - N: Number of frames (81)\n - H: Frame height (from max_area)\n - W: Frame width from max_area)\n \"\"\"\n first_frame_size = first_frame.size\n last_frame_size = last_frame.size\n first_frame = TF.to_tensor(first_frame).sub_(0.5).div_(0.5).to(\n self.device)\n last_frame = TF.to_tensor(last_frame).sub_(0.5).div_(0.5).to(\n self.device)\n\n F = frame_num\n first_frame_h, first_frame_w = first_frame.shape[1:]\n aspect_ratio = first_frame_h / first_frame_w\n lat_h = round(\n np.sqrt(max_area * aspect_ratio) // self.vae_stride[1] //\n self.patch_size[1] * self.patch_size[1])\n lat_w = round(\n np.sqrt(max_area / aspect_ratio) // self.vae_stride[2] //\n self.patch_size[2] * self.patch_size[2])\n first_frame_h = lat_h * self.vae_stride[1]\n first_frame_w = lat_w * self.vae_stride[2]\n if first_frame_size != last_frame_size:\n # 1. resize\n last_frame_resize_ratio = max(\n first_frame_size[0] / last_frame_size[0],\n first_frame_size[1] / last_frame_size[1])\n last_frame_size = [\n round(last_frame_size[0] * last_frame_resize_ratio),\n round(last_frame_size[1] * last_frame_resize_ratio),\n ]\n # 2. center crop\n last_frame = TF.center_crop(last_frame, last_frame_size)\n\n max_seq_len = ((F - 1) // self.vae_stride[0] + 1) * lat_h * lat_w // (\n self.patch_size[1] * self.patch_size[2])\n max_seq_len = int(math.ceil(max_seq_len / self.sp_size)) * self.sp_size\n\n seed = seed if seed >= 0 else random.randint(0, sys.maxsize)\n seed_g = torch.Generator(device=self.device)\n seed_g.manual_seed(seed)\n noise = torch.randn(\n 16, (F - 1) // 4 + 1,\n lat_h,\n lat_w,\n dtype=torch.float32,\n generator=seed_g,\n device=self.device)\n\n msk = torch.ones(1, 81, lat_h, lat_w, device=self.device)\n msk[:, 1:-1] = 0\n msk = torch.concat([\n torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]\n ],\n dim=1)\n msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)\n msk = msk.transpose(1, 2)[0]\n\n if n_prompt == \"\":\n n_prompt = self.sample_neg_prompt\n\n # preprocess\n if not self.t5_cpu:\n self.text_encoder.model.to(self.device)\n context = self.text_encoder([input_prompt], self.device)\n context_null = self.text_encoder([n_prompt], self.device)\n if offload_model:\n self.text_encoder.model.cpu()\n else:\n context = self.text_encoder([input_prompt], torch.device('cpu'))\n context_null = self.text_encoder([n_prompt], torch.device('cpu'))\n context = [t.to(self.device) for t in context]\n context_null = [t.to(self.device) for t in context_null]\n\n self.clip.model.to(self.device)\n clip_context = self.clip.visual(\n [first_frame[:, None, :, :], last_frame[:, None, :, :]])\n if offload_model:\n self.clip.model.cpu()\n\n y = self.vae.encode([\n torch.concat([\n torch.nn.functional.interpolate(\n first_frame[None].cpu(),\n size=(first_frame_h, first_frame_w),\n mode='bicubic').transpose(0, 1),\n torch.zeros(3, F - 2, first_frame_h, first_frame_w),\n torch.nn.functional.interpolate(\n last_frame[None].cpu(),\n size=(first_frame_h, first_frame_w),\n mode='bicubic').transpose(0, 1),\n 
],\n dim=1).to(self.device)\n ])[0]\n y = torch.concat([msk, y])\n\n @contextmanager\n def noop_no_sync():\n yield\n\n no_sync = getattr(self.model, 'no_sync', noop_no_sync)\n\n # evaluation mode\n with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():\n\n if sample_solver == 'unipc':\n sample_scheduler = FlowUniPCMultistepScheduler(\n num_train_timesteps=self.num_train_timesteps,\n shift=1,\n use_dynamic_shifting=False)\n sample_scheduler.set_timesteps(\n sampling_steps, device=self.device, shift=shift)\n timesteps = sample_scheduler.timesteps\n elif sample_solver == 'dpm++':\n sample_scheduler = FlowDPMSolverMultistepScheduler(\n num_train_timesteps=self.num_train_timesteps,\n shift=1,\n use_dynamic_shifting=False)\n sampling_sigmas = get_sampling_sigmas(sampling_steps, shift)\n timesteps, _ = retrieve_timesteps(\n sample_scheduler,\n device=self.device,\n sigmas=sampling_sigmas)\n else:\n raise NotImplementedError(\"Unsupported solver.\")\n\n # sample videos\n latent = noise\n\n arg_c = {\n 'context': [context[0]],\n 'clip_fea': clip_context,\n 'seq_len': max_seq_len,\n 'y': [y],\n }\n\n arg_null = {\n 'context': context_null,\n 'clip_fea': clip_context,\n 'seq_len': max_seq_len,\n 'y': [y],\n }\n\n if offload_model:\n torch.cuda.empty_cache()\n\n self.model.to(self.device)\n for _, t in enumerate(tqdm(timesteps)):\n latent_model_input = [latent.to(self.device)]\n timestep = [t]\n\n timestep = torch.stack(timestep).to(self.device)\n\n noise_pred_cond = self.model(\n latent_model_input, t=timestep, **arg_c)[0].to(\n torch.device('cpu') if offload_model else self.device)\n if offload_model:\n torch.cuda.empty_cache()\n noise_pred_uncond = self.model(\n latent_model_input, t=timestep, **arg_null)[0].to(\n torch.device('cpu') if offload_model else self.device)\n if offload_model:\n torch.cuda.empty_cache()\n noise_pred = noise_pred_uncond + guide_scale * (\n noise_pred_cond - noise_pred_uncond)\n\n latent = latent.to(\n torch.device('cpu') if offload_model else self.device)\n\n temp_x0 = sample_scheduler.step(\n noise_pred.unsqueeze(0),\n t,\n latent.unsqueeze(0),\n return_dict=False,\n generator=seed_g)[0]\n latent = temp_x0.squeeze(0)\n\n x0 = [latent.to(self.device)]\n del latent_model_input, timestep\n\n if offload_model:\n self.model.cpu()\n torch.cuda.empty_cache()\n\n if self.rank == 0:\n videos = self.vae.decode(x0)\n\n del noise, latent\n del sample_scheduler\n if offload_model:\n gc.collect()\n torch.cuda.synchronize()\n if dist.is_initialized():\n dist.barrier()\n\n return videos[0] if self.rank == 0 else None", "creation_date": "2025-04-17T13:56:46Z", "repo": "Wan-Video/Wan2.1", "file_path": "wan/first_last_frame2video.py", "stars": 12952, "label": 0} +{"function": " def noop_no_sync():\n yield", "creation_date": "2025-04-17T13:56:46Z", "repo": "Wan-Video/Wan2.1", "file_path": "wan/first_last_frame2video.py", "stars": 12952, "label": 0} +{"function": " def __init__(\n self,\n config,\n checkpoint_dir,\n device_id=0,\n rank=0,\n t5_fsdp=False,\n dit_fsdp=False,\n use_usp=False,\n t5_cpu=False,\n init_on_cpu=True,\n ):\n r\"\"\"\n Initializes the image-to-video generation model components.\n\n Args:\n config (EasyDict):\n Object containing model parameters initialized from config.py\n checkpoint_dir (`str`):\n Path to directory containing model checkpoints\n device_id (`int`, *optional*, defaults to 0):\n Id of target GPU device\n rank (`int`, *optional*, defaults to 0):\n Process rank for distributed training\n t5_fsdp (`bool`, *optional*, defaults to False):\n 
Enable FSDP sharding for T5 model\n dit_fsdp (`bool`, *optional*, defaults to False):\n Enable FSDP sharding for DiT model\n use_usp (`bool`, *optional*, defaults to False):\n Enable distribution strategy of USP.\n t5_cpu (`bool`, *optional*, defaults to False):\n Whether to place T5 model on CPU. Only works without t5_fsdp.\n init_on_cpu (`bool`, *optional*, defaults to True):\n Enable initializing Transformer Model on CPU. Only works without FSDP or USP.\n \"\"\"\n self.device = torch.device(f\"cuda:{device_id}\")\n self.config = config\n self.rank = rank\n self.use_usp = use_usp\n self.t5_cpu = t5_cpu\n\n self.num_train_timesteps = config.num_train_timesteps\n self.param_dtype = config.param_dtype\n\n shard_fn = partial(shard_model, device_id=device_id)\n self.text_encoder = T5EncoderModel(\n text_len=config.text_len,\n dtype=config.t5_dtype,\n device=torch.device('cpu'),\n checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),\n tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),\n shard_fn=shard_fn if t5_fsdp else None,\n )\n\n self.vae_stride = config.vae_stride\n self.patch_size = config.patch_size\n self.vae = WanVAE(\n vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),\n device=self.device)\n\n self.clip = CLIPModel(\n dtype=config.clip_dtype,\n device=self.device,\n checkpoint_path=os.path.join(checkpoint_dir,\n config.clip_checkpoint),\n tokenizer_path=os.path.join(checkpoint_dir, config.clip_tokenizer))\n\n logging.info(f\"Creating WanModel from {checkpoint_dir}\")\n self.model = WanModel.from_pretrained(checkpoint_dir)\n self.model.eval().requires_grad_(False)\n\n if t5_fsdp or dit_fsdp or use_usp:\n init_on_cpu = False\n\n if use_usp:\n from xfuser.core.distributed import get_sequence_parallel_world_size\n\n from .distributed.xdit_context_parallel import (\n usp_attn_forward,\n usp_dit_forward,\n )\n for block in self.model.blocks:\n block.self_attn.forward = types.MethodType(\n usp_attn_forward, block.self_attn)\n self.model.forward = types.MethodType(usp_dit_forward, self.model)\n self.sp_size = get_sequence_parallel_world_size()\n else:\n self.sp_size = 1\n\n if dist.is_initialized():\n dist.barrier()\n if dit_fsdp:\n self.model = shard_fn(self.model)\n else:\n if not init_on_cpu:\n self.model.to(self.device)\n\n self.sample_neg_prompt = config.sample_neg_prompt", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "wan/image2video.py", "stars": 12952, "label": 0} +{"function": " def generate(self,\n input_prompt,\n img,\n max_area=720 * 1280,\n frame_num=81,\n shift=5.0,\n sample_solver='unipc',\n sampling_steps=40,\n guide_scale=5.0,\n n_prompt=\"\",\n seed=-1,\n offload_model=True):\n r\"\"\"\n Generates video frames from input image and text prompt using diffusion process.\n\n Args:\n input_prompt (`str`):\n Text prompt for content generation.\n img (PIL.Image.Image):\n Input image tensor. Shape: [3, H, W]\n max_area (`int`, *optional*, defaults to 720*1280):\n Maximum pixel area for latent space calculation. Controls video resolution scaling\n frame_num (`int`, *optional*, defaults to 81):\n How many frames to sample from a video. The number should be 4n+1\n shift (`float`, *optional*, defaults to 5.0):\n Noise schedule shift parameter. 
Affects temporal dynamics\n [NOTE]: If you want to generate a 480p video, it is recommended to set the shift value to 3.0.\n sample_solver (`str`, *optional*, defaults to 'unipc'):\n Solver used to sample the video.\n sampling_steps (`int`, *optional*, defaults to 40):\n Number of diffusion sampling steps. Higher values improve quality but slow generation\n guide_scale (`float`, *optional*, defaults 5.0):\n Classifier-free guidance scale. Controls prompt adherence vs. creativity\n n_prompt (`str`, *optional*, defaults to \"\"):\n Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`\n seed (`int`, *optional*, defaults to -1):\n Random seed for noise generation. If -1, use random seed\n offload_model (`bool`, *optional*, defaults to True):\n If True, offloads models to CPU during generation to save VRAM\n\n Returns:\n torch.Tensor:\n Generated video frames tensor. Dimensions: (C, N H, W) where:\n - C: Color channels (3 for RGB)\n - N: Number of frames (81)\n - H: Frame height (from max_area)\n - W: Frame width from max_area)\n \"\"\"\n img = TF.to_tensor(img).sub_(0.5).div_(0.5).to(self.device)\n\n F = frame_num\n h, w = img.shape[1:]\n aspect_ratio = h / w\n lat_h = round(\n np.sqrt(max_area * aspect_ratio) // self.vae_stride[1] //\n self.patch_size[1] * self.patch_size[1])\n lat_w = round(\n np.sqrt(max_area / aspect_ratio) // self.vae_stride[2] //\n self.patch_size[2] * self.patch_size[2])\n h = lat_h * self.vae_stride[1]\n w = lat_w * self.vae_stride[2]\n\n max_seq_len = ((F - 1) // self.vae_stride[0] + 1) * lat_h * lat_w // (\n self.patch_size[1] * self.patch_size[2])\n max_seq_len = int(math.ceil(max_seq_len / self.sp_size)) * self.sp_size\n\n seed = seed if seed >= 0 else random.randint(0, sys.maxsize)\n seed_g = torch.Generator(device=self.device)\n seed_g.manual_seed(seed)\n noise = torch.randn(\n 16, (F - 1) // 4 + 1,\n lat_h,\n lat_w,\n dtype=torch.float32,\n generator=seed_g,\n device=self.device)\n\n msk = torch.ones(1, 81, lat_h, lat_w, device=self.device)\n msk[:, 1:] = 0\n msk = torch.concat([\n torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]\n ],\n dim=1)\n msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)\n msk = msk.transpose(1, 2)[0]\n\n if n_prompt == \"\":\n n_prompt = self.sample_neg_prompt\n\n # preprocess\n if not self.t5_cpu:\n self.text_encoder.model.to(self.device)\n context = self.text_encoder([input_prompt], self.device)\n context_null = self.text_encoder([n_prompt], self.device)\n if offload_model:\n self.text_encoder.model.cpu()\n else:\n context = self.text_encoder([input_prompt], torch.device('cpu'))\n context_null = self.text_encoder([n_prompt], torch.device('cpu'))\n context = [t.to(self.device) for t in context]\n context_null = [t.to(self.device) for t in context_null]\n\n self.clip.model.to(self.device)\n clip_context = self.clip.visual([img[:, None, :, :]])\n if offload_model:\n self.clip.model.cpu()\n\n y = self.vae.encode([\n torch.concat([\n torch.nn.functional.interpolate(\n img[None].cpu(), size=(h, w), mode='bicubic').transpose(\n 0, 1),\n torch.zeros(3, F - 1, h, w)\n ],\n dim=1).to(self.device)\n ])[0]\n y = torch.concat([msk, y])\n\n @contextmanager\n def noop_no_sync():\n yield\n\n no_sync = getattr(self.model, 'no_sync', noop_no_sync)\n\n # evaluation mode\n with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():\n\n if sample_solver == 'unipc':\n sample_scheduler = FlowUniPCMultistepScheduler(\n num_train_timesteps=self.num_train_timesteps,\n shift=1,\n 
use_dynamic_shifting=False)\n sample_scheduler.set_timesteps(\n sampling_steps, device=self.device, shift=shift)\n timesteps = sample_scheduler.timesteps\n elif sample_solver == 'dpm++':\n sample_scheduler = FlowDPMSolverMultistepScheduler(\n num_train_timesteps=self.num_train_timesteps,\n shift=1,\n use_dynamic_shifting=False)\n sampling_sigmas = get_sampling_sigmas(sampling_steps, shift)\n timesteps, _ = retrieve_timesteps(\n sample_scheduler,\n device=self.device,\n sigmas=sampling_sigmas)\n else:\n raise NotImplementedError(\"Unsupported solver.\")\n\n # sample videos\n latent = noise\n\n arg_c = {\n 'context': [context[0]],\n 'clip_fea': clip_context,\n 'seq_len': max_seq_len,\n 'y': [y],\n }\n\n arg_null = {\n 'context': context_null,\n 'clip_fea': clip_context,\n 'seq_len': max_seq_len,\n 'y': [y],\n }\n\n if offload_model:\n torch.cuda.empty_cache()\n\n self.model.to(self.device)\n for _, t in enumerate(tqdm(timesteps)):\n latent_model_input = [latent.to(self.device)]\n timestep = [t]\n\n timestep = torch.stack(timestep).to(self.device)\n\n noise_pred_cond = self.model(\n latent_model_input, t=timestep, **arg_c)[0].to(\n torch.device('cpu') if offload_model else self.device)\n if offload_model:\n torch.cuda.empty_cache()\n noise_pred_uncond = self.model(\n latent_model_input, t=timestep, **arg_null)[0].to(\n torch.device('cpu') if offload_model else self.device)\n if offload_model:\n torch.cuda.empty_cache()\n noise_pred = noise_pred_uncond + guide_scale * (\n noise_pred_cond - noise_pred_uncond)\n\n latent = latent.to(\n torch.device('cpu') if offload_model else self.device)\n\n temp_x0 = sample_scheduler.step(\n noise_pred.unsqueeze(0),\n t,\n latent.unsqueeze(0),\n return_dict=False,\n generator=seed_g)[0]\n latent = temp_x0.squeeze(0)\n\n x0 = [latent.to(self.device)]\n del latent_model_input, timestep\n\n if offload_model:\n self.model.cpu()\n torch.cuda.empty_cache()\n\n if self.rank == 0:\n videos = self.vae.decode(x0)\n\n del noise, latent\n del sample_scheduler\n if offload_model:\n gc.collect()\n torch.cuda.synchronize()\n if dist.is_initialized():\n dist.barrier()\n\n return videos[0] if self.rank == 0 else None", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "wan/image2video.py", "stars": 12952, "label": 0} +{"function": " def noop_no_sync():\n yield", "creation_date": "2025-02-25T14:07:47Z", "repo": "Wan-Video/Wan2.1", "file_path": "wan/image2video.py", "stars": 12952, "label": 0} +{"function": "def get_release():\n \"\"\"\n Get the current release of the application\n \"\"\"\n try:\n with open(os.path.join(BASE_DIR, \"pyproject.toml\"), \"rb\") as f:\n pyproject_data = tomllib.load(f)\n return pyproject_data[\"project\"][\"version\"]\n except (FileNotFoundError, KeyError):\n return \"NA\" # Default: not available", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": "src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": " def ENVIRONMENT(self):\n \"\"\"Environment in which the application is launched.\"\"\"\n return self.__class__.__name__.lower()", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": "src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": " def RELEASE(self):\n \"\"\"\n Return the release information.\n\n Delegate to the module function to enable easier testing.\n \"\"\"\n return get_release()", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": 
"src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": " def PARLER_LANGUAGES(self):\n \"\"\"\n Return languages for Parler computed from the LANGUAGES and LANGUAGE_CODE settings.\n \"\"\"\n return {\n self.SITE_ID: tuple({\"code\": code} for code, _name in self.LANGUAGES),\n \"default\": {\n \"fallbacks\": [self.LANGUAGE_CODE],\n \"hide_untranslated\": False,\n },\n }", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": "src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": " def post_setup(cls):\n \"\"\"Post setup configuration.\n This is the place where you can configure settings that require other\n settings to be loaded.\n \"\"\"\n super().post_setup()\n\n # The SENTRY_DSN setting should be available to activate sentry for an environment\n if cls.SENTRY_DSN is not None:\n sentry_sdk.init(\n dsn=cls.SENTRY_DSN,\n environment=cls.__name__.lower(),\n release=get_release(),\n integrations=[DjangoIntegration()],\n )\n sentry_sdk.set_tag(\"application\", \"backend\")\n\n # Ignore the logs added by the DockerflowMiddleware\n ignore_logger(\"request.summary\")\n\n if (\n cls.OIDC_FALLBACK_TO_EMAIL_FOR_IDENTIFICATION\n and cls.OIDC_ALLOW_DUPLICATE_EMAILS\n ):\n raise ValueError(\n \"Both OIDC_FALLBACK_TO_EMAIL_FOR_IDENTIFICATION and \"\n \"OIDC_ALLOW_DUPLICATE_EMAILS cannot be set to True simultaneously. \"\n )", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": "src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": " def __init__(self):\n # pylint: disable=invalid-name\n self.INSTALLED_APPS += [\"django_extensions\", \"drf_spectacular_sidecar\"]", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": "src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": " def __init__(self):\n # pylint: disable=invalid-name\n self.INSTALLED_APPS += [\"drf_spectacular_sidecar\"]", "creation_date": "2024-03-08T07:53:56Z", "repo": "suitenumerique/docs", "file_path": "src/backend/impress/settings.py", "stars": 12770, "label": 0} +{"function": "def test_commands_create_demo():\n \"\"\"The create_demo management command should create objects as expected.\"\"\"\n call_command(\"create_demo\")\n\n assert models.Template.objects.count() == 1\n assert models.User.objects.count() >= 10\n assert models.Document.objects.count() >= 10\n assert models.DocumentAccess.objects.count() > 10\n\n # assert dev users have doc accesses\n user = models.User.objects.get(email=\"impress@impress.world\")\n assert models.DocumentAccess.objects.filter(user=user).exists()\n user = models.User.objects.get(email=\"user@webkit.test\")\n assert models.DocumentAccess.objects.filter(user=user).exists()\n user = models.User.objects.get(email=\"user@firefox.test\")\n assert models.DocumentAccess.objects.filter(user=user).exists()\n user = models.User.objects.get(email=\"user@chromium.test\")\n assert models.DocumentAccess.objects.filter(user=user).exists()", "creation_date": "2024-04-11T07:21:35Z", "repo": "suitenumerique/docs", "file_path": "src/backend/demo/tests/test_commands_create_demo.py", "stars": 12770, "label": 0} +{"function": "def random_true_with_probability(probability):\n \"\"\"return True with the requested probability, False otherwise.\"\"\"\n return random.random() < probability", "creation_date": "2024-04-11T07:21:35Z", "repo": "suitenumerique/docs", "file_path": "src/backend/demo/management/commands/create_demo.py", "stars": 12770, "label": 0} 
+{"function": "def create_demo(stdout):\n \"\"\"\n Create a database with demo data for developers to work in a realistic environment.\n The code is engineered to create a huge number of objects fast.\n \"\"\"\n\n queue = BulkQueue(stdout)\n\n with Timeit(stdout, \"Creating users\"):\n name_size = int(math.sqrt(defaults.NB_OBJECTS[\"users\"]))\n first_names = [fake.first_name() for _ in range(name_size)]\n last_names = [fake.last_name() for _ in range(name_size)]\n for i in range(defaults.NB_OBJECTS[\"users\"]):\n first_name = random.choice(first_names)\n queue.push(\n models.User(\n admin_email=f\"user{i:d}@example.com\",\n email=f\"user{i:d}@example.com\",\n password=\"!\",\n is_superuser=False,\n is_active=True,\n is_staff=False,\n short_name=first_name,\n full_name=f\"{first_name:s} {random.choice(last_names):s}\",\n language=random.choice(settings.LANGUAGES)[0],\n )\n )\n queue.flush()\n\n users_ids = list(models.User.objects.values_list(\"id\", flat=True))\n\n with Timeit(stdout, \"Creating documents\"):\n for i in range(defaults.NB_OBJECTS[\"docs\"]):\n # pylint: disable=protected-access\n key = models.Document._int2str(i) # noqa: SLF001\n padding = models.Document.alphabet[0] * (models.Document.steplen - len(key))\n queue.push(\n models.Document(\n depth=1,\n path=f\"{padding}{key}\",\n creator_id=random.choice(users_ids),\n title=fake.sentence(nb_words=4),\n link_reach=models.LinkReachChoices.AUTHENTICATED\n if random_true_with_probability(0.5)\n else random.choice(models.LinkReachChoices.values),\n )\n )\n\n queue.flush()\n\n with Timeit(stdout, \"Creating docs accesses\"):\n docs_ids = list(models.Document.objects.values_list(\"id\", flat=True))\n for doc_id in docs_ids:\n for user_id in random.sample(\n users_ids,\n random.randint(1, defaults.NB_OBJECTS[\"max_users_per_document\"]),\n ):\n role = random.choice(models.RoleChoices.choices)\n queue.push(\n models.DocumentAccess(\n document_id=doc_id, user_id=user_id, role=role[0]\n )\n )\n queue.flush()\n\n with Timeit(stdout, \"Creating development users\"):\n for dev_user in defaults.DEV_USERS:\n queue.push(\n models.User(\n admin_email=dev_user[\"email\"],\n email=dev_user[\"email\"],\n sub=dev_user[\"email\"],\n password=\"!\",\n is_superuser=False,\n is_active=True,\n is_staff=False,\n language=dev_user[\"language\"]\n or random.choice(settings.LANGUAGES)[0],\n )\n )\n\n queue.flush()\n\n with Timeit(stdout, \"Creating docs accesses on development users\"):\n for dev_user in defaults.DEV_USERS:\n docs_ids = list(models.Document.objects.values_list(\"id\", flat=True))\n user_id = models.User.objects.get(email=dev_user[\"email\"]).id\n\n for doc_id in docs_ids:\n role = random.choice(models.RoleChoices.choices)\n queue.push(\n models.DocumentAccess(\n document_id=doc_id, user_id=user_id, role=role[0]\n )\n )\n\n queue.flush()\n\n with Timeit(stdout, \"Creating Template\"):\n with open(\n file=\"demo/data/template/code.txt\", mode=\"r\", encoding=\"utf-8\"\n ) as text_file:\n code_data = text_file.read()\n\n with open(\n file=\"demo/data/template/css.txt\", mode=\"r\", encoding=\"utf-8\"\n ) as text_file:\n css_data = text_file.read()\n\n queue.push(\n models.Template(\n id=\"baca9e2a-59fb-42ef-b5c6-6f6b05637111\",\n title=\"Demo Template\",\n description=\"This is the demo template\",\n code=code_data,\n css=css_data,\n is_public=True,\n )\n )\n queue.flush()", "creation_date": "2024-04-11T07:21:35Z", "repo": "suitenumerique/docs", "file_path": "src/backend/demo/management/commands/create_demo.py", "stars": 12770, "label": 0} 
+{"function": "def test_lucene_sanitize():\n # Call the function with test data\n queries = [\n (\n 'This has every escape character + - && || ! ( ) { } [ ] ^ \" ~ * ? : \\\\ /',\n '\\\\This has every escape character \\\\+ \\\\- \\\\&\\\\& \\\\|\\\\| \\\\! \\\\( \\\\) \\\\{ \\\\} \\\\[ \\\\] \\\\^ \\\\\" \\\\~ \\\\* \\\\? \\\\: \\\\\\\\ \\\\/',\n ),\n ('this has no escape characters', 'this has no escape characters'),\n ]\n\n for query, assert_result in queries:\n result = lucene_sanitize(query)\n assert assert_result == result", "creation_date": "2024-10-03T14:08:30Z", "repo": "getzep/graphiti", "file_path": "tests/helpers_test.py", "stars": 12768, "label": 0} +{"function": "async def test_exclude_default_entity_type():\n \"\"\"Test excluding the default 'Entity' type while keeping custom types.\"\"\"\n graphiti = Graphiti(NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD)\n\n try:\n await graphiti.build_indices_and_constraints()\n\n # Define entity types but exclude the default 'Entity' type\n entity_types = {\n 'Person': Person,\n 'Organization': Organization,\n }\n\n # Add an episode that would normally create both Entity and custom type entities\n episode_content = (\n 'John Smith works at Acme Corporation in New York. The weather is nice today.'\n )\n\n result = await graphiti.add_episode(\n name='Business Meeting',\n episode_body=episode_content,\n source_description='Meeting notes',\n reference_time=datetime.now(timezone.utc),\n entity_types=entity_types,\n excluded_entity_types=['Entity'], # Exclude default type\n group_id='test_exclude_default',\n )\n\n # Verify that nodes were created (custom types should still work)\n assert result is not None\n\n # Search for nodes to verify only custom types were created\n search_results = await graphiti.search_(\n query='John Smith Acme Corporation', group_ids=['test_exclude_default']\n )\n\n # Check that entities were created but with specific types, not default 'Entity'\n found_nodes = search_results.nodes\n for node in found_nodes:\n assert 'Entity' in node.labels # All nodes should have Entity label\n # But they should also have specific type labels\n assert any(label in ['Person', 'Organization'] for label in node.labels), (\n f'Node {node.name} should have a specific type label, got: {node.labels}'\n )\n\n # Clean up\n await _cleanup_test_nodes(graphiti, 'test_exclude_default')\n\n finally:\n await graphiti.close()", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "async def test_exclude_specific_custom_types():\n \"\"\"Test excluding specific custom entity types while keeping others.\"\"\"\n graphiti = Graphiti(NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD)\n\n try:\n await graphiti.build_indices_and_constraints()\n\n # Define multiple entity types\n entity_types = {\n 'Person': Person,\n 'Organization': Organization,\n 'Location': Location,\n }\n\n # Add an episode with content that would create all types\n episode_content = (\n 'Sarah Johnson from Google visited the San Francisco office to discuss the new project.'\n )\n\n result = await graphiti.add_episode(\n name='Office Visit',\n episode_body=episode_content,\n source_description='Visit report',\n reference_time=datetime.now(timezone.utc),\n entity_types=entity_types,\n excluded_entity_types=['Organization', 'Location'], # Exclude these types\n group_id='test_exclude_custom',\n )\n\n assert result is not None\n\n # Search for nodes to verify only Person and Entity types were created\n 
search_results = await graphiti.search_(\n query='Sarah Johnson Google San Francisco', group_ids=['test_exclude_custom']\n )\n\n found_nodes = search_results.nodes\n\n # Should have Person and Entity type nodes, but no Organization or Location\n for node in found_nodes:\n assert 'Entity' in node.labels\n # Should not have excluded types\n assert 'Organization' not in node.labels, (\n f'Found excluded Organization in node: {node.name}'\n )\n assert 'Location' not in node.labels, f'Found excluded Location in node: {node.name}'\n\n # Should find at least one Person entity (Sarah Johnson)\n person_nodes = [n for n in found_nodes if 'Person' in n.labels]\n assert len(person_nodes) > 0, 'Should have found at least one Person entity'\n\n # Clean up\n await _cleanup_test_nodes(graphiti, 'test_exclude_custom')\n\n finally:\n await graphiti.close()", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "async def test_exclude_all_types():\n \"\"\"Test excluding all entity types (edge case).\"\"\"\n graphiti = Graphiti(NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD)\n\n try:\n await graphiti.build_indices_and_constraints()\n\n entity_types = {\n 'Person': Person,\n 'Organization': Organization,\n }\n\n # Exclude all types\n result = await graphiti.add_episode(\n name='No Entities',\n episode_body='This text mentions John and Microsoft but no entities should be created.',\n source_description='Test content',\n reference_time=datetime.now(timezone.utc),\n entity_types=entity_types,\n excluded_entity_types=['Entity', 'Person', 'Organization'], # Exclude everything\n group_id='test_exclude_all',\n )\n\n assert result is not None\n\n # Search for nodes - should find very few or none from this episode\n search_results = await graphiti.search_(\n query='John Microsoft', group_ids=['test_exclude_all']\n )\n\n # There should be minimal to no entities created\n found_nodes = search_results.nodes\n assert len(found_nodes) == 0, (\n f'Expected no entities, but found: {[n.name for n in found_nodes]}'\n )\n\n # Clean up\n await _cleanup_test_nodes(graphiti, 'test_exclude_all')\n\n finally:\n await graphiti.close()", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "async def test_exclude_no_types():\n \"\"\"Test normal behavior when no types are excluded (baseline test).\"\"\"\n graphiti = Graphiti(NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD)\n\n try:\n await graphiti.build_indices_and_constraints()\n\n entity_types = {\n 'Person': Person,\n 'Organization': Organization,\n }\n\n # Don't exclude any types\n result = await graphiti.add_episode(\n name='Normal Behavior',\n episode_body='Alice Smith works at TechCorp.',\n source_description='Normal test',\n reference_time=datetime.now(timezone.utc),\n entity_types=entity_types,\n excluded_entity_types=None, # No exclusions\n group_id='test_exclude_none',\n )\n\n assert result is not None\n\n # Search for nodes - should find entities of all types\n search_results = await graphiti.search_(\n query='Alice Smith TechCorp', group_ids=['test_exclude_none']\n )\n\n found_nodes = search_results.nodes\n assert len(found_nodes) > 0, 'Should have found some entities'\n\n # Should have both Person and Organization entities\n person_nodes = [n for n in found_nodes if 'Person' in n.labels]\n org_nodes = [n for n in found_nodes if 'Organization' in n.labels]\n\n assert 
len(person_nodes) > 0, 'Should have found Person entities'\n assert len(org_nodes) > 0, 'Should have found Organization entities'\n\n # Clean up\n await _cleanup_test_nodes(graphiti, 'test_exclude_none')\n\n finally:\n await graphiti.close()", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "def test_validation_valid_excluded_types():\n \"\"\"Test validation function with valid excluded types.\"\"\"\n entity_types = {\n 'Person': Person,\n 'Organization': Organization,\n }\n\n # Valid exclusions\n assert validate_excluded_entity_types(['Entity'], entity_types) is True\n assert validate_excluded_entity_types(['Person'], entity_types) is True\n assert validate_excluded_entity_types(['Entity', 'Person'], entity_types) is True\n assert validate_excluded_entity_types(None, entity_types) is True\n assert validate_excluded_entity_types([], entity_types) is True", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "def test_validation_invalid_excluded_types():\n \"\"\"Test validation function with invalid excluded types.\"\"\"\n entity_types = {\n 'Person': Person,\n 'Organization': Organization,\n }\n\n # Invalid exclusions should raise ValueError\n with pytest.raises(ValueError, match='Invalid excluded entity types'):\n validate_excluded_entity_types(['InvalidType'], entity_types)\n\n with pytest.raises(ValueError, match='Invalid excluded entity types'):\n validate_excluded_entity_types(['Person', 'NonExistentType'], entity_types)", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "async def test_excluded_types_parameter_validation_in_add_episode():\n \"\"\"Test that add_episode validates excluded_entity_types parameter.\"\"\"\n graphiti = Graphiti(NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD)\n\n try:\n entity_types = {\n 'Person': Person,\n }\n\n # Should raise ValueError for invalid excluded type\n with pytest.raises(ValueError, match='Invalid excluded entity types'):\n await graphiti.add_episode(\n name='Invalid Test',\n episode_body='Test content',\n source_description='Test',\n reference_time=datetime.now(timezone.utc),\n entity_types=entity_types,\n excluded_entity_types=['NonExistentType'],\n group_id='test_validation',\n )\n\n finally:\n await graphiti.close()", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "async def _cleanup_test_nodes(graphiti: Graphiti, group_id: str):\n \"\"\"Helper function to clean up test nodes.\"\"\"\n try:\n # Get all nodes for this group\n search_results = await graphiti.search_(query='*', group_ids=[group_id])\n\n # Delete all found nodes\n for node in search_results.nodes:\n await node.delete(graphiti.driver)\n\n except Exception as e:\n # Log but don't fail the test if cleanup fails\n print(f'Warning: Failed to clean up test nodes for group {group_id}: {e}')", "creation_date": "2025-06-27T03:54:43Z", "repo": "getzep/graphiti", "file_path": "tests/test_entity_exclusion_int.py", "stars": 12768, "label": 0} +{"function": "def setup_logging():\n # Create a logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO) # Set the logging level to INFO\n\n # Create console handler and set level to INFO\n console_handler = 
logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n\n # Create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Add formatter to console handler\n console_handler.setFormatter(formatter)\n\n # Add console handler to logger\n logger.addHandler(console_handler)\n\n return logger", "creation_date": "2025-06-30T15:01:44Z", "repo": "getzep/graphiti", "file_path": "tests/test_graphiti_falkordb_int.py", "stars": 12768, "label": 0} +{"function": " def __init__(\n self,\n model=\"F5TTS_v1_Base\",\n ckpt_file=\"\",\n vocab_file=\"\",\n ode_method=\"euler\",\n use_ema=True,\n vocoder_local_path=None,\n device=None,\n hf_cache_dir=None,\n ):\n model_cfg = OmegaConf.load(str(files(\"f5_tts\").joinpath(f\"configs/{model}.yaml\")))\n model_cls = get_class(f\"f5_tts.model.{model_cfg.model.backbone}\")\n model_arc = model_cfg.model.arch\n\n self.mel_spec_type = model_cfg.model.mel_spec.mel_spec_type\n self.target_sample_rate = model_cfg.model.mel_spec.target_sample_rate\n\n self.ode_method = ode_method\n self.use_ema = use_ema\n\n if device is not None:\n self.device = device\n else:\n import torch\n\n self.device = (\n \"cuda\"\n if torch.cuda.is_available()\n else \"xpu\"\n if torch.xpu.is_available()\n else \"mps\"\n if torch.backends.mps.is_available()\n else \"cpu\"\n )\n\n # Load models\n self.vocoder = load_vocoder(\n self.mel_spec_type, vocoder_local_path is not None, vocoder_local_path, self.device, hf_cache_dir\n )\n\n repo_name, ckpt_step, ckpt_type = \"F5-TTS\", 1250000, \"safetensors\"\n\n # override for previous models\n if model == \"F5TTS_Base\":\n if self.mel_spec_type == \"vocos\":\n ckpt_step = 1200000\n elif self.mel_spec_type == \"bigvgan\":\n model = \"F5TTS_Base_bigvgan\"\n ckpt_type = \"pt\"\n elif model == \"E2TTS_Base\":\n repo_name = \"E2-TTS\"\n ckpt_step = 1200000\n\n if not ckpt_file:\n ckpt_file = str(\n cached_path(f\"hf://SWivid/{repo_name}/{model}/model_{ckpt_step}.{ckpt_type}\", cache_dir=hf_cache_dir)\n )\n self.ema_model = load_model(\n model_cls, model_arc, ckpt_file, self.mel_spec_type, vocab_file, self.ode_method, self.use_ema, self.device\n )", "creation_date": "2024-10-23T13:07:59Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/api.py", "stars": 12625, "label": 0} +{"function": " def transcribe(self, ref_audio, language=None):\n return transcribe(ref_audio, language)", "creation_date": "2024-10-23T13:07:59Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/api.py", "stars": 12625, "label": 0} +{"function": " def export_wav(self, wav, file_wave, remove_silence=False):\n sf.write(file_wave, wav, self.target_sample_rate)\n\n if remove_silence:\n remove_silence_for_generated_wav(file_wave)", "creation_date": "2024-10-23T13:07:59Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/api.py", "stars": 12625, "label": 0} +{"function": " def export_spectrogram(self, spec, file_spec):\n save_spectrogram(spec, file_spec)", "creation_date": "2024-10-23T13:07:59Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/api.py", "stars": 12625, "label": 0} +{"function": " def infer(\n self,\n ref_file,\n ref_text,\n gen_text,\n show_info=print,\n progress=tqdm,\n target_rms=0.1,\n cross_fade_duration=0.15,\n sway_sampling_coef=-1,\n cfg_strength=2,\n nfe_step=32,\n speed=1.0,\n fix_duration=None,\n remove_silence=False,\n file_wave=None,\n file_spec=None,\n seed=None,\n ):\n if seed is None:\n seed = random.randint(0, sys.maxsize)\n seed_everything(seed)\n self.seed = seed\n\n ref_file, ref_text = 
preprocess_ref_audio_text(ref_file, ref_text)\n\n wav, sr, spec = infer_process(\n ref_file,\n ref_text,\n gen_text,\n self.ema_model,\n self.vocoder,\n self.mel_spec_type,\n show_info=show_info,\n progress=progress,\n target_rms=target_rms,\n cross_fade_duration=cross_fade_duration,\n nfe_step=nfe_step,\n cfg_strength=cfg_strength,\n sway_sampling_coef=sway_sampling_coef,\n speed=speed,\n fix_duration=fix_duration,\n device=self.device,\n )\n\n if file_wave is not None:\n self.export_wav(wav, file_wave, remove_silence)\n\n if file_spec is not None:\n self.export_spectrogram(spec, file_spec)\n\n return wav, sr, spec", "creation_date": "2024-10-23T13:07:59Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/api.py", "stars": 12625, "label": 0} +{"function": "async def listen_to_F5TTS(text, server_ip=\"localhost\", server_port=9998):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n await asyncio.get_event_loop().run_in_executor(None, client_socket.connect, (server_ip, int(server_port)))\n\n start_time = time.time()\n first_chunk_time = None\n\n async def play_audio_stream():\n nonlocal first_chunk_time\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paFloat32, channels=1, rate=24000, output=True, frames_per_buffer=2048)\n\n try:\n while True:\n data = await asyncio.get_event_loop().run_in_executor(None, client_socket.recv, 8192)\n if not data:\n break\n if data == b\"END\":\n logger.info(\"End of audio received.\")\n break\n\n audio_array = np.frombuffer(data, dtype=np.float32)\n stream.write(audio_array.tobytes())\n\n if first_chunk_time is None:\n first_chunk_time = time.time()\n\n finally:\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n logger.info(f\"Total time taken: {time.time() - start_time:.4f} seconds\")\n\n try:\n data_to_send = f\"{text}\".encode(\"utf-8\")\n await asyncio.get_event_loop().run_in_executor(None, client_socket.sendall, data_to_send)\n await play_audio_stream()\n\n except Exception as e:\n logger.error(f\"Error in listen_to_F5TTS: {e}\")\n\n finally:\n client_socket.close()", "creation_date": "2025-03-12T09:23:10Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/socket_client.py", "stars": 12625, "label": 0} +{"function": " async def play_audio_stream():\n nonlocal first_chunk_time\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paFloat32, channels=1, rate=24000, output=True, frames_per_buffer=2048)\n\n try:\n while True:\n data = await asyncio.get_event_loop().run_in_executor(None, client_socket.recv, 8192)\n if not data:\n break\n if data == b\"END\":\n logger.info(\"End of audio received.\")\n break\n\n audio_array = np.frombuffer(data, dtype=np.float32)\n stream.write(audio_array.tobytes())\n\n if first_chunk_time is None:\n first_chunk_time = time.time()\n\n finally:\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n logger.info(f\"Total time taken: {time.time() - start_time:.4f} seconds\")", "creation_date": "2025-03-12T09:23:10Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/socket_client.py", "stars": 12625, "label": 0} +{"function": "def handle_client(conn, processor):\n try:\n with conn:\n conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n while True:\n data = conn.recv(1024)\n if not data:\n processor.first_package = True\n break\n data_str = data.decode(\"utf-8\").strip()\n logger.info(f\"Received text: {data_str}\")\n\n try:\n processor.generate_stream(data_str, conn)\n except Exception as inner_e:\n logger.error(f\"Error during processing: {inner_e}\")\n 
traceback.print_exc()\n break\n except Exception as e:\n logger.error(f\"Error handling client: {e}\")\n traceback.print_exc()", "creation_date": "2024-11-04T08:48:09Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/socket_server.py", "stars": 12625, "label": 0} +{"function": "def start_server(host, port, processor):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((host, port))\n s.listen()\n logger.info(f\"Server started on {host}:{port}\")\n while True:\n conn, addr = s.accept()\n logger.info(f\"Connected by {addr}\")\n handle_client(conn, processor)", "creation_date": "2024-11-04T08:48:09Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/socket_server.py", "stars": 12625, "label": 0} +{"function": " def __init__(self, output_file, sampling_rate):\n super().__init__()\n self.output_file = output_file\n self.sampling_rate = sampling_rate\n self.queue = queue.Queue()\n self.stop_event = threading.Event()\n self.audio_data = []", "creation_date": "2024-11-04T08:48:09Z", "repo": "SWivid/F5-TTS", "file_path": "src/f5_tts/socket_server.py", "stars": 12625, "label": 0} +{"function": "def setup_span_processor():\n set_trace_processors([SPAN_PROCESSOR_TESTING])", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": "def clear_span_processor():\n SPAN_PROCESSOR_TESTING.force_flush()\n SPAN_PROCESSOR_TESTING.shutdown()\n SPAN_PROCESSOR_TESTING.clear()", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": "def clear_openai_settings():\n _openai_shared._default_openai_key = None\n _openai_shared._default_openai_client = None\n _openai_shared._use_responses_by_default = True", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": "def clear_default_runner():\n set_default_agent_runner(None)", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": "def shutdown_trace_provider():\n yield\n get_trace_provider().shutdown()", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": "def disable_real_model_clients(monkeypatch, request):\n # If the test is marked to allow the method call, don't override it.\n if request.node.get_closest_marker(\"allow_call_model_methods\"):\n return\n\n def failing_version(*args, **kwargs):\n pytest.fail(\"Real models should not be used in tests!\")\n\n monkeypatch.setattr(OpenAIResponsesModel, \"get_response\", failing_version)\n monkeypatch.setattr(OpenAIResponsesModel, \"stream_response\", failing_version)\n monkeypatch.setattr(OpenAIChatCompletionsModel, \"get_response\", failing_version)\n monkeypatch.setattr(OpenAIChatCompletionsModel, \"stream_response\", failing_version)", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": " def failing_version(*args, **kwargs):\n pytest.fail(\"Real models should not be used in tests!\")", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/conftest.py", "stars": 12537, "label": 0} +{"function": "def get_response_obj(\n output: 
list[TResponseOutputItem],\n response_id: str | None = None,\n usage: Usage | None = None,\n) -> Response:\n return Response(\n id=response_id or \"123\",\n created_at=123,\n model=\"test_model\",\n object=\"response\",\n output=output,\n tool_choice=\"none\",\n tools=[],\n top_p=None,\n parallel_tool_calls=False,\n usage=ResponseUsage(\n input_tokens=usage.input_tokens if usage else 0,\n output_tokens=usage.output_tokens if usage else 0,\n total_tokens=usage.total_tokens if usage else 0,\n input_tokens_details=InputTokensDetails(cached_tokens=0),\n output_tokens_details=OutputTokensDetails(reasoning_tokens=0),\n ),\n )", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/fake_model.py", "stars": 12537, "label": 0} +{"function": " def __init__(\n self,\n tracing_enabled: bool = False,\n initial_output: list[TResponseOutputItem] | Exception | None = None,\n ):\n if initial_output is None:\n initial_output = []\n self.turn_outputs: list[list[TResponseOutputItem] | Exception] = (\n [initial_output] if initial_output else []\n )\n self.tracing_enabled = tracing_enabled\n self.last_turn_args: dict[str, Any] = {}\n self.hardcoded_usage: Usage | None = None", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/fake_model.py", "stars": 12537, "label": 0} +{"function": " def set_hardcoded_usage(self, usage: Usage):\n self.hardcoded_usage = usage", "creation_date": "2025-03-11T16:42:28Z", "repo": "openai/openai-agents-python", "file_path": "tests/fake_model.py", "stars": 12537, "label": 0} +{"function": "def print_banner() -> None:\n \"\"\"\n Prints the introductory ASCII Art Banner.\n\n Returns:\n None\n \"\"\"\n with open(f\"{ROOT_DIR}/assets/banner.txt\", \"r\") as file:\n print(colored(file.read(), \"green\"))", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/art.py", "stars": 12372, "label": 0} +{"function": "def get_cache_path() -> str:\n \"\"\"\n Gets the path to the cache file.\n\n Returns:\n path (str): The path to the cache folder\n \"\"\"\n return os.path.join(ROOT_DIR, '.mp')", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def get_afm_cache_path() -> str:\n \"\"\"\n Gets the path to the Affiliate Marketing cache file.\n\n Returns:\n path (str): The path to the AFM cache folder\n \"\"\"\n return os.path.join(get_cache_path(), 'afm.json')", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def get_twitter_cache_path() -> str:\n \"\"\"\n Gets the path to the Twitter cache file.\n\n Returns:\n path (str): The path to the Twitter cache folder\n \"\"\"\n return os.path.join(get_cache_path(), 'twitter.json')", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def get_youtube_cache_path() -> str:\n \"\"\"\n Gets the path to the YouTube cache file.\n\n Returns:\n path (str): The path to the YouTube cache folder\n \"\"\"\n return os.path.join(get_cache_path(), 'youtube.json')", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def get_accounts(provider: str) -> List[dict]:\n \"\"\"\n Gets the accounts from the cache.\n\n Args:\n provider (str): 
The provider to get the accounts for\n\n Returns:\n account (List[dict]): The accounts\n \"\"\"\n cache_path = \"\"\n\n if provider == \"twitter\":\n cache_path = get_twitter_cache_path()\n elif provider == \"youtube\":\n cache_path = get_youtube_cache_path()\n\n if not os.path.exists(cache_path):\n # Create the cache file\n with open(cache_path, 'w') as file:\n json.dump({\n \"accounts\": []\n }, file, indent=4)\n\n with open(cache_path, 'r') as file:\n parsed = json.load(file)\n\n if parsed is None:\n return []\n \n if 'accounts' not in parsed:\n return []\n\n # Get accounts dictionary\n return parsed['accounts']", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def add_account(provider: str, account: dict) -> None:\n \"\"\"\n Adds an account to the cache.\n\n Args:\n account (dict): The account to add\n\n Returns:\n None\n \"\"\"\n if provider == \"twitter\":\n # Get the current accounts\n accounts = get_accounts(\"twitter\")\n\n # Add the new account\n accounts.append(account)\n\n # Write the new accounts to the cache\n with open(get_twitter_cache_path(), 'w') as file:\n json.dump({\n \"accounts\": accounts\n }, file, indent=4)\n elif provider == \"youtube\":\n # Get the current accounts\n accounts = get_accounts(\"youtube\")\n\n # Add the new account\n accounts.append(account)\n\n # Write the new accounts to the cache\n with open(get_youtube_cache_path(), 'w') as file:\n json.dump({\n \"accounts\": accounts\n }, file, indent=4)", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def remove_account(account_id: str) -> None:\n \"\"\"\n Removes an account from the cache.\n\n Args:\n account_id (str): The ID of the account to remove\n\n Returns:\n None\n \"\"\"\n # Get the current accounts\n accounts = get_accounts()\n\n # Remove the account\n accounts = [account for account in accounts if account['id'] != account_id]\n\n # Write the new accounts to the cache\n with open(get_twitter_cache_path(), 'w') as file:\n json.dump({\n \"accounts\": accounts\n }, file, indent=4)", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def get_products() -> List[dict]:\n \"\"\"\n Gets the products from the cache.\n\n Returns:\n products (List[dict]): The products\n \"\"\"\n if not os.path.exists(get_afm_cache_path()):\n # Create the cache file\n with open(get_afm_cache_path(), 'w') as file:\n json.dump({\n \"products\": []\n }, file, indent=4)\n\n with open(get_afm_cache_path(), 'r') as file:\n parsed = json.load(file)\n\n # Get the products\n return parsed[\"products\"]", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def add_product(product: dict) -> None:\n \"\"\"\n Adds a product to the cache.\n\n Args:\n product (dict): The product to add\n\n Returns:\n None\n \"\"\"\n # Get the current products\n products = get_products()\n\n # Add the new product\n products.append(product)\n\n # Write the new products to the cache\n with open(get_afm_cache_path(), 'w') as file:\n json.dump({\n \"products\": products\n }, file, indent=4)", "creation_date": "2024-02-12T11:20:45Z", "repo": "FujiwaraChoki/MoneyPrinterV2", "file_path": "src/cache.py", "stars": 12372, "label": 0} +{"function": "def 
register():\n bpy.types.Scene.blendermcp_port = IntProperty(\n name=\"Port\",\n description=\"Port for the BlenderMCP server\",\n default=9876,\n min=1024,\n max=65535\n )\n \n bpy.types.Scene.blendermcp_server_running = bpy.props.BoolProperty(\n name=\"Server Running\",\n default=False\n )\n \n bpy.types.Scene.blendermcp_use_polyhaven = bpy.props.BoolProperty(\n name=\"Use Poly Haven\",\n description=\"Enable Poly Haven asset integration\",\n default=False\n )\n\n bpy.types.Scene.blendermcp_use_hyper3d = bpy.props.BoolProperty(\n name=\"Use Hyper3D Rodin\",\n description=\"Enable Hyper3D Rodin generatino integration\",\n default=False\n )\n\n bpy.types.Scene.blendermcp_hyper3d_mode = bpy.props.EnumProperty(\n name=\"Rodin Mode\",\n description=\"Choose the platform used to call Rodin APIs\",\n items=[\n (\"MAIN_SITE\", \"hyper3d.ai\", \"hyper3d.ai\"),\n (\"FAL_AI\", \"fal.ai\", \"fal.ai\"),\n ],\n default=\"MAIN_SITE\"\n )\n\n bpy.types.Scene.blendermcp_hyper3d_api_key = bpy.props.StringProperty(\n name=\"Hyper3D API Key\",\n subtype=\"PASSWORD\",\n description=\"API Key provided by Hyper3D\",\n default=\"\"\n )\n \n bpy.types.Scene.blendermcp_use_sketchfab = bpy.props.BoolProperty(\n name=\"Use Sketchfab\",\n description=\"Enable Sketchfab asset integration\",\n default=False\n )\n\n bpy.types.Scene.blendermcp_sketchfab_api_key = bpy.props.StringProperty(\n name=\"Sketchfab API Key\",\n subtype=\"PASSWORD\",\n description=\"API Key provided by Sketchfab\",\n default=\"\"\n )\n \n bpy.utils.register_class(BLENDERMCP_PT_Panel)\n bpy.utils.register_class(BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey)\n bpy.utils.register_class(BLENDERMCP_OT_StartServer)\n bpy.utils.register_class(BLENDERMCP_OT_StopServer)\n \n print(\"BlenderMCP addon registered\")", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": "def unregister():\n # Stop the server if it's running\n if hasattr(bpy.types, \"blendermcp_server\") and bpy.types.blendermcp_server:\n bpy.types.blendermcp_server.stop()\n del bpy.types.blendermcp_server\n \n bpy.utils.unregister_class(BLENDERMCP_PT_Panel)\n bpy.utils.unregister_class(BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey)\n bpy.utils.unregister_class(BLENDERMCP_OT_StartServer)\n bpy.utils.unregister_class(BLENDERMCP_OT_StopServer)\n \n del bpy.types.Scene.blendermcp_port\n del bpy.types.Scene.blendermcp_server_running\n del bpy.types.Scene.blendermcp_use_polyhaven\n del bpy.types.Scene.blendermcp_use_hyper3d\n del bpy.types.Scene.blendermcp_hyper3d_mode\n del bpy.types.Scene.blendermcp_hyper3d_api_key\n del bpy.types.Scene.blendermcp_use_sketchfab\n del bpy.types.Scene.blendermcp_sketchfab_api_key\n\n print(\"BlenderMCP addon unregistered\")", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def __init__(self, host='localhost', port=9876):\n self.host = host\n self.port = port\n self.running = False\n self.socket = None\n self.server_thread = None", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def start(self):\n if self.running:\n print(\"Server is already running\")\n return\n \n self.running = True\n \n try:\n # Create socket\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind((self.host, self.port))\n 
self.socket.listen(1)\n \n # Start server thread\n self.server_thread = threading.Thread(target=self._server_loop)\n self.server_thread.daemon = True\n self.server_thread.start()\n \n print(f\"BlenderMCP server started on {self.host}:{self.port}\")\n except Exception as e:\n print(f\"Failed to start server: {str(e)}\")\n self.stop()", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def stop(self):\n self.running = False\n \n # Close socket\n if self.socket:\n try:\n self.socket.close()\n except:\n pass\n self.socket = None\n \n # Wait for thread to finish\n if self.server_thread:\n try:\n if self.server_thread.is_alive():\n self.server_thread.join(timeout=1.0)\n except:\n pass\n self.server_thread = None\n \n print(\"BlenderMCP server stopped\")", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def _server_loop(self):\n \"\"\"Main server loop in a separate thread\"\"\"\n print(\"Server thread started\")\n self.socket.settimeout(1.0) # Timeout to allow for stopping\n \n while self.running:\n try:\n # Accept new connection\n try:\n client, address = self.socket.accept()\n print(f\"Connected to client: {address}\")\n \n # Handle client in a separate thread\n client_thread = threading.Thread(\n target=self._handle_client,\n args=(client,)\n )\n client_thread.daemon = True\n client_thread.start()\n except socket.timeout:\n # Just check running condition\n continue\n except Exception as e:\n print(f\"Error accepting connection: {str(e)}\")\n time.sleep(0.5)\n except Exception as e:\n print(f\"Error in server loop: {str(e)}\")\n if not self.running:\n break\n time.sleep(0.5)\n \n print(\"Server thread stopped\")", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def _handle_client(self, client):\n \"\"\"Handle connected client\"\"\"\n print(\"Client handler started\")\n client.settimeout(None) # No timeout\n buffer = b''\n \n try:\n while self.running:\n # Receive data\n try:\n data = client.recv(8192)\n if not data:\n print(\"Client disconnected\")\n break\n \n buffer += data\n try:\n # Try to parse command\n command = json.loads(buffer.decode('utf-8'))\n buffer = b''\n \n # Execute command in Blender's main thread\n def execute_wrapper():\n try:\n response = self.execute_command(command)\n response_json = json.dumps(response)\n try:\n client.sendall(response_json.encode('utf-8'))\n except:\n print(\"Failed to send response - client disconnected\")\n except Exception as e:\n print(f\"Error executing command: {str(e)}\")\n traceback.print_exc()\n try:\n error_response = {\n \"status\": \"error\",\n \"message\": str(e)\n }\n client.sendall(json.dumps(error_response).encode('utf-8'))\n except:\n pass\n return None\n \n # Schedule execution in main thread\n bpy.app.timers.register(execute_wrapper, first_interval=0.0)\n except json.JSONDecodeError:\n # Incomplete data, wait for more\n pass\n except Exception as e:\n print(f\"Error receiving data: {str(e)}\")\n break\n except Exception as e:\n print(f\"Error in client handler: {str(e)}\")\n finally:\n try:\n client.close()\n except:\n pass\n print(\"Client handler stopped\")", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def execute_command(self, command):\n \"\"\"Execute a 
command in the main Blender thread\"\"\"\n try: \n return self._execute_command_internal(command)\n \n except Exception as e:\n print(f\"Error executing command: {str(e)}\")\n traceback.print_exc()\n return {\"status\": \"error\", \"message\": str(e)}", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def _execute_command_internal(self, command):\n \"\"\"Internal command execution with proper context\"\"\"\n cmd_type = command.get(\"type\")\n params = command.get(\"params\", {})\n\n # Add a handler for checking PolyHaven status\n if cmd_type == \"get_polyhaven_status\":\n return {\"status\": \"success\", \"result\": self.get_polyhaven_status()}\n \n # Base handlers that are always available\n handlers = {\n \"get_scene_info\": self.get_scene_info,\n \"get_object_info\": self.get_object_info,\n \"get_viewport_screenshot\": self.get_viewport_screenshot,\n \"execute_code\": self.execute_code,\n \"get_polyhaven_status\": self.get_polyhaven_status,\n \"get_hyper3d_status\": self.get_hyper3d_status,\n \"get_sketchfab_status\": self.get_sketchfab_status,\n }\n \n # Add Polyhaven handlers only if enabled\n if bpy.context.scene.blendermcp_use_polyhaven:\n polyhaven_handlers = {\n \"get_polyhaven_categories\": self.get_polyhaven_categories,\n \"search_polyhaven_assets\": self.search_polyhaven_assets,\n \"download_polyhaven_asset\": self.download_polyhaven_asset,\n \"set_texture\": self.set_texture,\n }\n handlers.update(polyhaven_handlers)\n \n # Add Hyper3d handlers only if enabled\n if bpy.context.scene.blendermcp_use_hyper3d:\n polyhaven_handlers = {\n \"create_rodin_job\": self.create_rodin_job,\n \"poll_rodin_job_status\": self.poll_rodin_job_status,\n \"import_generated_asset\": self.import_generated_asset,\n }\n handlers.update(polyhaven_handlers)\n \n # Add Sketchfab handlers only if enabled\n if bpy.context.scene.blendermcp_use_sketchfab:\n sketchfab_handlers = {\n \"search_sketchfab_models\": self.search_sketchfab_models,\n \"download_sketchfab_model\": self.download_sketchfab_model,\n }\n handlers.update(sketchfab_handlers)\n\n handler = handlers.get(cmd_type)\n if handler:\n try:\n print(f\"Executing handler for {cmd_type}\")\n result = handler(**params)\n print(f\"Handler execution complete\")\n return {\"status\": \"success\", \"result\": result}\n except Exception as e:\n print(f\"Error in handler: {str(e)}\")\n traceback.print_exc()\n return {\"status\": \"error\", \"message\": str(e)}\n else:\n return {\"status\": \"error\", \"message\": f\"Unknown command type: {cmd_type}\"}", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": " def get_scene_info(self):\n \"\"\"Get information about the current Blender scene\"\"\"\n try:\n print(\"Getting scene info...\")\n # Simplify the scene info to reduce data size\n scene_info = {\n \"name\": bpy.context.scene.name,\n \"object_count\": len(bpy.context.scene.objects),\n \"objects\": [],\n \"materials_count\": len(bpy.data.materials),\n }\n \n # Collect minimal object information (limit to first 10 objects)\n for i, obj in enumerate(bpy.context.scene.objects):\n if i >= 10: # Reduced from 20 to 10\n break\n \n obj_info = {\n \"name\": obj.name,\n \"type\": obj.type,\n # Only include basic location data\n \"location\": [round(float(obj.location.x), 2), \n round(float(obj.location.y), 2), \n round(float(obj.location.z), 2)],\n }\n 
scene_info[\"objects\"].append(obj_info)\n \n print(f\"Scene info collected: {len(scene_info['objects'])} objects\")\n return scene_info\n except Exception as e:\n print(f\"Error in get_scene_info: {str(e)}\")\n traceback.print_exc()\n return {\"error\": str(e)}", "creation_date": "2025-03-10T07:52:27Z", "repo": "ahujasid/blender-mcp", "file_path": "addon.py", "stars": 12364, "label": 0} +{"function": "def get_current_temperature(location: str, unit: str = \"Celsius\") -> Dict[str, str]:\n \"\"\"Gets the current temperature for a specific location and unit.\"\"\"\n return {\"location\": location, \"unit\": unit, \"temperature\": \"72\"}", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": "def missing_annotation_tool(location, unit=\"Celsius\"):\n \"\"\"Tool function without type annotations.\"\"\"\n return {\"location\": location, \"unit\": unit, \"temperature\": \"72\"}", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": "def get_current_temperature_v2(\n location: str, unit: TemperatureUnit = TemperatureUnit.CELSIUS\n) -> Dict[str, str]:\n \"\"\"Gets the current temperature for a specific location and unit (with enum support).\"\"\"\n return {\"location\": location, \"unit\": unit, \"temperature\": \"72\"}", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def setUp(self):\n self.tool_manager = Tools()", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def test_add_tool_with_pydantic_model(self):\n \"\"\"Test adding a tool with an explicit Pydantic model.\"\"\"\n self.tool_manager._add_tool(get_current_temperature, TemperatureParams)\n\n expected_tool_spec = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_temperature\",\n \"description\": \"Gets the current temperature for a specific location and unit.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"description\": \"\",\n \"default\": \"Celsius\",\n },\n },\n \"required\": [\"location\"],\n },\n },\n }\n ]\n\n tools = self.tool_manager.tools()\n self.assertIn(\n \"get_current_temperature\", [tool[\"function\"][\"name\"] for tool in tools]\n )\n assert (\n tools == expected_tool_spec\n ), f\"Expected {expected_tool_spec}, but got {tools}\"", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def test_add_tool_with_signature_inference(self):\n \"\"\"Test adding a tool and inferring parameters from the function signature.\"\"\"\n self.tool_manager._add_tool(get_current_temperature)\n # Expected output from tool_manager.tools() when called with OpenAI format\n expected_tool_spec = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_temperature\",\n \"description\": \"Gets the current temperature for a specific location and unit.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"\", # No description provided in function signature\n 
},\n \"unit\": {\n \"type\": \"string\",\n \"description\": \"\",\n \"default\": \"Celsius\",\n },\n },\n \"required\": [\"location\"],\n },\n },\n }\n ]\n tools = self.tool_manager.tools()\n print(tools)\n self.assertIn(\n \"get_current_temperature\", [tool[\"function\"][\"name\"] for tool in tools]\n )\n assert (\n tools == expected_tool_spec\n ), f\"Expected {expected_tool_spec}, but got {tools}\"", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def test_add_tool_missing_annotation_raises_exception(self):\n \"\"\"Test that adding a tool with missing type annotations raises a TypeError.\"\"\"\n with self.assertRaises(TypeError):\n self.tool_manager._add_tool(missing_annotation_tool)", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def test_execute_tool_valid_parameters(self):\n \"\"\"Test executing a registered tool with valid parameters.\"\"\"\n self.tool_manager._add_tool(get_current_temperature, TemperatureParams)\n tool_call = {\n \"id\": \"call_1\",\n \"function\": {\n \"name\": \"get_current_temperature\",\n \"arguments\": {\"location\": \"San Francisco\", \"unit\": \"Celsius\"},\n },\n }\n result, result_message = self.tool_manager.execute_tool(tool_call)\n\n # Assuming result is returned as a list with a single dictionary\n result_dict = result[0] if isinstance(result, list) else result\n\n # Check that the result matches expected output\n self.assertEqual(result_dict[\"location\"], \"San Francisco\")\n self.assertEqual(result_dict[\"unit\"], \"Celsius\")\n self.assertEqual(result_dict[\"temperature\"], \"72\")", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def test_execute_tool_invalid_parameters(self):\n \"\"\"Test that executing a tool with invalid parameters raises a ValueError.\"\"\"\n self.tool_manager._add_tool(get_current_temperature, TemperatureParams)\n tool_call = {\n \"id\": \"call_1\",\n \"function\": {\n \"name\": \"get_current_temperature\",\n \"arguments\": {\"location\": 123}, # Invalid type for location\n },\n }\n\n with self.assertRaises(ValueError) as context:\n self.tool_manager.execute_tool(tool_call)\n\n # Verify the error message contains information about the validation error\n self.assertIn(\n \"Error in tool 'get_current_temperature' parameters\", str(context.exception)\n )", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": " def test_add_tool_with_enum(self):\n \"\"\"Test adding a tool with an enum parameter.\"\"\"\n self.tool_manager._add_tool(get_current_temperature_v2, TemperatureParamsV2)\n\n expected_tool_spec = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_temperature_v2\",\n \"description\": \"Gets the current temperature for a specific location and unit (with enum support).\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"Celsius\", \"Fahrenheit\"],\n \"description\": \"\",\n \"default\": \"Celsius\",\n },\n },\n \"required\": [\"location\"],\n },\n },\n }\n ]\n\n tools = self.tool_manager.tools()\n assert (\n tools == 
expected_tool_spec\n ), f\"Expected {expected_tool_spec}, but got {tools}\"", "creation_date": "2025-01-24T04:13:50Z", "repo": "andrewyng/aisuite", "file_path": "tests/utils/test_tool_manager.py", "stars": 12234, "label": 0} +{"function": "def pad_dataproto_to_divisor(data: 'DataProto', size_divisor: int):\n \"\"\"Pad a DataProto to size divisible by size_divisor\n\n Args:\n size_divisor (int): size divisor\n\n Returns:\n data: (DataProto): the padded DataProto\n pad_size (int)\n \"\"\"\n assert isinstance(data, DataProto), 'data must be a DataProto'\n if len(data) % size_divisor != 0:\n pad_size = size_divisor - len(data) % size_divisor\n data_padded = DataProto.concat([data, data[:pad_size]])\n else:\n pad_size = 0\n data_padded = data\n return data_padded, pad_size", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def unpad_dataproto(data: 'DataProto', pad_size):\n if pad_size != 0:\n data = data[:-pad_size]\n return data", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict:\n \"\"\"Union two tensordicts.\"\"\"\n assert tensor_dict1.batch_size == tensor_dict2.batch_size, \\\n f'Two tensor dict must have identical batch size. Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}'\n for key in tensor_dict2.keys():\n if key not in tensor_dict1.keys():\n tensor_dict1[key] = tensor_dict2[key]\n else:\n assert tensor_dict1[key].equal(tensor_dict2[key]), \\\n f'{key} in tensor_dict1 and tensor_dict2 are not the same object'\n\n return tensor_dict1", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def union_numpy_dict(tensor_dict1: dict[np.ndarray], tensor_dict2: dict[np.ndarray]) -> dict[np.ndarray]:\n for key, val in tensor_dict2.items():\n if key in tensor_dict1:\n assert isinstance(tensor_dict2[key], np.ndarray)\n assert isinstance(tensor_dict1[key], np.ndarray)\n assert np.all(tensor_dict2[key] == tensor_dict1[key]), \\\n f'{key} in tensor_dict1 and tensor_dict2 are not the same object'\n tensor_dict1[key] = val\n\n return tensor_dict1", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def list_of_dict_to_dict_of_list(list_of_dict: list[dict]):\n if len(list_of_dict) == 0:\n return {}\n keys = list_of_dict[0].keys()\n output = {key: [] for key in keys}\n for data in list_of_dict:\n for key, item in data.items():\n assert key in output\n output[key].append(item)\n return output", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def fold_batch_dim(data: 'DataProto', new_batch_size):\n \"\"\"\n Fold a batch dim from [bsz, xxx] into [new_bsz, bsz // new_bsz, xxx]\n \"\"\"\n batch_size = data.batch.batch_size[0]\n\n assert batch_size % new_batch_size == 0\n\n tensor: TensorDict = data.batch\n non_tensor = data.non_tensor_batch\n\n tensor = tensor.view(new_batch_size, -1)\n tensor.auto_batch_size_(batch_dims=1)\n\n for key, val in non_tensor.items():\n non_tensor[key] = np.reshape(val, newshape=(new_batch_size, -1, *val.shape[1:]))\n\n return DataProto(batch=tensor, non_tensor_batch=non_tensor, meta_info=data.meta_info)", 
"creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def unfold_batch_dim(data: 'DataProto', batch_dims=2):\n \"\"\"\n Unfold the first n dims as new batch dim\n \"\"\"\n tensor: TensorDict = data.batch\n non_tensor = data.non_tensor_batch\n tensor.auto_batch_size_(batch_dims=batch_dims)\n tensor = tensor.view(-1)\n\n batch_size = tensor.batch_size[0]\n\n non_tensor_new = {}\n\n for key, val in non_tensor.items():\n non_tensor_new[key] = np.reshape(val, newshape=(batch_size, *val.shape[batch_dims:]))\n\n return DataProto(batch=tensor, non_tensor_batch=non_tensor_new, meta_info=data.meta_info)", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def collate_fn(x: list['DataProtoItem']):\n batch = []\n non_tensor_batch = []\n for data in x:\n batch.append(data.batch)\n non_tensor_batch.append(data.non_tensor_batch)\n batch = torch.stack(batch).contiguous()\n non_tensor_batch = list_of_dict_to_dict_of_list(non_tensor_batch)\n for key, val in non_tensor_batch.items():\n non_tensor_batch[key] = np.array(val, dtype=object)\n return DataProto(batch=batch, non_tensor_batch=non_tensor_batch)", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": " def __post_init__(self):\n # perform necessary checking\n self.check_consistency()", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": " def __len__(self):\n if self.batch is not None:\n return self.batch.batch_size[0]\n elif self.non_tensor_batch is not None and len(self.non_tensor_batch) > 0:\n random_key = list(self.non_tensor_batch.keys())[0]\n return self.non_tensor_batch[random_key].shape[0]\n else:\n return 0", "creation_date": "2024-10-31T06:29:44Z", "repo": "Jiayi-Pan/TinyZero", "file_path": "verl/protocol.py", "stars": 12007, "label": 0} +{"function": "def compress_video(input_file, output_file, out_size):\n \"\"\"\u4f7f\u7528 ffmpeg \u538b\u7f29\u89c6\u9891\u6587\u4ef6\u3002\"\"\"\n command = [\n 'ffmpeg',\n '-i', input_file,\n '-vf', f\"scale='min({out_size},iw)':'min({out_size},ih)':force_original_aspect_ratio=decrease\",\n '-c:v', 'libx264',\n '-crf', '18',\n '-preset', 'slow',\n '-c:a', 'copy',\n output_file\n ]\n subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": "def set_run_dtype(x, dtype=None):\n # \u4fdd\u5b58\u539f\u59cb\u73af\u5883\u53d8\u91cf\u7684\u503c\uff08\u5982\u679c\u5b58\u5728\uff09\n npu_config.original_run_dtype = x.dtype\n # \u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u4e3a\u6307\u5b9a\u7684\u503c\n npu_config.current_run_dtype = dtype\n try:\n # Yield control back to the body of the `with` statement\n yield\n finally:\n # \u6062\u590d\u539f\u59cb\u7684\u73af\u5883\u53d8\u91cf\u503c\n npu_config.current_run_dtype = None\n npu_config.original_run_dtype = None", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def __init__(self):\n self.on_npu = npu_is_available\n self.node_world_size = self.N_NPU_PER_NODE\n self.profiling = False\n self.profiling_step = 5\n 
self.enable_FA = True\n self.enable_FP32 = False\n self.load_pickle = True\n self.use_small_dataset = False\n self.current_run_dtype = None\n self.original_run_dtype = None\n self.zp_manager = zp_manager\n self.replaced_type = torch.float32\n self.conv_dtype = torch.float16\n if self.enable_FA and self.enable_FP32:\n self.inf_float = -10000.0\n else:\n self.inf_float = -10000.0\n\n if self.use_small_dataset:\n self.load_pickle = False\n\n self._loss = []\n self.work_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.pickle_save_path = f\"{self.work_path}/pickles\"\n self.mm = dict()\n\n if self.on_npu:\n import deepspeed\n import sys\n torch_npu.npu.set_compile_mode(jit_compile=False)\n\n import deepspeed.runtime.utils as utils\n from opensora.adaptor.utils import all_gather_dp_groups, all_gather_into_tensor_dp_groups\n utils.all_gather_dp_groups = all_gather_dp_groups\n\n import deepspeed.runtime.bf16_optimizer as bf16_optimizer\n from opensora.adaptor.bf16_optimizer import BF16_Optimizer\n self.replace_methods(bf16_optimizer.BF16_Optimizer, BF16_Optimizer)\n\n from opensora.adaptor.stage_1_and_2 import DeepSpeedZeroOptimizer\n import deepspeed.runtime.zero.stage_1_and_2 as stage_1_and_2\n self.replace_methods(stage_1_and_2.DeepSpeedZeroOptimizer, DeepSpeedZeroOptimizer, ['_has_inf_or_nan'])\n\n import deepspeed.runtime.engine as engine\n from opensora.adaptor.engine import DeepSpeedEngine\n self.replace_methods(engine.DeepSpeedEngine, DeepSpeedEngine, skip_fcns=['__init__', '_copy_recovery_script', '_change_recovery_script_permissions'])\n\n if \"RANK\" in os.environ:\n self.rank = int(os.environ[\"RANK\"])\n self.world_size = int(os.environ[\"WORLD_SIZE\"])\n torch_npu.npu.set_device(self.get_local_rank())\n else:\n self.rank = torch.cuda.current_device()\n self.world_size = self.N_NPU_PER_NODE\n self.print_with_rank(f\"The npu_config.on_npu is {self.on_npu}\")\n self.bind_thread_to_cpu()\n gc.set_threshold(700, 10, 10000)", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def get_total_cores(self):\n try:\n total_cores = os.sysconf('SC_NPROCESSORS_ONLN')\n except (AttributeError, ValueError):\n total_cores = os.cpu_count()\n return total_cores", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def bind_thread_to_cpu(self):\n total_cores = self.get_total_cores()\n # \u6bcf\u4e2a\u5361\u7684\u6838\u5fc3\u6570\u91cf\n cores_per_rank = total_cores // 8\n # \u8ba1\u7b97\u672c\u5730rank\n local_rank = self.rank % 8\n # \u8ba1\u7b97\u5f53\u524d rank \u7684 CPU \u6838\u8303\u56f4\n start_core = local_rank * cores_per_rank\n end_core = start_core + cores_per_rank - 1\n # \u6784\u5efa CPU \u6838\u8303\u56f4\u5b57\u7b26\u4e32\n cpu_cores_range = f\"{start_core}-{end_core}\"\n pid = os.getpid()\n command = f\"taskset -cp {cpu_cores_range} {pid}\"\n\n subprocess.run(command, shell=True, check=True)\n return f\"Binding Cores:{self.rank}:{pid}:{cpu_cores_range}\"", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def replace_methods(self, target_class, source_class, skip_fcns=[], only_include_fcns=None):\n for attr_name in dir(source_class):\n attr_value = getattr(source_class, attr_name)\n if attr_name in source_class.__dict__:\n 
attr_class_value = source_class.__dict__[attr_name]\n else:\n attr_class_value = attr_value\n if (isinstance(attr_class_value, staticmethod) or isinstance(attr_class_value, classmethod)\n or attr_name in skip_fcns):\n print(f\"skip replace {attr_name}\")\n continue\n\n if only_include_fcns is not None and attr_name not in only_include_fcns:\n continue\n\n elif isinstance(attr_value, types.FunctionType):\n setattr(target_class, attr_name, attr_value)", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def get_attention_mask(self, attention_mask, repeat_num):\n if self.on_npu and attention_mask is not None:\n if npu_config.enable_FA:\n attention_mask = attention_mask.to(torch.bool)\n attention_mask = attention_mask.repeat_interleave(repeat_num, dim=-2)\n return attention_mask", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def set_current_run_dtype(self, variables):\n if variables[0].dtype != self.current_run_dtype and self.current_run_dtype is not None:\n for index, var in enumerate(variables):\n variables[index] = var.to(self.current_run_dtype)\n return tuple(variables)", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def restore_dtype(self, x):\n if x.dtype != self.original_run_dtype and self.original_run_dtype is not None:\n x = x.to(self.original_run_dtype)\n return x", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": " def get_output_video_path(self, name):\n os.makedirs(f\"{self.work_path}/output_videos\", exist_ok=True)\n return f\"{self.work_path}/output_videos/{name}\"", "creation_date": "2024-07-24T10:31:04Z", "repo": "PKU-YuanGroup/Open-Sora-Plan", "file_path": "opensora/npu_config.py", "stars": 11997, "label": 0} +{"function": "def generate_response(prompt: str, ai_model: str) -> str:\n \"\"\"\n Generate a script for a video, depending on the subject of the video.\n\n Args:\n video_subject (str): The subject of the video.\n ai_model (str): The AI model to use for generation.\n\n\n Returns:\n\n str: The response from the AI model.\n\n \"\"\"\n\n if ai_model == 'g4f':\n # Newest G4F Architecture\n client = Client()\n response = client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n provider=g4f.Provider.You, \n messages=[{\"role\": \"user\", \"content\": prompt}],\n ).choices[0].message.content\n\n elif ai_model in [\"gpt3.5-turbo\", \"gpt4\"]:\n\n model_name = \"gpt-3.5-turbo\" if ai_model == \"gpt3.5-turbo\" else \"gpt-4-1106-preview\"\n\n response = openai.chat.completions.create(\n\n model=model_name,\n\n messages=[{\"role\": \"user\", \"content\": prompt}],\n\n ).choices[0].message.content\n elif ai_model == 'gemmini':\n model = genai.GenerativeModel('gemini-pro')\n response_model = model.generate_content(prompt)\n response = response_model.text\n\n else:\n\n raise ValueError(\"Invalid AI model selected.\")\n\n return response", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/gpt.py", "stars": 11956, "label": 0} +{"function": "def generate_script(video_subject: str, paragraph_number: int, ai_model: str, voice: str, customPrompt: str) -> str:\n\n \"\"\"\n Generate a 
script for a video, depending on the subject of the video, the number of paragraphs, and the AI model.\n\n\n\n Args:\n\n video_subject (str): The subject of the video.\n\n paragraph_number (int): The number of paragraphs to generate.\n\n ai_model (str): The AI model to use for generation.\n\n\n\n Returns:\n\n str: The script for the video.\n\n \"\"\"\n\n # Build prompt\n \n if customPrompt:\n prompt = customPrompt\n else:\n prompt = \"\"\"\n Generate a script for a video, depending on the subject of the video.\n\n The script is to be returned as a string with the specified number of paragraphs.\n\n Here is an example of a string:\n \"This is an example string.\"\n\n Do not under any circumstance reference this prompt in your response.\n\n Get straight to the point, don't start with unnecessary things like, \"welcome to this video\".\n\n Obviously, the script should be related to the subject of the video.\n\n YOU MUST NOT INCLUDE ANY TYPE OF MARKDOWN OR FORMATTING IN THE SCRIPT, NEVER USE A TITLE.\n YOU MUST WRITE THE SCRIPT IN THE LANGUAGE SPECIFIED IN [LANGUAGE].\n ONLY RETURN THE RAW CONTENT OF THE SCRIPT. DO NOT INCLUDE \"VOICEOVER\", \"NARRATOR\" OR SIMILAR INDICATORS OF WHAT SHOULD BE SPOKEN AT THE BEGINNING OF EACH PARAGRAPH OR LINE. YOU MUST NOT MENTION THE PROMPT, OR ANYTHING ABOUT THE SCRIPT ITSELF. ALSO, NEVER TALK ABOUT THE AMOUNT OF PARAGRAPHS OR LINES. JUST WRITE THE SCRIPT.\n\n \"\"\"\n\n prompt += f\"\"\"\n \n Subject: {video_subject}\n Number of paragraphs: {paragraph_number}\n Language: {voice}\n\n \"\"\"\n\n # Generate script\n response = generate_response(prompt, ai_model)\n\n print(colored(response, \"cyan\"))\n\n # Return the generated script\n if response:\n # Clean the script\n # Remove asterisks, hashes\n response = response.replace(\"*\", \"\")\n response = response.replace(\"#\", \"\")\n\n # Remove markdown syntax\n response = re.sub(r\"\\[.*\\]\", \"\", response)\n response = re.sub(r\"\\(.*\\)\", \"\", response)\n\n # Split the script into paragraphs\n paragraphs = response.split(\"\\n\\n\")\n\n # Select the specified number of paragraphs\n selected_paragraphs = paragraphs[:paragraph_number]\n\n # Join the selected paragraphs into a single string\n final_script = \"\\n\\n\".join(selected_paragraphs)\n\n # Print to console the number of paragraphs used\n print(colored(f\"Number of paragraphs used: {len(selected_paragraphs)}\", \"green\"))\n\n return final_script\n else:\n print(colored(\"[-] GPT returned an empty response.\", \"red\"))\n return None", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/gpt.py", "stars": 11956, "label": 0} +{"function": "def get_search_terms(video_subject: str, amount: int, script: str, ai_model: str) -> List[str]:\n \"\"\"\n Generate a JSON-Array of search terms for stock videos,\n depending on the subject of a video.\n\n Args:\n video_subject (str): The subject of the video.\n amount (int): The amount of search terms to generate.\n script (str): The script of the video.\n ai_model (str): The AI model to use for generation.\n\n Returns:\n List[str]: The search terms for the video subject.\n \"\"\"\n\n # Build prompt\n prompt = f\"\"\"\n Generate {amount} search terms for stock videos,\n depending on the subject of a video.\n Subject: {video_subject}\n\n The search terms are to be returned as\n a JSON-Array of strings.\n\n Each search term should consist of 1-3 words,\n always add the main subject of the video.\n \n YOU MUST ONLY RETURN THE JSON-ARRAY OF STRINGS.\n YOU MUST NOT 
RETURN ANYTHING ELSE. \n YOU MUST NOT RETURN THE SCRIPT.\n \n The search terms must be related to the subject of the video.\n Here is an example of a JSON-Array of strings:\n [\"search term 1\", \"search term 2\", \"search term 3\"]\n\n For context, here is the full text:\n {script}\n \"\"\"\n\n # Generate search terms\n response = generate_response(prompt, ai_model)\n print(response)\n\n # Parse response into a list of search terms\n search_terms = []\n \n try:\n search_terms = json.loads(response)\n if not isinstance(search_terms, list) or not all(isinstance(term, str) for term in search_terms):\n raise ValueError(\"Response is not a list of strings.\")\n\n except (json.JSONDecodeError, ValueError):\n # Get everything between the first and last square brackets\n response = response[response.find(\"[\") + 1:response.rfind(\"]\")]\n\n print(colored(\"[*] GPT returned an unformatted response. Attempting to clean...\", \"yellow\"))\n\n # Attempt to extract list-like string and convert to list\n match = re.search(r'\\[\"(?:[^\"\\\\]|\\\\.)*\"(?:,\\s*\"[^\"\\\\]*\")*\\]', response)\n print(match.group())\n if match:\n try:\n search_terms = json.loads(match.group())\n except json.JSONDecodeError:\n print(colored(\"[-] Could not parse response.\", \"red\"))\n return []\n\n\n # Let user know\n print(colored(f\"\\nGenerated {len(search_terms)} search terms: {', '.join(search_terms)}\", \"cyan\"))\n\n # Return search terms\n return search_terms", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/gpt.py", "stars": 11956, "label": 0} +{"function": "def generate_metadata(video_subject: str, script: str, ai_model: str) -> Tuple[str, str, List[str]]: \n \"\"\" \n Generate metadata for a YouTube video, including the title, description, and keywords. \n \n Args: \n video_subject (str): The subject of the video. \n script (str): The script of the video. \n ai_model (str): The AI model to use for generation. \n \n Returns: \n Tuple[str, str, List[str]]: The title, description, and keywords for the video. \n \"\"\" \n \n # Build prompt for title \n title_prompt = f\"\"\" \n Generate a catchy and SEO-friendly title for a YouTube shorts video about {video_subject}. \n \"\"\" \n \n # Generate title \n title = generate_response(title_prompt, ai_model).strip() \n \n # Build prompt for description \n description_prompt = f\"\"\" \n Write a brief and engaging description for a YouTube shorts video about {video_subject}. 
\n The video is based on the following script: \n {script} \n \"\"\" \n \n # Generate description \n description = generate_response(description_prompt, ai_model).strip() \n \n # Generate keywords \n keywords = get_search_terms(video_subject, 6, script, ai_model) \n\n return title, description, keywords ", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/gpt.py", "stars": 11956, "label": 0} +{"function": "def generate():\n try:\n # Set global variable\n global GENERATING\n GENERATING = True\n\n # Clean\n clean_dir(\"../temp/\")\n clean_dir(\"../subtitles/\")\n\n\n # Parse JSON\n data = request.get_json()\n paragraph_number = int(data.get('paragraphNumber', 1)) # Default to 1 if not provided\n ai_model = data.get('aiModel') # Get the AI model selected by the user\n n_threads = data.get('threads') # Amount of threads to use for video generation\n subtitles_position = data.get('subtitlesPosition') # Position of the subtitles in the video\n text_color = data.get('color') # Color of subtitle text\n\n # Get 'useMusic' from the request data and default to False if not provided\n use_music = data.get('useMusic', False)\n\n # Get 'automateYoutubeUpload' from the request data and default to False if not provided\n automate_youtube_upload = data.get('automateYoutubeUpload', False)\n\n # Get the ZIP Url of the songs\n songs_zip_url = data.get('zipUrl')\n\n # Download songs\n if use_music:\n # Downloads a ZIP file containing popular TikTok Songs\n if songs_zip_url:\n fetch_songs(songs_zip_url)\n else:\n # Default to a ZIP file containing popular TikTok Songs\n fetch_songs(\"https://filebin.net/2avx134kdibc4c3q/drive-download-20240209T180019Z-001.zip\")\n\n # Print little information about the video which is to be generated\n print(colored(\"[Video to be generated]\", \"blue\"))\n print(colored(\" Subject: \" + data[\"videoSubject\"], \"blue\"))\n print(colored(\" AI Model: \" + ai_model, \"blue\")) # Print the AI model being used\n print(colored(\" Custom Prompt: \" + data[\"customPrompt\"], \"blue\")) # Print the AI model being used\n\n\n\n if not GENERATING:\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": \"Video generation was cancelled.\",\n \"data\": [],\n }\n )\n \n voice = data[\"voice\"]\n voice_prefix = voice[:2]\n\n\n if not voice:\n print(colored(\"[!] No voice was selected. 
Defaulting to \\\"en_us_001\\\"\", \"yellow\"))\n voice = \"en_us_001\"\n voice_prefix = voice[:2]\n\n\n # Generate a script\n script = generate_script(data[\"videoSubject\"], paragraph_number, ai_model, voice, data[\"customPrompt\"]) # Pass the AI model to the script generation\n\n # Generate search terms\n search_terms = get_search_terms(\n data[\"videoSubject\"], AMOUNT_OF_STOCK_VIDEOS, script, ai_model\n )\n\n # Search for a video of the given search term\n video_urls = []\n\n # Defines how many results it should query and search through\n it = 15\n\n # Defines the minimum duration of each clip\n min_dur = 10\n\n # Loop through all search terms,\n # and search for a video of the given search term\n for search_term in search_terms:\n if not GENERATING:\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": \"Video generation was cancelled.\",\n \"data\": [],\n }\n )\n found_urls = search_for_stock_videos(\n search_term, os.getenv(\"PEXELS_API_KEY\"), it, min_dur\n )\n # Check for duplicates\n for url in found_urls:\n if url not in video_urls:\n video_urls.append(url)\n break\n\n # Check if video_urls is empty\n if not video_urls:\n print(colored(\"[-] No videos found to download.\", \"red\"))\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": \"No videos found to download.\",\n \"data\": [],\n }\n )\n \n # Define video_paths\n video_paths = []\n\n # Let user know\n print(colored(f\"[+] Downloading {len(video_urls)} videos...\", \"blue\"))\n\n # Save the videos\n for video_url in video_urls:\n if not GENERATING:\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": \"Video generation was cancelled.\",\n \"data\": [],\n }\n )\n try:\n saved_video_path = save_video(video_url)\n video_paths.append(saved_video_path)\n except Exception:\n print(colored(f\"[-] Could not download video: {video_url}\", \"red\"))\n\n # Let user know\n print(colored(\"[+] Videos downloaded!\", \"green\"))\n\n # Let user know\n print(colored(\"[+] Script generated!\\n\", \"green\"))\n\n if not GENERATING:\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": \"Video generation was cancelled.\",\n \"data\": [],\n }\n )\n\n # Split script into sentences\n sentences = script.split(\". 
\")\n\n # Remove empty strings\n sentences = list(filter(lambda x: x != \"\", sentences))\n paths = []\n\n # Generate TTS for every sentence\n for sentence in sentences:\n if not GENERATING:\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": \"Video generation was cancelled.\",\n \"data\": [],\n }\n )\n current_tts_path = f\"../temp/{uuid4()}.mp3\"\n tts(sentence, voice, filename=current_tts_path)\n audio_clip = AudioFileClip(current_tts_path)\n paths.append(audio_clip)\n\n # Combine all TTS files using moviepy\n final_audio = concatenate_audioclips(paths)\n tts_path = f\"../temp/{uuid4()}.mp3\"\n final_audio.write_audiofile(tts_path)\n\n try:\n subtitles_path = generate_subtitles(audio_path=tts_path, sentences=sentences, audio_clips=paths, voice=voice_prefix)\n except Exception as e:\n print(colored(f\"[-] Error generating subtitles: {e}\", \"red\"))\n subtitles_path = None\n\n # Concatenate videos\n temp_audio = AudioFileClip(tts_path)\n combined_video_path = combine_videos(video_paths, temp_audio.duration, 5, n_threads or 2)\n\n # Put everything together\n try:\n final_video_path = generate_video(combined_video_path, tts_path, subtitles_path, n_threads or 2, subtitles_position, text_color or \"#FFFF00\")\n except Exception as e:\n print(colored(f\"[-] Error generating final video: {e}\", \"red\"))\n final_video_path = None\n\n # Define metadata for the video, we will display this to the user, and use it for the YouTube upload\n title, description, keywords = generate_metadata(data[\"videoSubject\"], script, ai_model)\n\n print(colored(\"[-] Metadata for YouTube upload:\", \"blue\"))\n print(colored(\" Title: \", \"blue\"))\n print(colored(f\" {title}\", \"blue\"))\n print(colored(\" Description: \", \"blue\"))\n print(colored(f\" {description}\", \"blue\"))\n print(colored(\" Keywords: \", \"blue\"))\n print(colored(f\" {', '.join(keywords)}\", \"blue\"))\n\n if automate_youtube_upload:\n # Start Youtube Uploader\n # Check if the CLIENT_SECRETS_FILE exists\n client_secrets_file = os.path.abspath(\"./client_secret.json\")\n SKIP_YT_UPLOAD = False\n if not os.path.exists(client_secrets_file):\n SKIP_YT_UPLOAD = True\n print(colored(\"[-] Client secrets file missing. 
YouTube upload will be skipped.\", \"yellow\"))\n print(colored(\"[-] Please download the client_secret.json from Google Cloud Platform and store this inside the /Backend directory.\", \"red\"))\n\n # Only proceed with YouTube upload if the toggle is True and client_secret.json exists.\n if not SKIP_YT_UPLOAD:\n # Choose the appropriate category ID for your videos\n video_category_id = \"28\" # Science & Technology\n privacyStatus = \"private\" # \"public\", \"private\", \"unlisted\"\n video_metadata = {\n 'video_path': os.path.abspath(f\"../temp/{final_video_path}\"),\n 'title': title,\n 'description': description,\n 'category': video_category_id,\n 'keywords': \",\".join(keywords),\n 'privacyStatus': privacyStatus,\n }\n\n # Upload the video to YouTube\n try:\n # Unpack the video_metadata dictionary into individual arguments\n video_response = upload_video(\n video_path=video_metadata['video_path'],\n title=video_metadata['title'],\n description=video_metadata['description'],\n category=video_metadata['category'],\n keywords=video_metadata['keywords'],\n privacy_status=video_metadata['privacyStatus']\n )\n print(f\"Uploaded video ID: {video_response.get('id')}\")\n except HttpError as e:\n print(f\"An HTTP error {e.resp.status} occurred:\\n{e.content}\")\n\n video_clip = VideoFileClip(f\"../temp/{final_video_path}\")\n if use_music:\n # Select a random song\n song_path = choose_random_song()\n\n # Add song to video at 30% volume using moviepy\n original_duration = video_clip.duration\n original_audio = video_clip.audio\n song_clip = AudioFileClip(song_path).set_fps(44100)\n\n # Set the volume of the song to 10% of the original volume\n song_clip = song_clip.volumex(0.1).set_fps(44100)\n\n # Add the song to the video\n comp_audio = CompositeAudioClip([original_audio, song_clip])\n video_clip = video_clip.set_audio(comp_audio)\n video_clip = video_clip.set_fps(30)\n video_clip = video_clip.set_duration(original_duration)\n video_clip.write_videofile(f\"../{final_video_path}\", threads=n_threads or 1)\n else:\n video_clip.write_videofile(f\"../{final_video_path}\", threads=n_threads or 1)\n\n\n # Let user know\n print(colored(f\"[+] Video generated: {final_video_path}!\", \"green\"))\n\n # Stop FFMPEG processes\n if os.name == \"nt\":\n # Windows\n os.system(\"taskkill /f /im ffmpeg.exe\")\n else:\n # Other OS\n os.system(\"pkill -f ffmpeg\")\n\n GENERATING = False\n\n # Return JSON\n return jsonify(\n {\n \"status\": \"success\",\n \"message\": \"Video generated! See MoneyPrinter/output.mp4 for result.\",\n \"data\": final_video_path,\n }\n )\n except Exception as err:\n print(colored(f\"[-] Error: {str(err)}\", \"red\"))\n return jsonify(\n {\n \"status\": \"error\",\n \"message\": f\"Could not retrieve stock videos: {str(err)}\",\n \"data\": [],\n }\n )", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/main.py", "stars": 11956, "label": 0} +{"function": "def cancel():\n print(colored(\"[!] 
Received cancellation request...\", \"yellow\"))\n\n global GENERATING\n GENERATING = False\n\n return jsonify({\"status\": \"success\", \"message\": \"Cancelled video generation.\"})", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/main.py", "stars": 11956, "label": 0} +{"function": "def search_for_stock_videos(query: str, api_key: str, it: int, min_dur: int) -> List[str]:\n \"\"\"\n Searches for stock videos based on a query.\n\n Args:\n query (str): The query to search for.\n api_key (str): The API key to use.\n\n Returns:\n List[str]: A list of stock videos.\n \"\"\"\n \n # Build headers\n headers = {\n \"Authorization\": api_key\n }\n\n # Build URL\n qurl = f\"https://api.pexels.com/videos/search?query={query}&per_page={it}\"\n\n # Send the request\n r = requests.get(qurl, headers=headers)\n\n # Parse the response\n response = r.json()\n\n # Parse each video\n raw_urls = []\n video_url = []\n video_res = 0\n try:\n # loop through each video in the result\n for i in range(it):\n #check if video has desired minimum duration\n if response[\"videos\"][i][\"duration\"] < min_dur:\n continue\n raw_urls = response[\"videos\"][i][\"video_files\"]\n temp_video_url = \"\"\n \n # loop through each url to determine the best quality\n for video in raw_urls:\n # Check if video has a valid download link\n if \".com/video-files\" in video[\"link\"]:\n # Only save the URL with the largest resolution\n if (video[\"width\"]*video[\"height\"]) > video_res:\n temp_video_url = video[\"link\"]\n video_res = video[\"width\"]*video[\"height\"]\n \n # add the url to the return list if it's not empty\n if temp_video_url != \"\":\n video_url.append(temp_video_url)\n \n except Exception as e:\n print(colored(\"[-] No Videos found.\", \"red\"))\n print(colored(e, \"red\"))\n\n # Let user know\n print(colored(f\"\\t=> \\\"{query}\\\" found {len(video_url)} Videos\", \"cyan\"))\n\n # Return the video url\n return video_url", "creation_date": "2024-01-31T18:53:27Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/search.py", "stars": 11956, "label": 0} +{"function": "def split_string(string: str, chunk_size: int) -> List[str]:\n words = string.split()\n result = []\n current_chunk = \"\"\n for word in words:\n if (\n len(current_chunk) + len(word) + 1 <= chunk_size\n ): # Check if adding the word exceeds the chunk size\n current_chunk += f\" {word}\"\n else:\n if current_chunk: # Append the current chunk if not empty\n result.append(current_chunk.strip())\n current_chunk = word\n if current_chunk: # Append the last chunk if not empty\n result.append(current_chunk.strip())\n return result", "creation_date": "2024-02-01T09:25:47Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/tiktokvoice.py", "stars": 11956, "label": 0} +{"function": "def get_api_response() -> requests.Response:\n url = f'{ENDPOINTS[current_endpoint].split(\"/a\")[0]}'\n response = requests.get(url)\n return response", "creation_date": "2024-02-01T09:25:47Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/tiktokvoice.py", "stars": 11956, "label": 0} +{"function": "def save_audio_file(base64_data: str, filename: str = \"output.mp3\") -> None:\n audio_bytes = base64.b64decode(base64_data)\n with open(filename, \"wb\") as file:\n file.write(audio_bytes)", "creation_date": "2024-02-01T09:25:47Z", "repo": "FujiwaraChoki/MoneyPrinter", "file_path": "Backend/tiktokvoice.py", "stars": 11956, "label": 0} +{"function": "def patched_init(self, scope, receive=None, 
send=None):\n if 'session' not in scope:\n scope['session'] = dict()\n _original_init(self, scope, receive, send)\n return", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def unload_module():\n global module_in_gpu\n\n if module_in_gpu is None:\n return\n\n DynamicSwapInstaller.uninstall_model(module_in_gpu)\n module_in_gpu.to(cpu)\n print(f'Move module to CPU: {type(module_in_gpu).__name__}')\n\n module_in_gpu = None\n memory_management.soft_empty_cache()\n return", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def greedy_move_to_gpu(model, model_gpu_memory_when_using_cpu_swap):\n mem_counter = 0\n memory_in_swap = 0\n for m in model.modules():\n if hasattr(m, \"weight\"):\n module_mem = memory_management.module_size(m)\n if mem_counter + module_mem < model_gpu_memory_when_using_cpu_swap:\n m.to(gpu)\n mem_counter += module_mem\n else:\n m.to(cpu)\n memory_in_swap += module_mem\n\n print(f\"[Memory Management] Loaded to CPU Swap: {memory_in_swap / (1024 * 1024):.2f} MB\")\n print(f\"[Memory Management] Loaded to GPU: {mem_counter / (1024 * 1024):.2f} MB\")\n return", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def load_module(m):\n global module_in_gpu\n\n if module_in_gpu == m:\n return\n\n unload_module()\n\n model_memory = memory_management.module_size(m)\n current_free_mem = memory_management.get_free_memory(gpu)\n inference_memory = 1.5 * 1024 * 1024 * 1024 # memory_management.minimum_inference_memory() # TODO: connect to main memory system\n estimated_remaining_memory = current_free_mem - model_memory - inference_memory\n\n print(f\"[Memory Management] Current Free GPU Memory: {current_free_mem / (1024 * 1024):.2f} MB\")\n print(f\"[Memory Management] Required Model Memory: {model_memory / (1024 * 1024):.2f} MB\")\n print(f\"[Memory Management] Required Inference Memory: {inference_memory / (1024 * 1024):.2f} MB\")\n print(f\"[Memory Management] Estimated Remaining GPU Memory: {estimated_remaining_memory / (1024 * 1024):.2f} MB\")\n\n is_torch_jit = 'ScriptModule' in type(m).__name__\n\n if is_torch_jit:\n print(f'Detected torch jit module: {type(m).__name__}')\n\n if (ALWAYS_SWAP or estimated_remaining_memory < 0) and not is_torch_jit:\n print(f'Move module to SWAP: {type(m).__name__}')\n DynamicSwapInstaller.install_model(m, target_device=gpu)\n model_gpu_memory_when_using_cpu_swap = memory_management.compute_model_gpu_memory_when_using_cpu_swap(current_free_mem, inference_memory)\n greedy_move_to_gpu(m, model_gpu_memory_when_using_cpu_swap)\n else:\n print(f'Move module to GPU: {type(m).__name__}')\n m.to(gpu)\n\n module_in_gpu = m\n return", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def capture_gpu_object(capture=True):\n if capture:\n return GPUObject()\n else:\n return contextlib.nullcontext()", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def GPU(gpu_objects=None, manual_load=False, **kwargs):\n gpu_objects = gpu_objects or []\n\n if not isinstance(gpu_objects, (list, tuple)):\n gpu_objects = 
[gpu_objects]\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n print(\"Entering Forge Space GPU ...\")\n memory_management.unload_all_models()\n if not manual_load:\n for o in gpu_objects:\n o.gpu()\n result = func(*args, **kwargs)\n print(\"Cleaning Forge Space GPU ...\")\n unload_module()\n for o in gpu_objects:\n o.to(device=torch.device('cpu'))\n memory_management.soft_empty_cache()\n return result\n return wrapper\n return decorator", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def convert_root_path():\n frame = inspect.currentframe().f_back\n caller_file = frame.f_code.co_filename\n caller_file = os.path.abspath(caller_file)\n result = os.path.join(os.path.dirname(caller_file), 'huggingface_space_mirror')\n return result + '/'", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def download_single_file(\n url: str,\n *,\n model_dir: str,\n progress: bool = True,\n file_name: str | None = None,\n hash_prefix: str | None = None,\n) -> str:\n os.makedirs(model_dir, exist_ok=True)\n if not file_name:\n from urllib.parse import urlparse\n parts = urlparse(url)\n file_name = os.path.basename(parts.path)\n cached_file = os.path.abspath(os.path.join(model_dir, file_name))\n if not os.path.exists(cached_file):\n tmp_filename = cached_file + '.tmp'\n print(f'Downloading: \"{url}\" to {cached_file} using temp file {tmp_filename}\\n')\n from torch.hub import download_url_to_file\n download_url_to_file(url, tmp_filename, progress=progress, hash_prefix=hash_prefix)\n os.replace(tmp_filename, cached_file)\n return cached_file", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def automatically_move_to_gpu_when_forward(m: torch.nn.Module, target_model: torch.nn.Module = None):\n if target_model is None:\n target_model = m\n\n def patch_method(method_name):\n if not hasattr(m, method_name):\n return\n\n if not hasattr(m, 'forge_space_hooked_names'):\n m.forge_space_hooked_names = []\n\n if method_name in m.forge_space_hooked_names:\n print(f'Already hooked {type(m).__name__}.{method_name}')\n return\n\n print(f'Automatic hook: {type(m).__name__}.{method_name}')\n\n original_method = getattr(m, method_name)\n\n def patched_method(*args, **kwargs):\n load_module(target_model)\n return original_method(*args, **kwargs)\n\n setattr(m, method_name, patched_method)\n\n m.forge_space_hooked_names.append(method_name)\n return\n\n for method_name in ['forward', 'encode', 'decode']:\n patch_method(method_name)\n\n return", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def automatically_move_pipeline_components(pipe):\n for attr_name in dir(pipe):\n attr_value = getattr(pipe, attr_name, None)\n if isinstance(attr_value, torch.nn.Module):\n automatically_move_to_gpu_when_forward(attr_value)\n return", "creation_date": "2024-08-17T15:29:08Z", "repo": "lllyasviel/stable-diffusion-webui-forge", "file_path": "spaces.py", "stars": 11191, "label": 0} +{"function": "def home():\n return render_template('index.html')", "creation_date": "2024-12-06T00:21:31Z", "repo": "Doriandarko/claude-engineer", "file_path": "app.py", "stars": 11052, 
"label": 0} +{"function": "def chat():\n data = request.json\n message = data.get('message', '')\n image_data = data.get('image') # Get the base64 image data\n \n # Prepare the message content\n if image_data:\n # Create a message with both text and image in correct order\n message_content = [\n {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/jpeg\", # We should detect this from the image\n \"data\": image_data.split(',')[1] if ',' in image_data else image_data # Remove data URL prefix if present\n }\n }\n ]\n \n # Only add text message if there is actual text\n if message.strip():\n message_content.append({\n \"type\": \"text\",\n \"text\": message\n })\n else:\n # Text-only message\n message_content = message\n \n try:\n # Handle the chat message with the appropriate content\n response = assistant.chat(message_content)\n \n # Get token usage from assistant\n token_usage = {\n 'total_tokens': assistant.total_tokens_used,\n 'max_tokens': Config.MAX_CONVERSATION_TOKENS\n }\n \n # Get the last used tool from the conversation history\n tool_name = None\n if assistant.conversation_history:\n for msg in reversed(assistant.conversation_history):\n if msg.get('role') == 'assistant' and msg.get('content'):\n content = msg['content']\n if isinstance(content, list):\n for block in content:\n if isinstance(block, dict) and block.get('type') == 'tool_use':\n tool_name = block.get('name')\n break\n if tool_name:\n break\n \n return jsonify({\n 'response': response,\n 'thinking': False,\n 'tool_name': tool_name,\n 'token_usage': token_usage\n })\n \n except Exception as e:\n return jsonify({\n 'response': f\"Error: {str(e)}\",\n 'thinking': False,\n 'tool_name': None,\n 'token_usage': None\n }), 200 # Return 200 even for errors to handle them gracefully in frontend", "creation_date": "2024-12-06T00:21:31Z", "repo": "Doriandarko/claude-engineer", "file_path": "app.py", "stars": 11052, "label": 0} +{"function": "def upload_file():\n if 'file' not in request.files:\n return jsonify({'error': 'No file part'}), 400\n \n file = request.files['file']\n if file.filename == '':\n return jsonify({'error': 'No selected file'}), 400\n \n if file and file.filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.webp')):\n filename = secure_filename(file.filename)\n filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(filepath)\n \n # Get the actual media type\n media_type = file.content_type or 'image/jpeg' # Default to jpeg if not detected\n \n # Convert image to base64\n with open(filepath, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode('utf-8')\n \n # Clean up the file\n os.remove(filepath)\n \n return jsonify({\n 'success': True,\n 'image_data': encoded_string,\n 'media_type': media_type\n })\n \n return jsonify({'error': 'Invalid file type'}), 400", "creation_date": "2024-12-06T00:21:31Z", "repo": "Doriandarko/claude-engineer", "file_path": "app.py", "stars": 11052, "label": 0} +{"function": "def reset():\n # Reset the assistant's conversation history\n assistant.reset()\n return jsonify({'status': 'success'})", "creation_date": "2024-12-06T00:21:31Z", "repo": "Doriandarko/claude-engineer", "file_path": "app.py", "stars": 11052, "label": 0} +{"function": "def main():\n \"\"\"\n Entry point for the assistant CLI loop.\n Provides a prompt for user input and handles 'quit' and 'reset' commands.\n \"\"\"\n console = Console()\n style = Style.from_dict({'prompt': 'orange'})\n\n try:\n assistant = Assistant()\n 
except ValueError as e:\n console.print(f\"[bold red]Error:[/bold red] {str(e)}\")\n console.print(\"Please ensure ANTHROPIC_API_KEY is set correctly.\")\n return\n\n welcome_text = \"\"\"\n# Claude Engineer v3. A self-improving assistant framework with tool creation\n\nType 'refresh' to reload available tools\nType 'reset' to clear conversation history\nType 'quit' to exit\n\nAvailable tools:\n\"\"\"\n console.print(Markdown(welcome_text))\n assistant.display_available_tools()\n\n while True:\n try:\n user_input = prompt(\"You: \", style=style).strip()\n\n if user_input.lower() == 'quit':\n console.print(\"\\n[bold blue]\ud83d\udc4b Goodbye![/bold blue]\")\n break\n elif user_input.lower() == 'reset':\n assistant.reset()\n continue\n\n response = assistant.chat(user_input)\n console.print(\"\\n[bold purple]Claude Engineer:[/bold purple]\")\n if isinstance(response, str):\n safe_response = response.replace('[', '\\\\[').replace(']', '\\\\]')\n console.print(safe_response)\n else:\n console.print(str(response))\n\n except KeyboardInterrupt:\n continue\n except EOFError:\n break", "creation_date": "2024-11-29T02:25:20Z", "repo": "Doriandarko/claude-engineer", "file_path": "ce3.py", "stars": 11052, "label": 0} +{"function": " def __init__(self):\n if not getattr(Config, 'ANTHROPIC_API_KEY', None):\n raise ValueError(\"No ANTHROPIC_API_KEY found in environment variables\")\n\n # Initialize Anthropics client\n self.client = anthropic.Anthropic(api_key=Config.ANTHROPIC_API_KEY)\n\n self.conversation_history: List[Dict[str, Any]] = []\n self.console = Console()\n\n self.thinking_enabled = getattr(Config, 'ENABLE_THINKING', False)\n self.temperature = getattr(Config, 'DEFAULT_TEMPERATURE', 0.7)\n self.total_tokens_used = 0\n\n self.tools = self._load_tools()", "creation_date": "2024-11-29T02:25:20Z", "repo": "Doriandarko/claude-engineer", "file_path": "ce3.py", "stars": 11052, "label": 0} +{"function": " def _execute_uv_install(self, package_name: str) -> bool:\n \"\"\"\n Execute the uvpackagemanager tool directly to install the missing package.\n Returns True if installation seems successful (no errors in output), otherwise False.\n \"\"\"\n class ToolUseMock:\n name = \"uvpackagemanager\"\n input = {\n \"command\": \"install\",\n \"packages\": [package_name]\n }\n\n result = self._execute_tool(ToolUseMock())\n if \"Error\" not in result and \"failed\" not in result.lower():\n self.console.print(\"[green]The package was installed successfully.[/green]\")\n return True\n else:\n self.console.print(f\"[red]Failed to install {package_name}. 
Output:[/red] {result}\")\n return False", "creation_date": "2024-11-29T02:25:20Z", "repo": "Doriandarko/claude-engineer", "file_path": "ce3.py", "stars": 11052, "label": 0} +{"function": " def _load_tools(self) -> List[Dict[str, Any]]:\n \"\"\"\n Dynamically load all tool classes from the tools directory.\n If a dependency is missing, prompt the user to install it via uvpackagemanager.\n \n Returns:\n A list of tools (dicts) containing their 'name', 'description', and 'input_schema'.\n \"\"\"\n tools = []\n tools_path = getattr(Config, 'TOOLS_DIR', None)\n\n if tools_path is None:\n self.console.print(\"[red]TOOLS_DIR not set in Config[/red]\")\n return tools\n\n # Clear cached tool modules for fresh import\n for module_name in list(sys.modules.keys()):\n if module_name.startswith('tools.') and module_name != 'tools.base':\n del sys.modules[module_name]\n\n try:\n for module_info in pkgutil.iter_modules([str(tools_path)]):\n if module_info.name == 'base':\n continue\n\n # Attempt loading the tool module\n try:\n module = importlib.import_module(f'tools.{module_info.name}')\n self._extract_tools_from_module(module, tools)\n except ImportError as e:\n # Handle missing dependencies\n missing_module = self._parse_missing_dependency(str(e))\n self.console.print(f\"\\n[yellow]Missing dependency:[/yellow] {missing_module} for tool {module_info.name}\")\n user_response = input(f\"Would you like to install {missing_module}? (y/n): \").lower()\n\n if user_response == 'y':\n success = self._execute_uv_install(missing_module)\n if success:\n # Retry loading the module after installation\n try:\n module = importlib.import_module(f'tools.{module_info.name}')\n self._extract_tools_from_module(module, tools)\n except Exception as retry_err:\n self.console.print(f\"[red]Failed to load tool after installation: {str(retry_err)}[/red]\")\n else:\n self.console.print(f\"[red]Installation of {missing_module} failed. 
Skipping this tool.[/red]\")\n else:\n self.console.print(f\"[yellow]Skipping tool {module_info.name} due to missing dependency[/yellow]\")\n except Exception as mod_err:\n self.console.print(f\"[red]Error loading module {module_info.name}:[/red] {str(mod_err)}\")\n except Exception as overall_err:\n self.console.print(f\"[red]Error in tool loading process:[/red] {str(overall_err)}\")\n\n return tools", "creation_date": "2024-11-29T02:25:20Z", "repo": "Doriandarko/claude-engineer", "file_path": "ce3.py", "stars": 11052, "label": 0} +{"function": " def _parse_missing_dependency(self, error_str: str) -> str:\n \"\"\"\n Parse the missing dependency name from an ImportError string.\n \"\"\"\n if \"No module named\" in error_str:\n parts = error_str.split(\"No module named\")\n missing_module = parts[-1].strip(\" '\\\"\")\n else:\n missing_module = error_str\n return missing_module", "creation_date": "2024-11-29T02:25:20Z", "repo": "Doriandarko/claude-engineer", "file_path": "ce3.py", "stars": 11052, "label": 0} +{"function": " def _extract_tools_from_module(self, module, tools: List[Dict[str, Any]]) -> None:\n \"\"\"\n Given a tool module, find and instantiate all tool classes (subclasses of BaseTool).\n Append them to the 'tools' list.\n \"\"\"\n for name, obj in inspect.getmembers(module):\n if (inspect.isclass(obj) and issubclass(obj, BaseTool) and obj != BaseTool):\n try:\n tool_instance = obj()\n tools.append({\n \"name\": tool_instance.name,\n \"description\": tool_instance.description,\n \"input_schema\": tool_instance.input_schema\n })\n self.console.print(f\"[green]Loaded tool:[/green] {tool_instance.name}\")\n except Exception as tool_init_err:\n self.console.print(f\"[red]Error initializing tool {name}:[/red] {str(tool_init_err)}\")", "creation_date": "2024-11-29T02:25:20Z", "repo": "Doriandarko/claude-engineer", "file_path": "ce3.py", "stars": 11052, "label": 0} +{"function": "def sample_query() -> IngestionQuery:\n \"\"\"Provide a default ``IngestionQuery`` object for use in tests.\n\n This fixture returns a ``IngestionQuery`` pre-populated with typical fields and some default ignore patterns.\n\n Returns\n -------\n IngestionQuery\n The sample ``IngestionQuery`` object.\n\n \"\"\"\n return IngestionQuery(\n user_name=\"test_user\",\n repo_name=\"test_repo\",\n local_path=Path(\"/tmp/test_repo\").resolve(),\n slug=\"test_user/test_repo\",\n id=\"id\",\n branch=\"main\",\n max_file_size=1_000_000,\n ignore_patterns={\"*.pyc\", \"__pycache__\", \".git\"},\n )", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": "def temp_directory(tmp_path: Path) -> Path:\n \"\"\"Create a temporary directory structure for testing repository scanning.\n\n The structure includes:\n test_repo/\n \u251c\u2500\u2500 file1.txt\n \u251c\u2500\u2500 file2.py\n \u251c\u2500\u2500 src/\n \u2502 \u251c\u2500\u2500 subfile1.txt\n \u2502 \u251c\u2500\u2500 subfile2.py\n \u2502 \u2514\u2500\u2500 subdir/\n \u2502 \u251c\u2500\u2500 file_subdir.txt\n \u2502 \u2514\u2500\u2500 file_subdir.py\n \u251c\u2500\u2500 dir1/\n \u2502 \u2514\u2500\u2500 file_dir1.txt\n \u2514\u2500\u2500 dir2/\n \u2514\u2500\u2500 file_dir2.txt\n\n Parameters\n ----------\n tmp_path : Path\n The temporary directory path provided by the ``tmp_path`` fixture.\n\n Returns\n -------\n Path\n The path to the created ``test_repo`` directory.\n\n \"\"\"\n test_dir = tmp_path / \"test_repo\"\n test_dir.mkdir()\n\n # Root files\n (test_dir / 
\"file1.txt\").write_text(\"Hello World\")\n (test_dir / \"file2.py\").write_text(\"print('Hello')\")\n\n # src directory and its files\n src_dir = test_dir / \"src\"\n src_dir.mkdir()\n (src_dir / \"subfile1.txt\").write_text(\"Hello from src\")\n (src_dir / \"subfile2.py\").write_text(\"print('Hello from src')\")\n\n # src/subdir and its files\n subdir = src_dir / \"subdir\"\n subdir.mkdir()\n (subdir / \"file_subdir.txt\").write_text(\"Hello from subdir\")\n (subdir / \"file_subdir.py\").write_text(\"print('Hello from subdir')\")\n\n # dir1 and its file\n dir1 = test_dir / \"dir1\"\n dir1.mkdir()\n (dir1 / \"file_dir1.txt\").write_text(\"Hello from dir1\")\n\n # dir2 and its file\n dir2 = test_dir / \"dir2\"\n dir2.mkdir()\n (dir2 / \"file_dir2.txt\").write_text(\"Hello from dir2\")\n\n return test_dir", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": "def write_notebook(tmp_path: Path) -> WriteNotebookFunc:\n \"\"\"Provide a helper function to write a ``.ipynb`` notebook file with the given content.\n\n Parameters\n ----------\n tmp_path : Path\n The temporary directory path provided by the ``tmp_path`` fixture.\n\n Returns\n -------\n WriteNotebookFunc\n A callable that accepts a filename and a dictionary (representing JSON notebook data), writes it to a\n ``.ipynb`` file, and returns the path to the file.\n\n \"\"\"\n\n def _write_notebook(name: str, content: dict[str, Any]) -> Path:\n notebook_path = tmp_path / name\n with notebook_path.open(mode=\"w\", encoding=\"utf-8\") as f:\n json.dump(content, f)\n return notebook_path\n\n return _write_notebook", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": "def stub_branches(mocker: MockerFixture) -> Callable[[list[str]], None]:\n \"\"\"Return a function that stubs git branch discovery to *branches*.\"\"\"\n\n def _factory(branches: list[str]) -> None:\n mocker.patch(\n \"gitingest.utils.git_utils.run_command\",\n new_callable=AsyncMock,\n return_value=(\"\\n\".join(f\"refs/heads/{b}\" for b in branches).encode() + b\"\\n\", b\"\"),\n )\n mocker.patch(\n \"gitingest.utils.git_utils.fetch_remote_branches_or_tags\",\n new_callable=AsyncMock,\n return_value=branches,\n )\n\n return _factory", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": "def repo_exists_true(mocker: MockerFixture) -> AsyncMock:\n \"\"\"Patch ``gitingest.clone.check_repo_exists`` to always return ``True``.\"\"\"\n return mocker.patch(\"gitingest.clone.check_repo_exists\", return_value=True)", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": "def run_command_mock(mocker: MockerFixture) -> AsyncMock:\n \"\"\"Patch ``gitingest.clone.run_command`` with an ``AsyncMock``.\n\n The mocked function returns a dummy process whose ``communicate`` method yields generic\n ``stdout`` / ``stderr`` bytes. 
Tests can still access / tweak the mock via the fixture argument.\n \"\"\"\n mock_exec = mocker.patch(\"gitingest.clone.run_command\", new_callable=AsyncMock)\n\n # Provide a default dummy process so most tests don't have to create one.\n dummy_process = AsyncMock()\n dummy_process.communicate.return_value = (b\"output\", b\"error\")\n mock_exec.return_value = dummy_process\n\n return mock_exec", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": " def _write_notebook(name: str, content: dict[str, Any]) -> Path:\n notebook_path = tmp_path / name\n with notebook_path.open(mode=\"w\", encoding=\"utf-8\") as f:\n json.dump(content, f)\n return notebook_path", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": " def _factory(branches: list[str]) -> None:\n mocker.patch(\n \"gitingest.utils.git_utils.run_command\",\n new_callable=AsyncMock,\n return_value=(\"\\n\".join(f\"refs/heads/{b}\" for b in branches).encode() + b\"\\n\", b\"\"),\n )\n mocker.patch(\n \"gitingest.utils.git_utils.fetch_remote_branches_or_tags\",\n new_callable=AsyncMock,\n return_value=branches,\n )", "creation_date": "2025-01-03T07:33:28Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/conftest.py", "stars": 10888, "label": 0} +{"function": "def test_cli_writes_file(\n tmp_path: Path,\n monkeypatch: pytest.MonkeyPatch,\n *,\n cli_args: list[str],\n expect_file: bool,\n) -> None:\n \"\"\"Run the CLI and verify that the SARIF file is created (or not).\"\"\"\n expectes_exit_code = 0\n # Work inside an isolated temp directory\n monkeypatch.chdir(tmp_path)\n\n result = _invoke_isolated_cli_runner(cli_args)\n\n assert result.exit_code == expectes_exit_code, result.stderr\n\n # Summary line should be on STDOUT\n stdout_lines = result.stdout.splitlines()\n assert f\"Analysis complete! 
Output written to: {OUTPUT_FILE_NAME}\" in stdout_lines\n\n # File side-effect\n sarif_file = tmp_path / OUTPUT_FILE_NAME\n assert sarif_file.exists() is expect_file, f\"{OUTPUT_FILE_NAME} existence did not match expectation\"", "creation_date": "2025-01-22T15:39:57Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/test_cli.py", "stars": 10888, "label": 0} +{"function": "def test_cli_with_stdout_output() -> None:\n \"\"\"Test CLI invocation with output directed to STDOUT.\"\"\"\n output_file = Path(OUTPUT_FILE_NAME)\n # Clean up any existing digest.txt file before test\n if output_file.exists():\n output_file.unlink()\n\n try:\n result = _invoke_isolated_cli_runner([\"./\", \"--output\", \"-\", \"--exclude-pattern\", \"tests/\"])\n\n # \u2500\u2500\u2500 core expectations (stdout) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500-\n assert result.exit_code == 0, f\"CLI exited with code {result.exit_code}, stderr: {result.stderr}\"\n assert \"---\" in result.stdout, \"Expected file separator '---' not found in STDOUT\"\n assert \"src/gitingest/__main__.py\" in result.stdout, (\n \"Expected content (e.g., src/gitingest/__main__.py) not found in STDOUT\"\n )\n assert not output_file.exists(), f\"Output file {output_file} was unexpectedly created.\"\n\n # \u2500\u2500\u2500 the summary must *not* pollute STDOUT, must appear on STDERR \u2500\u2500\u2500\n summary = \"Analysis complete! Output sent to stdout.\"\n stdout_lines = result.stdout.splitlines()\n stderr_lines = result.stderr.splitlines()\n assert summary not in stdout_lines, \"Unexpected summary message found in STDOUT\"\n assert summary in stderr_lines, \"Expected summary message not found in STDERR\"\n assert f\"Output written to: {output_file.name}\" not in stderr_lines\n finally:\n # Clean up any digest.txt file that might have been created during test\n if output_file.exists():\n output_file.unlink()", "creation_date": "2025-01-22T15:39:57Z", "repo": "coderamp-labs/gitingest", "file_path": "tests/test_cli.py", "stars": 10888, "label": 0} +{"function": " def get(self, key: str, default: Any = None) -> Any:\n \"\"\"Get the value associated with the given key. If the key does not exist, return the default value.\n\n Args:\n key (str): The attribute name to retrieve.\n default (Any, optional): The value to return if the attribute does not exist. Defaults to None.\n\n Returns:\n Any: The value of the attribute or the default value.\n \"\"\"\n try:\n return getattr(self, key)\n except AttributeError:\n return default", "creation_date": "2025-07-04T15:12:09Z", "repo": "volcengine/verl", "file_path": "verl/base_config.py", "stars": 10883, "label": 0} +{"function": " def __getitem__(self, key: str):\n \"\"\"Implement the [] operator for the class. Allows accessing attributes like dictionary items.\n\n Args:\n key (str): The attribute name to retrieve.\n\n Returns:\n Any: The value of the attribute.\n\n Raises:\n AttributeError: If the attribute does not exist.\n TypeError: If the key type is not string\n \"\"\"\n return getattr(self, key)", "creation_date": "2025-07-04T15:12:09Z", "repo": "volcengine/verl", "file_path": "verl/base_config.py", "stars": 10883, "label": 0} +{"function": " def __iter__(self):\n \"\"\"Implement the iterator protocol. 
Allows iterating over the attribute names of the instance.\n\n Yields:\n str: The name of each field in the dataclass.\n \"\"\"\n for f in fields(self):\n yield f.name", "creation_date": "2025-07-04T15:12:09Z", "repo": "volcengine/verl", "file_path": "verl/base_config.py", "stars": 10883, "label": 0} +{"function": " def __len__(self):\n \"\"\"\n Return the number of fields in the dataclass.\n\n Returns:\n int: The number of fields in the dataclass.\n \"\"\"\n return len(fields(self))", "creation_date": "2025-07-04T15:12:09Z", "repo": "volcengine/verl", "file_path": "verl/base_config.py", "stars": 10883, "label": 0} +{"function": "def pad_dataproto_to_divisor(data: \"DataProto\", size_divisor: int):\n \"\"\"Pad a DataProto to size divisible by size_divisor\n\n Args:\n size_divisor (int): size divisor\n\n Returns:\n data: (DataProto): the padded DataProto\n pad_size (int)\n \"\"\"\n assert isinstance(data, DataProto), \"data must be a DataProto\"\n if len(data) % size_divisor != 0:\n pad_size = size_divisor - len(data) % size_divisor\n padding_protos = []\n remaining_pad = pad_size\n while remaining_pad > 0:\n take_size = min(remaining_pad, len(data))\n padding_protos.append(data[:take_size])\n remaining_pad -= take_size\n data_padded = DataProto.concat([data] + padding_protos)\n else:\n if len(data) == 0:\n logging.warning(\"padding a DataProto with no item, no changed made\")\n pad_size = 0\n data_padded = data\n return data_padded, pad_size", "creation_date": "2024-10-31T06:29:44Z", "repo": "volcengine/verl", "file_path": "verl/protocol.py", "stars": 10883, "label": 0} +{"function": "def unpad_dataproto(data: \"DataProto\", pad_size):\n \"\"\"Unpad the data proto with pad_size. i.e. `data[:-pad_size]`\"\"\"\n if pad_size != 0:\n data = data[:-pad_size]\n return data", "creation_date": "2024-10-31T06:29:44Z", "repo": "volcengine/verl", "file_path": "verl/protocol.py", "stars": 10883, "label": 0} +{"function": "def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict:\n \"\"\"Union two tensordicts.\"\"\"\n assert tensor_dict1.batch_size == tensor_dict2.batch_size, (\n f\"Two tensor dict must have identical batch size. 
Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}\"\n )\n for key in tensor_dict2.keys():\n if key not in tensor_dict1.keys():\n tensor_dict1[key] = tensor_dict2[key]\n else:\n assert tensor_dict1[key].equal(tensor_dict2[key]), (\n f\"{key} in tensor_dict1 and tensor_dict2 are not the same object\"\n )\n\n return tensor_dict1", "creation_date": "2024-10-31T06:29:44Z", "repo": "volcengine/verl", "file_path": "verl/protocol.py", "stars": 10883, "label": 0} +{"function": "def union_numpy_dict(tensor_dict1: dict[str, np.ndarray], tensor_dict2: dict[str, np.ndarray]) -> dict[str, np.ndarray]:\n for key, val in tensor_dict2.items():\n if key in tensor_dict1:\n assert isinstance(tensor_dict2[key], np.ndarray)\n assert isinstance(tensor_dict1[key], np.ndarray)\n # to properly deal with nan and object type\n assert pd.DataFrame(tensor_dict2[key]).equals(pd.DataFrame(tensor_dict1[key])), (\n f\"{key} in tensor_dict1 and tensor_dict2 are not the same object\"\n )\n tensor_dict1[key] = val\n\n return tensor_dict1", "creation_date": "2024-10-31T06:29:44Z", "repo": "volcengine/verl", "file_path": "verl/protocol.py", "stars": 10883, "label": 0} +{"function": "def list_of_dict_to_dict_of_list(list_of_dict: list[dict]):\n if len(list_of_dict) == 0:\n return {}\n keys = list_of_dict[0].keys()\n output = {key: [] for key in keys}\n for data in list_of_dict:\n for key, item in data.items():\n assert key in output\n output[key].append(item)\n return output", "creation_date": "2024-10-31T06:29:44Z", "repo": "volcengine/verl", "file_path": "verl/protocol.py", "stars": 10883, "label": 0} +{"function": "def fold_batch_dim(data: \"DataProto\", new_batch_size):\n \"\"\"\n Fold a batch dim from [bsz, xxx] into [new_bsz, bsz // new_bsz, xxx]\n \"\"\"\n batch_size = data.batch.batch_size[0]\n\n assert batch_size % new_batch_size == 0\n\n tensor: TensorDict = data.batch\n non_tensor = data.non_tensor_batch\n\n tensor = tensor.view(new_batch_size, -1)\n tensor.auto_batch_size_(batch_dims=1)\n\n for key, val in non_tensor.items():\n non_tensor[key] = np.reshape(val, newshape=(new_batch_size, -1, *val.shape[1:]))\n\n return type(data)(batch=tensor, non_tensor_batch=non_tensor, meta_info=data.meta_info)", "creation_date": "2024-10-31T06:29:44Z", "repo": "volcengine/verl", "file_path": "verl/protocol.py", "stars": 10883, "label": 0} +{"function": " def __init__(\n self,\n config: \"AgentConfig\",\n id: str | None = None,\n name: str | None = None,\n agent0: \"Agent|None\" = None,\n log: Log.Log | None = None,\n paused: bool = False,\n streaming_agent: \"Agent|None\" = None,\n created_at: datetime | None = None,\n type: AgentContextType = AgentContextType.USER,\n last_message: datetime | None = None,\n ):\n # build context\n self.id = id or str(uuid.uuid4())\n self.name = name\n self.config = config\n self.log = log or Log.Log()\n self.agent0 = agent0 or Agent(0, self.config, self)\n self.paused = paused\n self.streaming_agent = streaming_agent\n self.task: DeferredTask | None = None\n self.created_at = created_at or datetime.now(timezone.utc)\n self.type = type\n AgentContext._counter += 1\n self.no = AgentContext._counter\n # set to start of unix epoch\n self.last_message = last_message or datetime.now(timezone.utc)\n\n existing = self._contexts.get(self.id, None)\n if existing:\n AgentContext.remove(self.id)\n self._contexts[self.id] = self", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def get(id: str):\n return 
AgentContext._contexts.get(id, None)", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def first():\n if not AgentContext._contexts:\n return None\n return list(AgentContext._contexts.values())[0]", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def all():\n return list(AgentContext._contexts.values())", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def remove(id: str):\n context = AgentContext._contexts.pop(id, None)\n if context and context.task:\n context.task.kill()\n return context", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"created_at\": (\n Localization.get().serialize_datetime(self.created_at)\n if self.created_at\n else Localization.get().serialize_datetime(datetime.fromtimestamp(0))\n ),\n \"no\": self.no,\n \"log_guid\": self.log.guid,\n \"log_version\": len(self.log.updates),\n \"log_length\": len(self.log.logs),\n \"paused\": self.paused,\n \"last_message\": (\n Localization.get().serialize_datetime(self.last_message)\n if self.last_message\n else Localization.get().serialize_datetime(datetime.fromtimestamp(0))\n ),\n \"type\": self.type.value,\n }", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def log_to_all(\n type: Log.Type,\n heading: str | None = None,\n content: str | None = None,\n kvps: dict | None = None,\n temp: bool | None = None,\n update_progress: Log.ProgressUpdate | None = None,\n id: str | None = None, # Add id parameter\n **kwargs,\n ) -> list[Log.LogItem]:\n items: list[Log.LogItem] = []\n for context in AgentContext.all():\n items.append(\n context.log.log(\n type, heading, content, kvps, temp, update_progress, id, **kwargs\n )\n )\n return items", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def kill_process(self):\n if self.task:\n self.task.kill()", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def reset(self):\n self.kill_process()\n self.log.reset()\n self.agent0 = Agent(0, self.config, self)\n self.streaming_agent = None\n self.paused = False", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": " def nudge(self):\n self.kill_process()\n self.paused = False\n self.task = self.run_task(self.get_agent().monologue)\n return self.task", "creation_date": "2024-06-10T09:10:31Z", "repo": "frdel/agent-zero", "file_path": "agent.py", "stars": 10879, "label": 0} +{"function": "def env_variables(request: FixtureRequest):\n # Set up the environment\n env_name: str = request.param\n envs = ENV_SETUPS[env_name]\n original_env = {key: os.environ.get(key) for key in envs}\n os.environ.update(envs)\n\n yield # Run the test\n\n # Restore the environment\n for key in envs:\n if (original_val := original_env.get(key)) is None:\n os.environ.pop(key, None)\n else:\n os.environ[key] = original_val", "creation_date": "2025-04-11T15:25:59Z", "repo": "google/adk-python", "file_path": 
"tests/unittests/conftest.py", "stars": 10863, "label": 0} +{"function": "def pytest_generate_tests(metafunc: Metafunc):\n \"\"\"Generate test cases for each environment setup.\"\"\"\n if env_variables.__name__ in metafunc.fixturenames:\n if not _is_explicitly_marked(env_variables.__name__, metafunc):\n metafunc.parametrize(\n env_variables.__name__, ENV_SETUPS.keys(), indirect=True\n )", "creation_date": "2025-04-11T15:25:59Z", "repo": "google/adk-python", "file_path": "tests/unittests/conftest.py", "stars": 10863, "label": 0} +{"function": "def _is_explicitly_marked(mark_name: str, metafunc: Metafunc) -> bool:\n if hasattr(metafunc.function, 'pytestmark'):\n for mark in metafunc.function.pytestmark:\n if mark.name == 'parametrize' and mark.args[0] == mark_name:\n return True\n return False", "creation_date": "2025-04-11T15:25:59Z", "repo": "google/adk-python", "file_path": "tests/unittests/conftest.py", "stars": 10863, "label": 0} +{"function": " def __init__(\n self,\n name: str,\n parent_agent: Optional[BaseAgent] = None,\n ):\n super().__init__(name=name, sub_agents=[])\n # BaseAgent doesn't have disallow_transfer_to_parent field\n # This is intentional as we want to test non-LLM agents\n if parent_agent:\n self.parent_agent = parent_agent", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": " async def _run_async_impl(self, invocation_context):\n yield Event(\n invocation_id=invocation_context.invocation_id,\n author=self.name,\n content=types.Content(\n role=\"model\", parts=[types.Part(text=\"Test response\")]\n ),\n )", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": " def __init__(\n self,\n name: str,\n disallow_transfer_to_parent: bool = False,\n parent_agent: Optional[BaseAgent] = None,\n ):\n # Use a string model instead of mock\n super().__init__(name=name, model=\"gemini-1.5-pro\", sub_agents=[])\n self.disallow_transfer_to_parent = disallow_transfer_to_parent\n self.parent_agent = parent_agent", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": " async def _run_async_impl(self, invocation_context):\n yield Event(\n invocation_id=invocation_context.invocation_id,\n author=self.name,\n content=types.Content(\n role=\"model\", parts=[types.Part(text=\"Test LLM response\")]\n ),\n )", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": " def __init__(self):\n super().__init__(name=\"mock_plugin\")\n self.enable_user_message_callback = False\n self.enable_event_callback = False", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": " async def on_user_message_callback(\n self,\n *,\n invocation_context: InvocationContext,\n user_message: types.Content,\n ) -> Optional[types.Content]:\n if not self.enable_user_message_callback:\n return None\n return types.Content(\n role=\"model\",\n parts=[types.Part(text=self.ON_USER_CALLBACK_MSG)],\n )", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": " async def on_event_callback(\n self, *, 
invocation_context: InvocationContext, event: Event\n ) -> Optional[Event]:\n if not self.enable_event_callback:\n return None\n return Event(\n invocation_id=\"\",\n author=\"\",\n content=types.Content(\n parts=[\n types.Part(\n text=self.ON_EVENT_CALLBACK_MSG,\n )\n ],\n role=event.content.role,\n ),\n )", "creation_date": "2025-06-21T00:15:29Z", "repo": "google/adk-python", "file_path": "tests/unittests/test_runners.py", "stars": 10863, "label": 0} +{"function": "def yolov10_inference(image, video, model_id, image_size, conf_threshold):\n model = YOLOv10.from_pretrained(f'jameslahm/{model_id}')\n if image:\n results = model.predict(source=image, imgsz=image_size, conf=conf_threshold)\n annotated_image = results[0].plot()\n return annotated_image[:, :, ::-1], None\n else:\n video_path = tempfile.mktemp(suffix=\".webm\")\n with open(video_path, \"wb\") as f:\n with open(video, \"rb\") as g:\n f.write(g.read())\n\n cap = cv2.VideoCapture(video_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n output_video_path = tempfile.mktemp(suffix=\".webm\")\n out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'vp80'), fps, (frame_width, frame_height))\n\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n\n results = model.predict(source=frame, imgsz=image_size, conf=conf_threshold)\n annotated_frame = results[0].plot()\n out.write(annotated_frame)\n\n cap.release()\n out.release()\n\n return None, output_video_path", "creation_date": "2024-05-29T03:03:33Z", "repo": "THU-MIG/yolov10", "file_path": "app.py", "stars": 10840, "label": 0} +{"function": "def yolov10_inference_for_examples(image, model_path, image_size, conf_threshold):\n annotated_image, _ = yolov10_inference(image, None, model_path, image_size, conf_threshold)\n return annotated_image", "creation_date": "2024-05-29T03:03:33Z", "repo": "THU-MIG/yolov10", "file_path": "app.py", "stars": 10840, "label": 0} +{"function": "def app():\n with gr.Blocks():\n with gr.Row():\n with gr.Column():\n image = gr.Image(type=\"pil\", label=\"Image\", visible=True)\n video = gr.Video(label=\"Video\", visible=False)\n input_type = gr.Radio(\n choices=[\"Image\", \"Video\"],\n value=\"Image\",\n label=\"Input Type\",\n )\n model_id = gr.Dropdown(\n label=\"Model\",\n choices=[\n \"yolov10n\",\n \"yolov10s\",\n \"yolov10m\",\n \"yolov10b\",\n \"yolov10l\",\n \"yolov10x\",\n ],\n value=\"yolov10m\",\n )\n image_size = gr.Slider(\n label=\"Image Size\",\n minimum=320,\n maximum=1280,\n step=32,\n value=640,\n )\n conf_threshold = gr.Slider(\n label=\"Confidence Threshold\",\n minimum=0.0,\n maximum=1.0,\n step=0.05,\n value=0.25,\n )\n yolov10_infer = gr.Button(value=\"Detect Objects\")\n\n with gr.Column():\n output_image = gr.Image(type=\"numpy\", label=\"Annotated Image\", visible=True)\n output_video = gr.Video(label=\"Annotated Video\", visible=False)\n\n def update_visibility(input_type):\n image = gr.update(visible=True) if input_type == \"Image\" else gr.update(visible=False)\n video = gr.update(visible=False) if input_type == \"Image\" else gr.update(visible=True)\n output_image = gr.update(visible=True) if input_type == \"Image\" else gr.update(visible=False)\n output_video = gr.update(visible=False) if input_type == \"Image\" else gr.update(visible=True)\n\n return image, video, output_image, output_video\n\n input_type.change(\n fn=update_visibility,\n inputs=[input_type],\n outputs=[image, video, 
output_image, output_video],\n )\n\n def run_inference(image, video, model_id, image_size, conf_threshold, input_type):\n if input_type == \"Image\":\n return yolov10_inference(image, None, model_id, image_size, conf_threshold)\n else:\n return yolov10_inference(None, video, model_id, image_size, conf_threshold)\n\n\n yolov10_infer.click(\n fn=run_inference,\n inputs=[image, video, model_id, image_size, conf_threshold, input_type],\n outputs=[output_image, output_video],\n )\n\n gr.Examples(\n examples=[\n [\n \"ultralytics/assets/bus.jpg\",\n \"yolov10s\",\n 640,\n 0.25,\n ],\n [\n \"ultralytics/assets/zidane.jpg\",\n \"yolov10s\",\n 640,\n 0.25,\n ],\n ],\n fn=yolov10_inference_for_examples,\n inputs=[\n image,\n model_id,\n image_size,\n conf_threshold,\n ],\n outputs=[output_image],\n cache_examples='lazy',\n )", "creation_date": "2024-05-29T03:03:33Z", "repo": "THU-MIG/yolov10", "file_path": "app.py", "stars": 10840, "label": 0} +{"function": " def update_visibility(input_type):\n image = gr.update(visible=True) if input_type == \"Image\" else gr.update(visible=False)\n video = gr.update(visible=False) if input_type == \"Image\" else gr.update(visible=True)\n output_image = gr.update(visible=True) if input_type == \"Image\" else gr.update(visible=False)\n output_video = gr.update(visible=False) if input_type == \"Image\" else gr.update(visible=True)\n\n return image, video, output_image, output_video", "creation_date": "2024-05-29T03:03:33Z", "repo": "THU-MIG/yolov10", "file_path": "app.py", "stars": 10840, "label": 0} +{"function": " def run_inference(image, video, model_id, image_size, conf_threshold, input_type):\n if input_type == \"Image\":\n return yolov10_inference(image, None, model_id, image_size, conf_threshold)\n else:\n return yolov10_inference(None, video, model_id, image_size, conf_threshold)", "creation_date": "2024-05-29T03:03:33Z", "repo": "THU-MIG/yolov10", "file_path": "app.py", "stars": 10840, "label": 0} +{"function": " def __init__(self):\n \"\"\"Initializes the distance calculation class with default values for Visual, Image, track and distance\n parameters.\n \"\"\"\n\n # Visual & im0 information\n self.im0 = None\n self.annotator = None\n self.view_img = False\n self.line_color = (255, 255, 0)\n self.centroid_color = (255, 0, 255)\n\n # Predict/track information\n self.clss = None\n self.names = None\n self.boxes = None\n self.line_thickness = 2\n self.trk_ids = None\n\n # Distance calculation information\n self.centroids = []\n self.pixel_per_meter = 10\n\n # Mouse event\n self.left_mouse_count = 0\n self.selected_boxes = {}\n\n # Check if environment support imshow\n self.env_check = check_imshow(warn=True)", "creation_date": "2024-01-05T09:38:13Z", "repo": "THU-MIG/yolov10", "file_path": "ultralytics/solutions/distance_calculation.py", "stars": 10840, "label": 0} +{"function": " def set_args(\n self,\n names,\n pixels_per_meter=10,\n view_img=False,\n line_thickness=2,\n line_color=(255, 255, 0),\n centroid_color=(255, 0, 255),\n ):\n \"\"\"\n Configures the distance calculation and display parameters.\n\n Args:\n names (dict): object detection classes names\n pixels_per_meter (int): Number of pixels in meter\n view_img (bool): Flag indicating frame display\n line_thickness (int): Line thickness for bounding boxes.\n line_color (RGB): color of centroids line\n centroid_color (RGB): colors of bbox centroids\n \"\"\"\n self.names = names\n self.pixel_per_meter = pixels_per_meter\n self.view_img = view_img\n self.line_thickness = line_thickness\n 
self.line_color = line_color\n self.centroid_color = centroid_color", "creation_date": "2024-01-05T09:38:13Z", "repo": "THU-MIG/yolov10", "file_path": "ultralytics/solutions/distance_calculation.py", "stars": 10840, "label": 0} +{"function": " def mouse_event_for_distance(self, event, x, y, flags, param):\n \"\"\"\n This function is designed to move region with mouse events in a real-time video stream.\n\n Args:\n event (int): The type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).\n x (int): The x-coordinate of the mouse pointer.\n y (int): The y-coordinate of the mouse pointer.\n flags (int): Any flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY,\n cv2.EVENT_FLAG_SHIFTKEY, etc.).\n param (dict): Additional parameters you may want to pass to the function.\n \"\"\"\n global selected_boxes\n global left_mouse_count\n if event == cv2.EVENT_LBUTTONDOWN:\n self.left_mouse_count += 1\n if self.left_mouse_count <= 2:\n for box, track_id in zip(self.boxes, self.trk_ids):\n if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:\n self.selected_boxes[track_id] = []\n self.selected_boxes[track_id] = box\n\n if event == cv2.EVENT_RBUTTONDOWN:\n self.selected_boxes = {}\n self.left_mouse_count = 0", "creation_date": "2024-01-05T09:38:13Z", "repo": "THU-MIG/yolov10", "file_path": "ultralytics/solutions/distance_calculation.py", "stars": 10840, "label": 0} +{"function": " def extract_tracks(self, tracks):\n \"\"\"\n Extracts results from the provided data.\n\n Args:\n tracks (list): List of tracks obtained from the object tracking process.\n \"\"\"\n self.boxes = tracks[0].boxes.xyxy.cpu()\n self.clss = tracks[0].boxes.cls.cpu().tolist()\n self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()", "creation_date": "2024-01-05T09:38:13Z", "repo": "THU-MIG/yolov10", "file_path": "ultralytics/solutions/distance_calculation.py", "stars": 10840, "label": 0} +{"function": " def calculate_centroid(self, box):\n \"\"\"\n Calculate the centroid of bounding box.\n\n Args:\n box (list): Bounding box data\n \"\"\"\n return int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)", "creation_date": "2024-01-05T09:38:13Z", "repo": "THU-MIG/yolov10", "file_path": "ultralytics/solutions/distance_calculation.py", "stars": 10840, "label": 0} +{"function": "def build_logger(logger_name, logger_filename):\n global handler\n\n formatter = logging.Formatter(\n fmt=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n # Set the format of root handlers\n if not logging.getLogger().handlers:\n logging.basicConfig(level=logging.INFO)\n logging.getLogger().handlers[0].setFormatter(formatter)\n\n # Redirect stdout and stderr to loggers\n stdout_logger = logging.getLogger(\"stdout\")\n stdout_logger.setLevel(logging.INFO)\n sl = StreamToLogger(stdout_logger, logging.INFO)\n sys.stdout = sl\n\n stderr_logger = logging.getLogger(\"stderr\")\n stderr_logger.setLevel(logging.ERROR)\n sl = StreamToLogger(stderr_logger, logging.ERROR)\n sys.stderr = sl\n\n # Get logger\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n\n # Add a file handler for all loggers\n if handler is None:\n os.makedirs(LOGDIR, exist_ok=True)\n filename = os.path.join(LOGDIR, logger_filename)\n handler = logging.handlers.TimedRotatingFileHandler(\n filename, when='D', utc=True, encoding='UTF-8')\n handler.setFormatter(formatter)\n\n for name, item in logging.root.manager.loggerDict.items():\n if isinstance(item, 
logging.Logger):\n item.addHandler(handler)\n\n return logger", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": "def pretty_print_semaphore(semaphore):\n if semaphore is None:\n return \"None\"\n return f\"Semaphore(value={semaphore._value}, locked={semaphore.locked()})\"", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": "def load_image_from_base64(image):\n return Image.open(BytesIO(base64.b64decode(image)))", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": "async def generate(request: Request):\n logger.info(\"Worker generating...\")\n params = await request.json()\n uid = uuid.uuid4()\n try:\n file_path, uid = worker.generate(uid, params)\n return FileResponse(file_path)\n except ValueError as e:\n traceback.print_exc()\n print(\"Caught ValueError:\", e)\n ret = {\n \"text\": server_error_msg,\n \"error_code\": 1,\n }\n return JSONResponse(ret, status_code=404)\n except torch.cuda.CudaError as e:\n print(\"Caught torch.cuda.CudaError:\", e)\n ret = {\n \"text\": server_error_msg,\n \"error_code\": 1,\n }\n return JSONResponse(ret, status_code=404)\n except Exception as e:\n print(\"Caught Unknown Error\", e)\n traceback.print_exc()\n ret = {\n \"text\": server_error_msg,\n \"error_code\": 1,\n }\n return JSONResponse(ret, status_code=404)", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": "async def generate(request: Request):\n logger.info(\"Worker send...\")\n params = await request.json()\n uid = uuid.uuid4()\n threading.Thread(target=worker.generate, args=(uid, params,)).start()\n ret = {\"uid\": str(uid)}\n return JSONResponse(ret, status_code=200)", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": "async def status(uid: str):\n save_file_path = os.path.join(SAVE_DIR, f'{uid}.glb')\n print(save_file_path, os.path.exists(save_file_path))\n if not os.path.exists(save_file_path):\n response = {'status': 'processing'}\n return JSONResponse(response, status_code=200)\n else:\n base64_str = base64.b64encode(open(save_file_path, 'rb').read()).decode()\n response = {'status': 'completed', 'model_base64': base64_str}\n return JSONResponse(response, status_code=200)", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": " def __init__(self, logger, log_level=logging.INFO):\n self.terminal = sys.stdout\n self.logger = logger\n self.log_level = log_level\n self.linebuf = ''", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": " def __getattr__(self, attr):\n return getattr(self.terminal, attr)", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": " def write(self, buf):\n temp_linebuf = self.linebuf + buf\n self.linebuf = ''\n for line in temp_linebuf.splitlines(True):\n # From the io.TextIOWrapper docs:\n # On output, if newline is None, any '\\n' characters written\n # are translated to 
the system default line separator.\n # By default sys.stdout.write() expects '\\n' newlines and then\n # translates them so this is still cross platform.\n if line[-1] == '\\n':\n self.logger.log(self.log_level, line.rstrip())\n else:\n self.linebuf += line", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": " def flush(self):\n if self.linebuf != '':\n self.logger.log(self.log_level, self.linebuf.rstrip())\n self.linebuf = ''", "creation_date": "2025-01-27T06:56:01Z", "repo": "Tencent-Hunyuan/Hunyuan3D-2", "file_path": "api_server.py", "stars": 10815, "label": 0} +{"function": "def create_tutorial_flow():\n \"\"\"Creates and returns the codebase tutorial generation flow.\"\"\"\n\n # Instantiate nodes\n fetch_repo = FetchRepo()\n identify_abstractions = IdentifyAbstractions(max_retries=5, wait=20)\n analyze_relationships = AnalyzeRelationships(max_retries=5, wait=20)\n order_chapters = OrderChapters(max_retries=5, wait=20)\n write_chapters = WriteChapters(max_retries=5, wait=20) # This is a BatchNode\n combine_tutorial = CombineTutorial()\n\n # Connect nodes in sequence based on the design\n fetch_repo >> identify_abstractions\n identify_abstractions >> analyze_relationships\n analyze_relationships >> order_chapters\n order_chapters >> write_chapters\n write_chapters >> combine_tutorial\n\n # Create the flow starting with FetchRepo\n tutorial_flow = Flow(start=fetch_repo)\n\n return tutorial_flow", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "flow.py", "stars": 10805, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser(description=\"Generate a tutorial for a GitHub codebase or local directory.\")\n\n # Create mutually exclusive group for source\n source_group = parser.add_mutually_exclusive_group(required=True)\n source_group.add_argument(\"--repo\", help=\"URL of the public GitHub repository.\")\n source_group.add_argument(\"--dir\", help=\"Path to local directory.\")\n\n parser.add_argument(\"-n\", \"--name\", help=\"Project name (optional, derived from repo/directory if omitted).\")\n parser.add_argument(\"-t\", \"--token\", help=\"GitHub personal access token (optional, reads from GITHUB_TOKEN env var if not provided).\")\n parser.add_argument(\"-o\", \"--output\", default=\"output\", help=\"Base directory for output (default: ./output).\")\n parser.add_argument(\"-i\", \"--include\", nargs=\"+\", help=\"Include file patterns (e.g. '*.py' '*.js'). Defaults to common code files if not specified.\")\n parser.add_argument(\"-e\", \"--exclude\", nargs=\"+\", help=\"Exclude file patterns (e.g. 'tests/*' 'docs/*'). 
Defaults to test/build directories if not specified.\")\n parser.add_argument(\"-s\", \"--max-size\", type=int, default=100000, help=\"Maximum file size in bytes (default: 100000, about 100KB).\")\n # Add language parameter for multi-language support\n parser.add_argument(\"--language\", default=\"english\", help=\"Language for the generated tutorial (default: english)\")\n # Add use_cache parameter to control LLM caching\n parser.add_argument(\"--no-cache\", action=\"store_true\", help=\"Disable LLM response caching (default: caching enabled)\")\n # Add max_abstraction_num parameter to control the number of abstractions\n parser.add_argument(\"--max-abstractions\", type=int, default=10, help=\"Maximum number of abstractions to identify (default: 10)\")\n\n args = parser.parse_args()\n\n # Get GitHub token from argument or environment variable if using repo\n github_token = None\n if args.repo:\n github_token = args.token or os.environ.get('GITHUB_TOKEN')\n if not github_token:\n print(\"Warning: No GitHub token provided. You might hit rate limits for public repositories.\")\n\n # Initialize the shared dictionary with inputs\n shared = {\n \"repo_url\": args.repo,\n \"local_dir\": args.dir,\n \"project_name\": args.name, # Can be None, FetchRepo will derive it\n \"github_token\": github_token,\n \"output_dir\": args.output, # Base directory for CombineTutorial output\n\n # Add include/exclude patterns and max file size\n \"include_patterns\": set(args.include) if args.include else DEFAULT_INCLUDE_PATTERNS,\n \"exclude_patterns\": set(args.exclude) if args.exclude else DEFAULT_EXCLUDE_PATTERNS,\n \"max_file_size\": args.max_size,\n\n # Add language for multi-language support\n \"language\": args.language,\n \n # Add use_cache flag (inverse of no-cache flag)\n \"use_cache\": not args.no_cache,\n \n # Add max_abstraction_num parameter\n \"max_abstraction_num\": args.max_abstractions,\n\n # Outputs will be populated by the nodes\n \"files\": [],\n \"abstractions\": [],\n \"relationships\": {},\n \"chapter_order\": [],\n \"chapters\": [],\n \"final_output_dir\": None\n }\n\n # Display starting message with repository/directory and language\n print(f\"Starting tutorial generation for: {args.repo or args.dir} in {args.language.capitalize()} language\")\n print(f\"LLM caching: {'Disabled' if args.no_cache else 'Enabled'}\")\n\n # Create the flow instance\n tutorial_flow = create_tutorial_flow()\n\n # Run the flow\n tutorial_flow.run(shared)", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "main.py", "stars": 10805, "label": 0} +{"function": "def get_content_for_indices(files_data, indices):\n content_map = {}\n for i in indices:\n if 0 <= i < len(files_data):\n path, content = files_data[i]\n content_map[f\"{i} # {path}\"] = (\n content # Use index + path as key for context\n )\n return content_map", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def prep(self, shared):\n repo_url = shared.get(\"repo_url\")\n local_dir = shared.get(\"local_dir\")\n project_name = shared.get(\"project_name\")\n\n if not project_name:\n # Basic name derivation from URL or directory\n if repo_url:\n project_name = repo_url.split(\"/\")[-1].replace(\".git\", \"\")\n else:\n project_name = os.path.basename(os.path.abspath(local_dir))\n shared[\"project_name\"] = project_name\n\n # Get file patterns directly from shared\n 
include_patterns = shared[\"include_patterns\"]\n exclude_patterns = shared[\"exclude_patterns\"]\n max_file_size = shared[\"max_file_size\"]\n\n return {\n \"repo_url\": repo_url,\n \"local_dir\": local_dir,\n \"token\": shared.get(\"github_token\"),\n \"include_patterns\": include_patterns,\n \"exclude_patterns\": exclude_patterns,\n \"max_file_size\": max_file_size,\n \"use_relative_paths\": True,\n }", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def exec(self, prep_res):\n if prep_res[\"repo_url\"]:\n print(f\"Crawling repository: {prep_res['repo_url']}...\")\n result = crawl_github_files(\n repo_url=prep_res[\"repo_url\"],\n token=prep_res[\"token\"],\n include_patterns=prep_res[\"include_patterns\"],\n exclude_patterns=prep_res[\"exclude_patterns\"],\n max_file_size=prep_res[\"max_file_size\"],\n use_relative_paths=prep_res[\"use_relative_paths\"],\n )\n else:\n print(f\"Crawling directory: {prep_res['local_dir']}...\")\n\n result = crawl_local_files(\n directory=prep_res[\"local_dir\"],\n include_patterns=prep_res[\"include_patterns\"],\n exclude_patterns=prep_res[\"exclude_patterns\"],\n max_file_size=prep_res[\"max_file_size\"],\n use_relative_paths=prep_res[\"use_relative_paths\"]\n )\n\n # Convert dict to list of tuples: [(path, content), ...]\n files_list = list(result.get(\"files\", {}).items())\n if len(files_list) == 0:\n raise (ValueError(\"Failed to fetch files\"))\n print(f\"Fetched {len(files_list)} files.\")\n return files_list", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def post(self, shared, prep_res, exec_res):\n shared[\"files\"] = exec_res # List of (path, content) tuples", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def prep(self, shared):\n files_data = shared[\"files\"]\n project_name = shared[\"project_name\"] # Get project name\n language = shared.get(\"language\", \"english\") # Get language\n use_cache = shared.get(\"use_cache\", True) # Get use_cache flag, default to True\n max_abstraction_num = shared.get(\"max_abstraction_num\", 10) # Get max_abstraction_num, default to 10\n\n # Helper to create context from files, respecting limits (basic example)\n def create_llm_context(files_data):\n context = \"\"\n file_info = [] # Store tuples of (index, path)\n for i, (path, content) in enumerate(files_data):\n entry = f\"--- File Index {i}: {path} ---\\n{content}\\n\\n\"\n context += entry\n file_info.append((i, path))\n\n return context, file_info # file_info is list of (index, path)\n\n context, file_info = create_llm_context(files_data)\n # Format file info for the prompt (comment is just a hint for LLM)\n file_listing_for_prompt = \"\\n\".join(\n [f\"- {idx} # {path}\" for idx, path in file_info]\n )\n return (\n context,\n file_listing_for_prompt,\n len(files_data),\n project_name,\n language,\n use_cache,\n max_abstraction_num,\n ) # Return all parameters", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def exec(self, prep_res):\n (\n context,\n file_listing_for_prompt,\n file_count,\n project_name,\n language,\n use_cache,\n max_abstraction_num,\n ) = 
prep_res # Unpack all parameters\n print(f\"Identifying abstractions using LLM...\")\n\n # Add language instruction and hints only if not English\n language_instruction = \"\"\n name_lang_hint = \"\"\n desc_lang_hint = \"\"\n if language.lower() != \"english\":\n language_instruction = f\"IMPORTANT: Generate the `name` and `description` for each abstraction in **{language.capitalize()}** language. Do NOT use English for these fields.\\n\\n\"\n # Keep specific hints here as name/description are primary targets\n name_lang_hint = f\" (value in {language.capitalize()})\"\n desc_lang_hint = f\" (value in {language.capitalize()})\"\n\n prompt = f\"\"\"\nFor the project `{project_name}`:\n\nCodebase Context:\n{context}\n\n{language_instruction}Analyze the codebase context.\nIdentify the top 5-{max_abstraction_num} core most important abstractions to help those new to the codebase.\n\nFor each abstraction, provide:\n1. A concise `name`{name_lang_hint}.\n2. A beginner-friendly `description` explaining what it is with a simple analogy, in around 100 words{desc_lang_hint}.\n3. A list of relevant `file_indices` (integers) using the format `idx # path/comment`.\n\nList of file indices and paths present in the context:\n{file_listing_for_prompt}\n\nFormat the output as a YAML list of dictionaries:\n\n```yaml\n- name: |\n Query Processing{name_lang_hint}\n description: |\n Explains what the abstraction does.\n It's like a central dispatcher routing requests.{desc_lang_hint}\n file_indices:\n - 0 # path/to/file1.py\n - 3 # path/to/related.py\n- name: |\n Query Optimization{name_lang_hint}\n description: |\n Another core concept, similar to a blueprint for objects.{desc_lang_hint}\n file_indices:\n - 5 # path/to/another.js\n# ... up to {max_abstraction_num} abstractions\n```\"\"\"\n response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying\n\n # --- Validation ---\n yaml_str = response.strip().split(\"```yaml\")[1].split(\"```\")[0].strip()\n abstractions = yaml.safe_load(yaml_str)\n\n if not isinstance(abstractions, list):\n raise ValueError(\"LLM Output is not a list\")\n\n validated_abstractions = []\n for item in abstractions:\n if not isinstance(item, dict) or not all(\n k in item for k in [\"name\", \"description\", \"file_indices\"]\n ):\n raise ValueError(f\"Missing keys in abstraction item: {item}\")\n if not isinstance(item[\"name\"], str):\n raise ValueError(f\"Name is not a string in item: {item}\")\n if not isinstance(item[\"description\"], str):\n raise ValueError(f\"Description is not a string in item: {item}\")\n if not isinstance(item[\"file_indices\"], list):\n raise ValueError(f\"file_indices is not a list in item: {item}\")\n\n # Validate indices\n validated_indices = []\n for idx_entry in item[\"file_indices\"]:\n try:\n if isinstance(idx_entry, int):\n idx = idx_entry\n elif isinstance(idx_entry, str) and \"#\" in idx_entry:\n idx = int(idx_entry.split(\"#\")[0].strip())\n else:\n idx = int(str(idx_entry).strip())\n\n if not (0 <= idx < file_count):\n raise ValueError(\n f\"Invalid file index {idx} found in item {item['name']}. 
Max index is {file_count - 1}.\"\n )\n validated_indices.append(idx)\n except (ValueError, TypeError):\n raise ValueError(\n f\"Could not parse index from entry: {idx_entry} in item {item['name']}\"\n )\n\n item[\"files\"] = sorted(list(set(validated_indices)))\n # Store only the required fields\n validated_abstractions.append(\n {\n \"name\": item[\"name\"], # Potentially translated name\n \"description\": item[\n \"description\"\n ], # Potentially translated description\n \"files\": item[\"files\"],\n }\n )\n\n print(f\"Identified {len(validated_abstractions)} abstractions.\")\n return validated_abstractions", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def post(self, shared, prep_res, exec_res):\n shared[\"abstractions\"] = (\n exec_res # List of {\"name\": str, \"description\": str, \"files\": [int]}\n )", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": " def prep(self, shared):\n abstractions = shared[\n \"abstractions\"\n ] # Now contains 'files' list of indices, name/description potentially translated\n files_data = shared[\"files\"]\n project_name = shared[\"project_name\"] # Get project name\n language = shared.get(\"language\", \"english\") # Get language\n use_cache = shared.get(\"use_cache\", True) # Get use_cache flag, default to True\n\n # Get the actual number of abstractions directly\n num_abstractions = len(abstractions)\n\n # Create context with abstraction names, indices, descriptions, and relevant file snippets\n context = \"Identified Abstractions:\\\\n\"\n all_relevant_indices = set()\n abstraction_info_for_prompt = []\n for i, abstr in enumerate(abstractions):\n # Use 'files' which contains indices directly\n file_indices_str = \", \".join(map(str, abstr[\"files\"]))\n # Abstraction name and description might be translated already\n info_line = f\"- Index {i}: {abstr['name']} (Relevant file indices: [{file_indices_str}])\\\\n Description: {abstr['description']}\"\n context += info_line + \"\\\\n\"\n abstraction_info_for_prompt.append(\n f\"{i} # {abstr['name']}\"\n ) # Use potentially translated name here too\n all_relevant_indices.update(abstr[\"files\"])\n\n context += \"\\\\nRelevant File Snippets (Referenced by Index and Path):\\\\n\"\n # Get content for relevant files using helper\n relevant_files_content_map = get_content_for_indices(\n files_data, sorted(list(all_relevant_indices))\n )\n # Format file content for context\n file_context_str = \"\\\\n\\\\n\".join(\n f\"--- File: {idx_path} ---\\\\n{content}\"\n for idx_path, content in relevant_files_content_map.items()\n )\n context += file_context_str\n\n return (\n context,\n \"\\n\".join(abstraction_info_for_prompt),\n num_abstractions, # Pass the actual count\n project_name,\n language,\n use_cache,\n ) # Return use_cache", "creation_date": "2025-04-02T20:56:35Z", "repo": "The-Pocket/PocketFlow-Tutorial-Codebase-Knowledge", "file_path": "nodes.py", "stars": 10805, "label": 0} +{"function": "def env() -> Iterator[TestEnv]:\n test_env = TestEnv()\n\n yield test_env\n\n test_env.reset()", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def anyio_backend():\n return 'asyncio'", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": 
"tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def allow_model_requests():\n with pydantic_ai.models.override_allow_model_requests(True):\n yield", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "async def client_with_handler() -> AsyncIterator[ClientWithHandler]:\n client: httpx.AsyncClient | None = None\n\n def create_client(handler: Callable[[httpx.Request], httpx.Response]) -> httpx.AsyncClient:\n nonlocal client\n assert client is None, 'client_with_handler can only be called once'\n client = httpx.AsyncClient(mounts={'all://': httpx.MockTransport(handler)})\n return client\n\n try:\n yield create_client\n finally:\n if client: # pragma: no branch\n await client.aclose()", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def create_module(tmp_path: Path, request: pytest.FixtureRequest) -> Callable[[str], Any]:\n \"\"\"Taken from `pydantic/tests/conftest.py`, create module object, execute and return it.\"\"\"\n\n def run(\n source_code: str,\n rewrite_assertions: bool = True,\n module_name_prefix: str | None = None,\n ) -> ModuleType:\n \"\"\"Create module object, execute and return it.\n\n Can be used as a decorator of the function from the source code of which the module will be constructed.\n\n Args:\n source_code: Python source code of the module\n rewrite_assertions: whether to rewrite assertions in module or not\n module_name_prefix: string prefix to use in the name of the module, does not affect the name of the file.\n\n \"\"\"\n\n # Max path length in Windows is 260. Leaving some buffer here\n max_name_len = 240 - len(str(tmp_path))\n # Windows does not allow these characters in paths. 
Linux bans slashes only.\n sanitized_name = re.sub('[' + re.escape('<>:\"/\\\\|?*') + ']', '-', request.node.name)[:max_name_len]\n module_name = f'{sanitized_name}_{secrets.token_hex(5)}'\n path = tmp_path / f'{module_name}.py'\n path.write_text(source_code)\n filename = str(path)\n\n if module_name_prefix: # pragma: no cover\n module_name = module_name_prefix + module_name\n\n if rewrite_assertions:\n loader = AssertionRewritingHook(config=request.config)\n loader.mark_rewrite(module_name)\n else: # pragma: no cover\n loader = None\n\n spec = importlib.util.spec_from_file_location(module_name, filename, loader=loader)\n sys.modules[module_name] = module = importlib.util.module_from_spec(spec) # pyright: ignore[reportArgumentType]\n spec.loader.exec_module(module) # pyright: ignore[reportOptionalMemberAccess]\n return module\n\n return run", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def try_import() -> Iterator[Callable[[], bool]]:\n import_success = False\n\n def check_import() -> bool:\n return import_success\n\n try:\n yield check_import\n except ImportError:\n pass\n else:\n import_success = True", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def event_loop() -> Iterator[None]:\n new_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(new_loop)\n yield\n new_loop.close()", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def raise_if_exception(e: Any) -> None:\n if isinstance(e, Exception):\n raise e", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def pytest_recording_configure(config: Any, vcr: VCR):\n from . import json_body_serializer\n\n vcr.register_serializer('yaml', json_body_serializer)\n\n def method_matcher(r1: vcr_request.Request, r2: vcr_request.Request) -> None:\n if r1.method.upper() != r2.method.upper():\n raise AssertionError(f'{r1.method} != {r2.method}')\n\n vcr.register_matcher('method', method_matcher)", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def mock_vcr_aiohttp_content(mocker: MockerFixture):\n try:\n from vcr.stubs import aiohttp_stubs\n except ImportError:\n return\n\n # google-genai calls `self.response_stream.content.readline()` where `self.response_stream` is a `MockClientResponse`,\n # which creates a new `MockStream` each time instead of returning the same one, resulting in the readline cursor not being respected.\n # So we turn `content` into a cached property to return the same one each time.\n # VCR issue: https://github.com/kevin1024/vcrpy/issues/927. 
Once that's is resolved, we can remove this patch.\n cached_content = cached_property(aiohttp_stubs.MockClientResponse.content.fget) # type: ignore\n cached_content.__set_name__(aiohttp_stubs.MockClientResponse, 'content')\n mocker.patch('vcr.stubs.aiohttp_stubs.MockClientResponse.content', new=cached_content)\n mocker.patch('vcr.stubs.aiohttp_stubs.MockStream.set_exception', return_value=None)", "creation_date": "2024-10-17T06:06:35Z", "repo": "pydantic/pydantic-ai", "file_path": "tests/conftest.py", "stars": 10745, "label": 0} +{"function": "def initialize_model(model_path):\n args = parse_args()\n models_root_path = Path(model_path)\n if not models_root_path.exists():\n raise ValueError(f\"`models_root` not exists: {models_root_path}\")\n \n hunyuan_video_sampler = HunyuanVideoSampler.from_pretrained(models_root_path, args=args)\n return hunyuan_video_sampler", "creation_date": "2024-12-09T08:27:01Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "gradio_server.py", "stars": 10651, "label": 0} +{"function": "def generate_video(\n model,\n prompt,\n resolution,\n video_length,\n seed,\n num_inference_steps,\n guidance_scale,\n flow_shift,\n embedded_guidance_scale\n):\n seed = None if seed == -1 else seed\n width, height = resolution.split(\"x\")\n width, height = int(width), int(height)\n negative_prompt = \"\" # not applicable in the inference\n\n outputs = model.predict(\n prompt=prompt,\n height=height,\n width=width, \n video_length=video_length,\n seed=seed,\n negative_prompt=negative_prompt,\n infer_steps=num_inference_steps,\n guidance_scale=guidance_scale,\n num_videos_per_prompt=1,\n flow_shift=flow_shift,\n batch_size=1,\n embedded_guidance_scale=embedded_guidance_scale\n )\n \n samples = outputs['samples']\n sample = samples[0].unsqueeze(0)\n \n save_path = os.path.join(os.getcwd(), \"gradio_outputs\")\n os.makedirs(save_path, exist_ok=True)\n \n time_flag = datetime.fromtimestamp(time.time()).strftime(\"%Y-%m-%d-%H:%M:%S\")\n video_path = f\"{save_path}/{time_flag}_seed{outputs['seeds'][0]}_{outputs['prompts'][0][:100].replace('/','')}.mp4\"\n save_videos_grid(sample, video_path, fps=24)\n logger.info(f'Sample saved to: {video_path}')\n \n return video_path", "creation_date": "2024-12-09T08:27:01Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "gradio_server.py", "stars": 10651, "label": 0} +{"function": "def create_demo(model_path, save_path):\n model = initialize_model(model_path)\n \n with gr.Blocks() as demo:\n gr.Markdown(\"# Hunyuan Video Generation\")\n \n with gr.Row():\n with gr.Column():\n prompt = gr.Textbox(label=\"Prompt\", value=\"A cat walks on the grass, realistic style.\")\n with gr.Row():\n resolution = gr.Dropdown(\n choices=[\n # 720p\n (\"1280x720 (16:9, 720p)\", \"1280x720\"),\n (\"720x1280 (9:16, 720p)\", \"720x1280\"), \n (\"1104x832 (4:3, 720p)\", \"1104x832\"),\n (\"832x1104 (3:4, 720p)\", \"832x1104\"),\n (\"960x960 (1:1, 720p)\", \"960x960\"),\n # 540p\n (\"960x544 (16:9, 540p)\", \"960x544\"),\n (\"544x960 (9:16, 540p)\", \"544x960\"),\n (\"832x624 (4:3, 540p)\", \"832x624\"), \n (\"624x832 (3:4, 540p)\", \"624x832\"),\n (\"720x720 (1:1, 540p)\", \"720x720\"),\n ],\n value=\"1280x720\",\n label=\"Resolution\"\n )\n video_length = gr.Dropdown(\n label=\"Video Length\",\n choices=[\n (\"2s(65f)\", 65),\n (\"5s(129f)\", 129),\n ],\n value=129,\n )\n num_inference_steps = gr.Slider(1, 100, value=50, step=1, label=\"Number of Inference Steps\")\n show_advanced = gr.Checkbox(label=\"Show Advanced Options\", value=False)\n with 
gr.Row(visible=False) as advanced_row:\n with gr.Column():\n seed = gr.Number(value=-1, label=\"Seed (-1 for random)\")\n guidance_scale = gr.Slider(1.0, 20.0, value=1.0, step=0.5, label=\"Guidance Scale\")\n flow_shift = gr.Slider(0.0, 10.0, value=7.0, step=0.1, label=\"Flow Shift\") \n embedded_guidance_scale = gr.Slider(1.0, 20.0, value=6.0, step=0.5, label=\"Embedded Guidance Scale\")\n show_advanced.change(fn=lambda x: gr.Row(visible=x), inputs=[show_advanced], outputs=[advanced_row])\n generate_btn = gr.Button(\"Generate\")\n \n with gr.Column():\n output = gr.Video(label=\"Generated Video\")\n \n generate_btn.click(\n fn=lambda *inputs: generate_video(model, *inputs),\n inputs=[\n prompt,\n resolution,\n video_length,\n seed,\n num_inference_steps,\n guidance_scale,\n flow_shift,\n embedded_guidance_scale\n ],\n outputs=output\n )\n \n return demo", "creation_date": "2024-12-09T08:27:01Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "gradio_server.py", "stars": 10651, "label": 0} +{"function": "def main():\n args = parse_args()\n print(args)\n models_root_path = Path(args.model_base)\n if not models_root_path.exists():\n raise ValueError(f\"`models_root` not exists: {models_root_path}\")\n \n # Create save folder to save the samples\n save_path = args.save_path if args.save_path_suffix==\"\" else f'{args.save_path}_{args.save_path_suffix}'\n if not os.path.exists(save_path):\n os.makedirs(save_path, exist_ok=True)\n\n # Load models\n hunyuan_video_sampler = HunyuanVideoSampler.from_pretrained(models_root_path, args=args)\n \n # Get the updated args\n args = hunyuan_video_sampler.args\n\n # Start sampling\n # TODO: batch inference check\n outputs = hunyuan_video_sampler.predict(\n prompt=args.prompt, \n height=args.video_size[0],\n width=args.video_size[1],\n video_length=args.video_length,\n seed=args.seed,\n negative_prompt=args.neg_prompt,\n infer_steps=args.infer_steps,\n guidance_scale=args.cfg_scale,\n num_videos_per_prompt=args.num_videos,\n flow_shift=args.flow_shift,\n batch_size=args.batch_size,\n embedded_guidance_scale=args.embedded_cfg_scale\n )\n samples = outputs['samples']\n \n # Save samples\n if 'LOCAL_RANK' not in os.environ or int(os.environ['LOCAL_RANK']) == 0:\n for i, sample in enumerate(samples):\n sample = samples[i].unsqueeze(0)\n time_flag = datetime.fromtimestamp(time.time()).strftime(\"%Y-%m-%d-%H:%M:%S\")\n cur_save_path = f\"{save_path}/{time_flag}_seed{outputs['seeds'][i]}_{outputs['prompts'][i][:100].replace('/','')}.mp4\"\n save_videos_grid(sample, cur_save_path, fps=24)\n logger.info(f'Sample save to: {cur_save_path}')", "creation_date": "2024-12-03T02:17:28Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "sample_video.py", "stars": 10651, "label": 0} +{"function": "def is_rocm_pytorch() -> bool:\n \"\"\"Check whether the PyTorch is compiled on ROCm.\"\"\"\n is_rocm = False\n if TORCH_VERSION != 'parrots':\n try:\n from torch.utils.cpp_extension import ROCM_HOME\n is_rocm = True if ((torch.version.hip is not None) and\n (ROCM_HOME is not None)) else False\n except ImportError:\n pass\n return is_rocm", "creation_date": "2024-12-03T02:17:28Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "utils/collect_env.py", "stars": 10651, "label": 0} +{"function": "def get_build_config():\n \"\"\"Obtain the build information of PyTorch or Parrots.\"\"\"\n if TORCH_VERSION == 'parrots':\n from parrots.config import get_build_info\n return get_build_info()\n else:\n return torch.__config__.show()", "creation_date": "2024-12-03T02:17:28Z", 
"repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "utils/collect_env.py", "stars": 10651, "label": 0} +{"function": "def is_musa_available() -> bool:\n return IS_MUSA_AVAILABLE", "creation_date": "2024-12-03T02:17:28Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "utils/collect_env.py", "stars": 10651, "label": 0} +{"function": "def is_cuda_available() -> bool:\n \"\"\"Returns True if cuda devices exist.\"\"\"\n return torch.cuda.is_available()", "creation_date": "2024-12-03T02:17:28Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "utils/collect_env.py", "stars": 10651, "label": 0} +{"function": "def _get_cuda_home():\n if TORCH_VERSION == 'parrots':\n from parrots.utils.build_extension import CUDA_HOME\n else:\n if is_rocm_pytorch():\n from torch.utils.cpp_extension import ROCM_HOME\n CUDA_HOME = ROCM_HOME\n else:\n from torch.utils.cpp_extension import CUDA_HOME\n return CUDA_HOME", "creation_date": "2024-12-03T02:17:28Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "utils/collect_env.py", "stars": 10651, "label": 0} +{"function": "def _get_musa_home():\n return os.environ.get('MUSA_HOME')", "creation_date": "2024-12-03T02:17:28Z", "repo": "Tencent-Hunyuan/HunyuanVideo", "file_path": "utils/collect_env.py", "stars": 10651, "label": 0} +{"function": "def start_session(req: gr.Request):\n user_dir = os.path.join(TMP_DIR, str(req.session_hash))\n os.makedirs(user_dir, exist_ok=True)", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def end_session(req: gr.Request):\n user_dir = os.path.join(TMP_DIR, str(req.session_hash))\n shutil.rmtree(user_dir)", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def preprocess_image(image: Image.Image) -> Image.Image:\n \"\"\"\n Preprocess the input image.\n\n Args:\n image (Image.Image): The input image.\n\n Returns:\n Image.Image: The preprocessed image.\n \"\"\"\n processed_image = pipeline.preprocess_image(image)\n return processed_image", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:\n \"\"\"\n Preprocess a list of input images.\n \n Args:\n images (List[Tuple[Image.Image, str]]): The input images.\n \n Returns:\n List[Image.Image]: The preprocessed images.\n \"\"\"\n images = [image[0] for image in images]\n processed_images = [pipeline.preprocess_image(image) for image in images]\n return processed_images", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:\n return {\n 'gaussian': {\n **gs.init_params,\n '_xyz': gs._xyz.cpu().numpy(),\n '_features_dc': gs._features_dc.cpu().numpy(),\n '_scaling': gs._scaling.cpu().numpy(),\n '_rotation': gs._rotation.cpu().numpy(),\n '_opacity': gs._opacity.cpu().numpy(),\n },\n 'mesh': {\n 'vertices': mesh.vertices.cpu().numpy(),\n 'faces': mesh.faces.cpu().numpy(),\n },\n }", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:\n gs = Gaussian(\n aabb=state['gaussian']['aabb'],\n sh_degree=state['gaussian']['sh_degree'],\n 
mininum_kernel_size=state['gaussian']['mininum_kernel_size'],\n scaling_bias=state['gaussian']['scaling_bias'],\n opacity_bias=state['gaussian']['opacity_bias'],\n scaling_activation=state['gaussian']['scaling_activation'],\n )\n gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')\n gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')\n gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')\n gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')\n gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')\n \n mesh = edict(\n vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),\n faces=torch.tensor(state['mesh']['faces'], device='cuda'),\n )\n \n return gs, mesh", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def get_seed(randomize_seed: bool, seed: int) -> int:\n \"\"\"\n Get the random seed.\n \"\"\"\n return np.random.randint(0, MAX_SEED) if randomize_seed else seed", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def image_to_3d(\n image: Image.Image,\n multiimages: List[Tuple[Image.Image, str]],\n is_multiimage: bool,\n seed: int,\n ss_guidance_strength: float,\n ss_sampling_steps: int,\n slat_guidance_strength: float,\n slat_sampling_steps: int,\n multiimage_algo: Literal[\"multidiffusion\", \"stochastic\"],\n req: gr.Request,\n) -> Tuple[dict, str]:\n \"\"\"\n Convert an image to a 3D model.\n\n Args:\n image (Image.Image): The input image.\n multiimages (List[Tuple[Image.Image, str]]): The input images in multi-image mode.\n is_multiimage (bool): Whether is in multi-image mode.\n seed (int): The random seed.\n ss_guidance_strength (float): The guidance strength for sparse structure generation.\n ss_sampling_steps (int): The number of sampling steps for sparse structure generation.\n slat_guidance_strength (float): The guidance strength for structured latent generation.\n slat_sampling_steps (int): The number of sampling steps for structured latent generation.\n multiimage_algo (Literal[\"multidiffusion\", \"stochastic\"]): The algorithm for multi-image generation.\n\n Returns:\n dict: The information of the generated 3D model.\n str: The path to the video of the 3D model.\n \"\"\"\n user_dir = os.path.join(TMP_DIR, str(req.session_hash))\n if not is_multiimage:\n outputs = pipeline.run(\n image,\n seed=seed,\n formats=[\"gaussian\", \"mesh\"],\n preprocess_image=False,\n sparse_structure_sampler_params={\n \"steps\": ss_sampling_steps,\n \"cfg_strength\": ss_guidance_strength,\n },\n slat_sampler_params={\n \"steps\": slat_sampling_steps,\n \"cfg_strength\": slat_guidance_strength,\n },\n )\n else:\n outputs = pipeline.run_multi_image(\n [image[0] for image in multiimages],\n seed=seed,\n formats=[\"gaussian\", \"mesh\"],\n preprocess_image=False,\n sparse_structure_sampler_params={\n \"steps\": ss_sampling_steps,\n \"cfg_strength\": ss_guidance_strength,\n },\n slat_sampler_params={\n \"steps\": slat_sampling_steps,\n \"cfg_strength\": slat_guidance_strength,\n },\n mode=multiimage_algo,\n )\n video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']\n video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']\n video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]\n video_path = os.path.join(user_dir, 'sample.mp4')\n 
imageio.mimsave(video_path, video, fps=15)\n state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])\n torch.cuda.empty_cache()\n return state, video_path", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def extract_glb(\n state: dict,\n mesh_simplify: float,\n texture_size: int,\n req: gr.Request,\n) -> Tuple[str, str]:\n \"\"\"\n Extract a GLB file from the 3D model.\n\n Args:\n state (dict): The state of the generated 3D model.\n mesh_simplify (float): The mesh simplification factor.\n texture_size (int): The texture resolution.\n\n Returns:\n str: The path to the extracted GLB file.\n \"\"\"\n user_dir = os.path.join(TMP_DIR, str(req.session_hash))\n gs, mesh = unpack_state(state)\n glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)\n glb_path = os.path.join(user_dir, 'sample.glb')\n glb.export(glb_path)\n torch.cuda.empty_cache()\n return glb_path, glb_path", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:\n \"\"\"\n Extract a Gaussian file from the 3D model.\n\n Args:\n state (dict): The state of the generated 3D model.\n\n Returns:\n str: The path to the extracted Gaussian file.\n \"\"\"\n user_dir = os.path.join(TMP_DIR, str(req.session_hash))\n gs, _ = unpack_state(state)\n gaussian_path = os.path.join(user_dir, 'sample.ply')\n gs.save_ply(gaussian_path)\n torch.cuda.empty_cache()\n return gaussian_path, gaussian_path", "creation_date": "2024-12-05T13:13:45Z", "repo": "microsoft/TRELLIS", "file_path": "app.py", "stars": 10090, "label": 0} +{"function": "def initialize_model(model_dir=\"pretrained_models/Spark-TTS-0.5B\", device=0):\n \"\"\"Load the model once at the beginning.\"\"\"\n logging.info(f\"Loading model from: {model_dir}\")\n\n # Determine appropriate device based on platform and availability\n if platform.system() == \"Darwin\":\n # macOS with MPS support (Apple Silicon)\n device = torch.device(f\"mps:{device}\")\n logging.info(f\"Using MPS device: {device}\")\n elif torch.cuda.is_available():\n # System with CUDA support\n device = torch.device(f\"cuda:{device}\")\n logging.info(f\"Using CUDA device: {device}\")\n else:\n # Fall back to CPU\n device = torch.device(\"cpu\")\n logging.info(\"GPU acceleration not available, using CPU\")\n\n model = SparkTTS(model_dir, device)\n return model", "creation_date": "2025-02-26T12:24:09Z", "repo": "SparkAudio/Spark-TTS", "file_path": "webui.py", "stars": 10017, "label": 0} +{"function": "def run_tts(\n text,\n model,\n prompt_text=None,\n prompt_speech=None,\n gender=None,\n pitch=None,\n speed=None,\n save_dir=\"example/results\",\n):\n \"\"\"Perform TTS inference and save the generated audio.\"\"\"\n logging.info(f\"Saving audio to: {save_dir}\")\n\n if prompt_text is not None:\n prompt_text = None if len(prompt_text) <= 1 else prompt_text\n\n # Ensure the save directory exists\n os.makedirs(save_dir, exist_ok=True)\n\n # Generate unique filename using timestamp\n timestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n save_path = os.path.join(save_dir, f\"{timestamp}.wav\")\n\n logging.info(\"Starting inference...\")\n\n # Perform inference and save the output audio\n with torch.no_grad():\n wav = model.inference(\n text,\n prompt_speech,\n prompt_text,\n gender,\n pitch,\n speed,\n )\n\n sf.write(save_path, wav, 
samplerate=16000)\n\n logging.info(f\"Audio saved at: {save_path}\")\n\n return save_path", "creation_date": "2025-02-26T12:24:09Z", "repo": "SparkAudio/Spark-TTS", "file_path": "webui.py", "stars": 10017, "label": 0} +{"function": "def build_ui(model_dir, device=0):\n\n # Initialize model\n model = initialize_model(model_dir, device=device)\n\n # Define callback function for voice cloning\n def voice_clone(text, prompt_text, prompt_wav_upload, prompt_wav_record):\n \"\"\"\n Gradio callback to clone voice using text and optional prompt speech.\n - text: The input text to be synthesised.\n - prompt_text: Additional textual info for the prompt (optional).\n - prompt_wav_upload/prompt_wav_record: Audio files used as reference.\n \"\"\"\n prompt_speech = prompt_wav_upload if prompt_wav_upload else prompt_wav_record\n prompt_text_clean = None if len(prompt_text) < 2 else prompt_text\n\n audio_output_path = run_tts(\n text,\n model,\n prompt_text=prompt_text_clean,\n prompt_speech=prompt_speech\n )\n return audio_output_path\n\n # Define callback function for creating new voices\n def voice_creation(text, gender, pitch, speed):\n \"\"\"\n Gradio callback to create a synthetic voice with adjustable parameters.\n - text: The input text for synthesis.\n - gender: 'male' or 'female'.\n - pitch/speed: Ranges mapped by LEVELS_MAP_UI.\n \"\"\"\n pitch_val = LEVELS_MAP_UI[int(pitch)]\n speed_val = LEVELS_MAP_UI[int(speed)]\n audio_output_path = run_tts(\n text,\n model,\n gender=gender,\n pitch=pitch_val,\n speed=speed_val\n )\n return audio_output_path\n\n with gr.Blocks() as demo:\n # Use HTML for centered title\n gr.HTML('
Spark-TTS by SparkAudio
')\n with gr.Tabs():\n # Voice Clone Tab\n with gr.TabItem(\"Voice Clone\"):\n gr.Markdown(\n \"### Upload reference audio or recording \uff08\u4e0a\u4f20\u53c2\u8003\u97f3\u9891\u6216\u8005\u5f55\u97f3\uff09\"\n )\n\n with gr.Row():\n prompt_wav_upload = gr.Audio(\n sources=\"upload\",\n type=\"filepath\",\n label=\"Choose the prompt audio file, ensuring the sampling rate is no lower than 16kHz.\",\n )\n prompt_wav_record = gr.Audio(\n sources=\"microphone\",\n type=\"filepath\",\n label=\"Record the prompt audio file.\",\n )\n\n with gr.Row():\n text_input = gr.Textbox(\n label=\"Text\", lines=3, placeholder=\"Enter text here\"\n )\n prompt_text_input = gr.Textbox(\n label=\"Text of prompt speech (Optional; recommended for cloning in the same language.)\",\n lines=3,\n placeholder=\"Enter text of the prompt speech.\",\n )\n\n audio_output = gr.Audio(\n label=\"Generated Audio\", autoplay=True, streaming=True\n )\n\n generate_buttom_clone = gr.Button(\"Generate\")\n\n generate_buttom_clone.click(\n voice_clone,\n inputs=[\n text_input,\n prompt_text_input,\n prompt_wav_upload,\n prompt_wav_record,\n ],\n outputs=[audio_output],\n )\n\n # Voice Creation Tab\n with gr.TabItem(\"Voice Creation\"):\n gr.Markdown(\n \"### Create your own voice based on the following parameters\"\n )\n\n with gr.Row():\n with gr.Column():\n gender = gr.Radio(\n choices=[\"male\", \"female\"], value=\"male\", label=\"Gender\"\n )\n pitch = gr.Slider(\n minimum=1, maximum=5, step=1, value=3, label=\"Pitch\"\n )\n speed = gr.Slider(\n minimum=1, maximum=5, step=1, value=3, label=\"Speed\"\n )\n with gr.Column():\n text_input_creation = gr.Textbox(\n label=\"Input Text\",\n lines=3,\n placeholder=\"Enter text here\",\n value=\"You can generate a customized voice by adjusting parameters such as pitch and speed.\",\n )\n create_button = gr.Button(\"Create Voice\")\n\n audio_output = gr.Audio(\n label=\"Generated Audio\", autoplay=True, streaming=True\n )\n create_button.click(\n voice_creation,\n inputs=[text_input_creation, gender, pitch, speed],\n outputs=[audio_output],\n )\n\n return demo", "creation_date": "2025-02-26T12:24:09Z", "repo": "SparkAudio/Spark-TTS", "file_path": "webui.py", "stars": 10017, "label": 0} +{"function": "def parse_arguments():\n \"\"\"\n Parse command-line arguments such as model directory and device ID.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Spark TTS Gradio server.\")\n parser.add_argument(\n \"--model_dir\",\n type=str,\n default=\"pretrained_models/Spark-TTS-0.5B\",\n help=\"Path to the model directory.\"\n )\n parser.add_argument(\n \"--device\",\n type=int,\n default=0,\n help=\"ID of the GPU device to use (e.g., 0 for cuda:0).\"\n )\n parser.add_argument(\n \"--server_name\",\n type=str,\n default=\"0.0.0.0\",\n help=\"Server host/IP for Gradio app.\"\n )\n parser.add_argument(\n \"--server_port\",\n type=int,\n default=7860,\n help=\"Server port for Gradio app.\"\n )\n return parser.parse_args()", "creation_date": "2025-02-26T12:24:09Z", "repo": "SparkAudio/Spark-TTS", "file_path": "webui.py", "stars": 10017, "label": 0} +{"function": " def voice_clone(text, prompt_text, prompt_wav_upload, prompt_wav_record):\n \"\"\"\n Gradio callback to clone voice using text and optional prompt speech.\n - text: The input text to be synthesised.\n - prompt_text: Additional textual info for the prompt (optional).\n - prompt_wav_upload/prompt_wav_record: Audio files used as reference.\n \"\"\"\n prompt_speech = prompt_wav_upload if prompt_wav_upload else prompt_wav_record\n 
prompt_text_clean = None if len(prompt_text) < 2 else prompt_text\n\n audio_output_path = run_tts(\n text,\n model,\n prompt_text=prompt_text_clean,\n prompt_speech=prompt_speech\n )\n return audio_output_path", "creation_date": "2025-02-26T12:24:09Z", "repo": "SparkAudio/Spark-TTS", "file_path": "webui.py", "stars": 10017, "label": 0} +{"function": " def voice_creation(text, gender, pitch, speed):\n \"\"\"\n Gradio callback to create a synthetic voice with adjustable parameters.\n - text: The input text for synthesis.\n - gender: 'male' or 'female'.\n - pitch/speed: Ranges mapped by LEVELS_MAP_UI.\n \"\"\"\n pitch_val = LEVELS_MAP_UI[int(pitch)]\n speed_val = LEVELS_MAP_UI[int(speed)]\n audio_output_path = run_tts(\n text,\n model,\n gender=gender,\n pitch=pitch_val,\n speed=speed_val\n )\n return audio_output_path", "creation_date": "2025-02-26T12:24:09Z", "repo": "SparkAudio/Spark-TTS", "file_path": "webui.py", "stars": 10017, "label": 0} +{"function": "def audio_volume_normalize(audio: np.ndarray, coeff: float = 0.2) -> np.ndarray:\n \"\"\"\n Normalize the volume of an audio signal.\n\n Parameters:\n audio (numpy array): Input audio signal array.\n coeff (float): Target coefficient for normalization, default is 0.2.\n\n Returns:\n numpy array: The volume-normalized audio signal.\n \"\"\"\n # Sort the absolute values of the audio signal\n temp = np.sort(np.abs(audio))\n\n # If the maximum value is less than 0.1, scale the array to have a maximum of 0.1\n if temp[-1] < 0.1:\n scaling_factor = max(\n temp[-1], 1e-3\n ) # Prevent division by zero with a small constant\n audio = audio / scaling_factor * 0.1\n\n # Filter out values less than 0.01 from temp\n temp = temp[temp > 0.01]\n L = temp.shape[0] # Length of the filtered array\n\n # If there are fewer than or equal to 10 significant values, return the audio without further processing\n if L <= 10:\n return audio\n\n # Compute the average of the top 10% to 1% of values in temp\n volume = np.mean(temp[int(0.9 * L) : int(0.99 * L)])\n\n # Normalize the audio to the target coefficient level, clamping the scale factor between 0.1 and 10\n audio = audio * np.clip(coeff / volume, a_min=0.1, a_max=10)\n\n # Ensure the maximum absolute value in the audio does not exceed 1\n max_value = np.max(np.abs(audio))\n if max_value > 1:\n audio = audio / max_value\n\n return audio", "creation_date": "2025-02-25T06:15:54Z", "repo": "SparkAudio/Spark-TTS", "file_path": "sparktts/utils/audio.py", "stars": 10017, "label": 0} +{"function": "def load_audio(\n adfile: Path,\n sampling_rate: int = None,\n length: int = None,\n volume_normalize: bool = False,\n segment_duration: int = None,\n) -> np.ndarray:\n r\"\"\"Load audio file with target sampling rate and lsength\n\n Args:\n adfile (Path): path to audio file.\n sampling_rate (int, optional): target sampling rate. Defaults to None.\n length (int, optional): target audio length. Defaults to None.\n volume_normalize (bool, optional): whether perform volume normalization. 
Defaults to False.\n segment_duration (int): randomly select a segment with a duration of {segment_duration}s.\n Defaults to None, which means the whole audio will be used.\n\n Returns:\n audio (np.ndarray): audio\n \"\"\"\n\n audio, sr = soundfile.read(adfile)\n if len(audio.shape) > 1:\n audio = audio[:, 0]\n\n if sampling_rate is not None and sr != sampling_rate:\n audio = soxr.resample(audio, sr, sampling_rate, quality=\"VHQ\")\n sr = sampling_rate\n\n if segment_duration is not None:\n seg_length = int(sr * segment_duration)\n audio = random_select_audio_segment(audio, seg_length)\n\n # Audio volume normalize\n if volume_normalize:\n audio = audio_volume_normalize(audio)\n # check the audio length\n if length is not None:\n assert abs(audio.shape[0] - length) < 1000\n if audio.shape[0] > length:\n audio = audio[:length]\n else:\n audio = np.pad(audio, (0, int(length - audio.shape[0])))\n return audio", "creation_date": "2025-02-25T06:15:54Z", "repo": "SparkAudio/Spark-TTS", "file_path": "sparktts/utils/audio.py", "stars": 10017, "label": 0}
+{"function": "def random_select_audio_segment(audio: np.ndarray, length: int) -> np.ndarray:\n \"\"\"Get an audio segment of the given length\n\n Args:\n audio (np.ndarray):\n length (int): audio length = sampling_rate * duration\n \"\"\"\n if audio.shape[0] < length:\n audio = np.pad(audio, (0, int(length - audio.shape[0])))\n start_index = random.randint(0, audio.shape[0] - length)\n end_index = int(start_index + length)\n\n return audio[start_index:end_index]", "creation_date": "2025-02-25T06:15:54Z", "repo": "SparkAudio/Spark-TTS", "file_path": "sparktts/utils/audio.py", "stars": 10017, "label": 0}
+{"function": "def audio_highpass_filter(audio, sample_rate, highpass_cutoff_freq):\n \"\"\"Apply a highpass filter to audio\n\n Args:\n audio (np.ndarray):\n sample_rate (int):\n highpass_cutoff_freq (int):\n \"\"\"\n\n audio = torchaudio.functional.highpass_biquad(\n torch.from_numpy(audio), sample_rate, cutoff_freq=highpass_cutoff_freq\n )\n return audio.numpy()", "creation_date": "2025-02-25T06:15:54Z", "repo": "SparkAudio/Spark-TTS", "file_path": "sparktts/utils/audio.py", "stars": 10017, "label": 0}
+{"function": "def unpack(text):\n # we do this because `pytest -v .` prints the arguments to console, and we don't\n # want to print the entire contents of the file, it creates a mess.
So here we go.\n if text.startswith(\"FILE:\"):\n dirname = os.path.dirname(os.path.abspath(__file__))\n taylorswift_file = os.path.join(dirname, text[5:])\n contents = open(taylorswift_file, \"r\", encoding=\"utf-8\").read()\n return contents\n else:\n return text", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "tests/test_tokenizer.py", "stars": 9747, "label": 0} +{"function": "def test_encode_decode_identity(tokenizer_factory, text):\n text = unpack(text)\n tokenizer = tokenizer_factory()\n ids = tokenizer.encode(text)\n decoded = tokenizer.decode(ids)\n assert text == decoded", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "tests/test_tokenizer.py", "stars": 9747, "label": 0} +{"function": "def test_gpt4_tiktoken_equality(text):\n text = unpack(text)\n tokenizer = GPT4Tokenizer()\n enc = tiktoken.get_encoding(\"cl100k_base\")\n tiktoken_ids = enc.encode(text)\n gpt4_tokenizer_ids = tokenizer.encode(text)\n assert gpt4_tokenizer_ids == tiktoken_ids", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "tests/test_tokenizer.py", "stars": 9747, "label": 0} +{"function": "def test_gpt4_tiktoken_equality_special_tokens():\n tokenizer = GPT4Tokenizer()\n enc = tiktoken.get_encoding(\"cl100k_base\")\n tiktoken_ids = enc.encode(specials_string, allowed_special=\"all\")\n gpt4_tokenizer_ids = tokenizer.encode(specials_string, allowed_special=\"all\")\n assert gpt4_tokenizer_ids == tiktoken_ids", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "tests/test_tokenizer.py", "stars": 9747, "label": 0} +{"function": "def test_wikipedia_example(tokenizer_factory):\n \"\"\"\n Quick unit test, following along the Wikipedia example:\n https://en.wikipedia.org/wiki/Byte_pair_encoding\n\n According to Wikipedia, running bpe on the input string:\n \"aaabdaaabac\"\n\n for 3 merges will result in string:\n \"XdXac\"\n\n where:\n X=ZY\n Y=ab\n Z=aa\n\n Keep in mind that for us a=97, b=98, c=99, d=100 (ASCII values)\n so Z will be 256, Y will be 257, X will be 258.\n\n So we expect the output list of ids to be [258, 100, 258, 97, 99]\n \"\"\"\n tokenizer = tokenizer_factory()\n text = \"aaabdaaabac\"\n tokenizer.train(text, 256 + 3)\n ids = tokenizer.encode(text)\n assert ids == [258, 100, 258, 97, 99]\n assert tokenizer.decode(tokenizer.encode(text)) == text", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "tests/test_tokenizer.py", "stars": 9747, "label": 0} +{"function": "def test_save_load(special_tokens):\n # take a bit more complex piece of text and train the tokenizer, chosen at random\n text = llama_text\n # create a Tokenizer and do 64 merges\n tokenizer = RegexTokenizer()\n tokenizer.train(text, 256 + 64)\n tokenizer.register_special_tokens(special_tokens)\n # verify that decode(encode(x)) == x\n assert tokenizer.decode(tokenizer.encode(text, \"all\")) == text\n # verify that save/load work as expected\n ids = tokenizer.encode(text, \"all\")\n # save the tokenizer (TODO use a proper temporary directory)\n tokenizer.save(\"test_tokenizer_tmp\")\n # re-load the tokenizer\n tokenizer = RegexTokenizer()\n tokenizer.load(\"test_tokenizer_tmp.model\")\n # verify that decode(encode(x)) == x\n assert tokenizer.decode(ids) == text\n assert tokenizer.decode(tokenizer.encode(text, \"all\")) == text\n assert tokenizer.encode(text, \"all\") == ids\n # delete the temporary files\n for file in [\"test_tokenizer_tmp.model\", 
\"test_tokenizer_tmp.vocab\"]:\n os.remove(file)", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "tests/test_tokenizer.py", "stars": 9747, "label": 0} +{"function": "def get_stats(ids, counts=None):\n \"\"\"\n Given a list of integers, return a dictionary of counts of consecutive pairs\n Example: [1, 2, 3, 1, 2] -> {(1, 2): 2, (2, 3): 1, (3, 1): 1}\n Optionally allows to update an existing dictionary of counts\n \"\"\"\n counts = {} if counts is None else counts\n for pair in zip(ids, ids[1:]): # iterate consecutive elements\n counts[pair] = counts.get(pair, 0) + 1\n return counts", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "minbpe/base.py", "stars": 9747, "label": 0} +{"function": "def merge(ids, pair, idx):\n \"\"\"\n In the list of integers (ids), replace all consecutive occurrences\n of pair with the new integer token idx\n Example: ids=[1, 2, 3, 1, 2], pair=(1, 2), idx=4 -> [4, 3, 4]\n \"\"\"\n newids = []\n i = 0\n while i < len(ids):\n # if not at the very last position AND the pair matches, replace it\n if ids[i] == pair[0] and i < len(ids) - 1 and ids[i+1] == pair[1]:\n newids.append(idx)\n i += 2\n else:\n newids.append(ids[i])\n i += 1\n return newids", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "minbpe/base.py", "stars": 9747, "label": 0} +{"function": "def replace_control_characters(s: str) -> str:\n # we don't want to print control characters\n # which distort the output (e.g. \\n or much worse)\n # https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python/19016117#19016117\n # http://www.unicode.org/reports/tr44/#GC_Values_Table\n chars = []\n for ch in s:\n if unicodedata.category(ch)[0] != \"C\":\n chars.append(ch) # this character is ok\n else:\n chars.append(f\"\\\\u{ord(ch):04x}\") # escape\n return \"\".join(chars)", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "minbpe/base.py", "stars": 9747, "label": 0} +{"function": "def render_token(t: bytes) -> str:\n # pretty print a token, escaping control characters\n s = t.decode('utf-8', errors='replace')\n s = replace_control_characters(s)\n return s", "creation_date": "2024-02-18T01:02:17Z", "repo": "karpathy/minbpe", "file_path": "minbpe/base.py", "stars": 9747, "label": 0} +{"function": "def parse_args():\n parser = argparse.ArgumentParser(description=\"VGGT Demo\")\n parser.add_argument(\"--scene_dir\", type=str, required=True, help=\"Directory containing the scene images\")\n parser.add_argument(\"--seed\", type=int, default=42, help=\"Random seed for reproducibility\")\n parser.add_argument(\"--use_ba\", action=\"store_true\", default=False, help=\"Use BA for reconstruction\")\n ######### BA parameters #########\n parser.add_argument(\n \"--max_reproj_error\", type=float, default=8.0, help=\"Maximum reprojection error for reconstruction\"\n )\n parser.add_argument(\"--shared_camera\", action=\"store_true\", default=False, help=\"Use shared camera for all images\")\n parser.add_argument(\"--camera_type\", type=str, default=\"SIMPLE_PINHOLE\", help=\"Camera type for reconstruction\")\n parser.add_argument(\"--vis_thresh\", type=float, default=0.2, help=\"Visibility threshold for tracks\")\n parser.add_argument(\"--query_frame_num\", type=int, default=8, help=\"Number of frames to query\")\n parser.add_argument(\"--max_query_pts\", type=int, default=4096, help=\"Maximum number of query points\")\n parser.add_argument(\n 
\"--fine_tracking\", action=\"store_true\", default=True, help=\"Use fine tracking (slower but more accurate)\"\n )\n parser.add_argument(\n \"--conf_thres_value\", type=float, default=5.0, help=\"Confidence threshold value for depth filtering (wo BA)\"\n )\n return parser.parse_args()", "creation_date": "2025-05-22T22:24:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_colmap.py", "stars": 9709, "label": 0} +{"function": "def run_VGGT(model, images, dtype, resolution=518):\n # images: [B, 3, H, W]\n\n assert len(images.shape) == 4\n assert images.shape[1] == 3\n\n # hard-coded to use 518 for VGGT\n images = F.interpolate(images, size=(resolution, resolution), mode=\"bilinear\", align_corners=False)\n\n with torch.no_grad():\n with torch.cuda.amp.autocast(dtype=dtype):\n images = images[None] # add batch dimension\n aggregated_tokens_list, ps_idx = model.aggregator(images)\n\n # Predict Cameras\n pose_enc = model.camera_head(aggregated_tokens_list)[-1]\n # Extrinsic and intrinsic matrices, following OpenCV convention (camera from world)\n extrinsic, intrinsic = pose_encoding_to_extri_intri(pose_enc, images.shape[-2:])\n # Predict Depth Maps\n depth_map, depth_conf = model.depth_head(aggregated_tokens_list, images, ps_idx)\n\n extrinsic = extrinsic.squeeze(0).cpu().numpy()\n intrinsic = intrinsic.squeeze(0).cpu().numpy()\n depth_map = depth_map.squeeze(0).cpu().numpy()\n depth_conf = depth_conf.squeeze(0).cpu().numpy()\n return extrinsic, intrinsic, depth_map, depth_conf", "creation_date": "2025-05-22T22:24:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_colmap.py", "stars": 9709, "label": 0} +{"function": "def demo_fn(args):\n # Print configuration\n print(\"Arguments:\", vars(args))\n\n # Set seed for reproducibility\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed) # for multi-GPU\n print(f\"Setting seed as: {args.seed}\")\n\n # Set device and dtype\n dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(f\"Using device: {device}\")\n print(f\"Using dtype: {dtype}\")\n\n # Run VGGT for camera and depth estimation\n model = VGGT()\n _URL = \"https://huggingface.co/facebook/VGGT-1B/resolve/main/model.pt\"\n model.load_state_dict(torch.hub.load_state_dict_from_url(_URL))\n model.eval()\n model = model.to(device)\n print(f\"Model loaded\")\n\n # Get image paths and preprocess them\n image_dir = os.path.join(args.scene_dir, \"images\")\n image_path_list = glob.glob(os.path.join(image_dir, \"*\"))\n if len(image_path_list) == 0:\n raise ValueError(f\"No images found in {image_dir}\")\n base_image_path_list = [os.path.basename(path) for path in image_path_list]\n\n # Load images and original coordinates\n # Load Image in 1024, while running VGGT with 518\n vggt_fixed_resolution = 518\n img_load_resolution = 1024\n\n images, original_coords = load_and_preprocess_images_square(image_path_list, img_load_resolution)\n images = images.to(device)\n original_coords = original_coords.to(device)\n print(f\"Loaded {len(images)} images from {image_dir}\")\n\n # Run VGGT to estimate camera and depth\n # Run with 518x518 images\n extrinsic, intrinsic, depth_map, depth_conf = run_VGGT(model, images, dtype, vggt_fixed_resolution)\n points_3d = unproject_depth_map_to_point_map(depth_map, extrinsic, intrinsic)\n\n if args.use_ba:\n image_size = 
np.array(images.shape[-2:])\n scale = img_load_resolution / vggt_fixed_resolution\n shared_camera = args.shared_camera\n\n with torch.cuda.amp.autocast(dtype=dtype):\n # Predicting Tracks\n # Using VGGSfM tracker instead of VGGT tracker for efficiency\n # VGGT tracker requires multiple backbone runs to query different frames (this is a problem caused by the training process)\n # Will be fixed in VGGT v2\n\n # You can also change the pred_tracks to tracks from any other methods\n # e.g., from COLMAP, from CoTracker, or by chaining 2D matches from Lightglue/LoFTR.\n pred_tracks, pred_vis_scores, pred_confs, points_3d, points_rgb = predict_tracks(\n images,\n conf=depth_conf,\n points_3d=points_3d,\n masks=None,\n max_query_pts=args.max_query_pts,\n query_frame_num=args.query_frame_num,\n keypoint_extractor=\"aliked+sp\",\n fine_tracking=args.fine_tracking,\n )\n\n torch.cuda.empty_cache()\n\n # rescale the intrinsic matrix from 518 to 1024\n intrinsic[:, :2, :] *= scale\n track_mask = pred_vis_scores > args.vis_thresh\n\n # TODO: radial distortion, iterative BA, masks\n reconstruction, valid_track_mask = batch_np_matrix_to_pycolmap(\n points_3d,\n extrinsic,\n intrinsic,\n pred_tracks,\n image_size,\n masks=track_mask,\n max_reproj_error=args.max_reproj_error,\n shared_camera=shared_camera,\n camera_type=args.camera_type,\n points_rgb=points_rgb,\n )\n\n if reconstruction is None:\n raise ValueError(\"No reconstruction can be built with BA\")\n\n # Bundle Adjustment\n ba_options = pycolmap.BundleAdjustmentOptions()\n pycolmap.bundle_adjustment(reconstruction, ba_options)\n\n reconstruction_resolution = img_load_resolution\n else:\n conf_thres_value = args.conf_thres_value\n max_points_for_colmap = 100000 # randomly sample 3D points\n shared_camera = False # in the feedforward manner, we do not support shared camera\n camera_type = \"PINHOLE\" # in the feedforward manner, we only support PINHOLE camera\n\n image_size = np.array([vggt_fixed_resolution, vggt_fixed_resolution])\n num_frames, height, width, _ = points_3d.shape\n\n points_rgb = F.interpolate(\n images, size=(vggt_fixed_resolution, vggt_fixed_resolution), mode=\"bilinear\", align_corners=False\n )\n points_rgb = (points_rgb.cpu().numpy() * 255).astype(np.uint8)\n points_rgb = points_rgb.transpose(0, 2, 3, 1)\n\n # (S, H, W, 3), with x, y coordinates and frame indices\n points_xyf = create_pixel_coordinate_grid(num_frames, height, width)\n\n conf_mask = depth_conf >= conf_thres_value\n # at most writing 100000 3d points to colmap reconstruction object\n conf_mask = randomly_limit_trues(conf_mask, max_points_for_colmap)\n\n points_3d = points_3d[conf_mask]\n points_xyf = points_xyf[conf_mask]\n points_rgb = points_rgb[conf_mask]\n\n print(\"Converting to COLMAP format\")\n reconstruction = batch_np_matrix_to_pycolmap_wo_track(\n points_3d,\n points_xyf,\n points_rgb,\n extrinsic,\n intrinsic,\n image_size,\n shared_camera=shared_camera,\n camera_type=camera_type,\n )\n\n reconstruction_resolution = vggt_fixed_resolution\n\n reconstruction = rename_colmap_recons_and_rescale_camera(\n reconstruction,\n base_image_path_list,\n original_coords.cpu().numpy(),\n img_size=reconstruction_resolution,\n shift_point2d_to_original_res=True,\n shared_camera=shared_camera,\n )\n\n print(f\"Saving reconstruction to {args.scene_dir}/sparse\")\n sparse_reconstruction_dir = os.path.join(args.scene_dir, \"sparse\")\n os.makedirs(sparse_reconstruction_dir, exist_ok=True)\n reconstruction.write(sparse_reconstruction_dir)\n\n # Save point cloud for fast 
visualization\n trimesh.PointCloud(points_3d, colors=points_rgb).export(os.path.join(args.scene_dir, \"sparse/points.ply\"))\n\n return True", "creation_date": "2025-05-22T22:24:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_colmap.py", "stars": 9709, "label": 0} +{"function": "def rename_colmap_recons_and_rescale_camera(\n reconstruction, image_paths, original_coords, img_size, shift_point2d_to_original_res=False, shared_camera=False\n):\n rescale_camera = True\n\n for pyimageid in reconstruction.images:\n # Reshaped the padded&resized image to the original size\n # Rename the images to the original names\n pyimage = reconstruction.images[pyimageid]\n pycamera = reconstruction.cameras[pyimage.camera_id]\n pyimage.name = image_paths[pyimageid - 1]\n\n if rescale_camera:\n # Rescale the camera parameters\n pred_params = copy.deepcopy(pycamera.params)\n\n real_image_size = original_coords[pyimageid - 1, -2:]\n resize_ratio = max(real_image_size) / img_size\n pred_params = pred_params * resize_ratio\n real_pp = real_image_size / 2\n pred_params[-2:] = real_pp # center of the image\n\n pycamera.params = pred_params\n pycamera.width = real_image_size[0]\n pycamera.height = real_image_size[1]\n\n if shift_point2d_to_original_res:\n # Also shift the point2D to original resolution\n top_left = original_coords[pyimageid - 1, :2]\n\n for point2D in pyimage.points2D:\n point2D.xy = (point2D.xy - top_left) * resize_ratio\n\n if shared_camera:\n # If shared_camera, all images share the same camera\n # no need to rescale any more\n rescale_camera = False\n\n return reconstruction", "creation_date": "2025-05-22T22:24:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_colmap.py", "stars": 9709, "label": 0} +{"function": "def run_model(target_dir, model) -> dict:\n \"\"\"\n Run the VGGT model on images in the 'target_dir/images' folder and return predictions.\n \"\"\"\n print(f\"Processing images from {target_dir}\")\n\n # Device check\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if not torch.cuda.is_available():\n raise ValueError(\"CUDA is not available. Check your environment.\")\n\n # Move model to device\n model = model.to(device)\n model.eval()\n\n # Load and preprocess images\n image_names = glob.glob(os.path.join(target_dir, \"images\", \"*\"))\n image_names = sorted(image_names)\n print(f\"Found {len(image_names)} images\")\n if len(image_names) == 0:\n raise ValueError(\"No images found. 
Check your upload.\")\n\n images = load_and_preprocess_images(image_names).to(device)\n print(f\"Preprocessed images shape: {images.shape}\")\n\n # Run inference\n print(\"Running inference...\")\n dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16\n\n with torch.no_grad():\n with torch.cuda.amp.autocast(dtype=dtype):\n predictions = model(images)\n\n # Convert pose encoding to extrinsic and intrinsic matrices\n print(\"Converting pose encoding to extrinsic and intrinsic matrices...\")\n extrinsic, intrinsic = pose_encoding_to_extri_intri(predictions[\"pose_enc\"], images.shape[-2:])\n predictions[\"extrinsic\"] = extrinsic\n predictions[\"intrinsic\"] = intrinsic\n\n # Convert tensors to numpy\n for key in predictions.keys():\n if isinstance(predictions[key], torch.Tensor):\n predictions[key] = predictions[key].cpu().numpy().squeeze(0) # remove batch dimension\n\n # Generate world points from depth map\n print(\"Computing world points from depth map...\")\n depth_map = predictions[\"depth\"] # (S, H, W, 1)\n world_points = unproject_depth_map_to_point_map(depth_map, predictions[\"extrinsic\"], predictions[\"intrinsic\"])\n predictions[\"world_points_from_depth\"] = world_points\n\n # Clean up\n torch.cuda.empty_cache()\n return predictions", "creation_date": "2025-03-16T23:04:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_gradio.py", "stars": 9709, "label": 0} +{"function": "def handle_uploads(input_video, input_images):\n \"\"\"\n Create a new 'target_dir' + 'images' subfolder, and place user-uploaded\n images or extracted frames from video into it. Return (target_dir, image_paths).\n \"\"\"\n start_time = time.time()\n gc.collect()\n torch.cuda.empty_cache()\n\n # Create a unique folder name\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n target_dir = f\"input_images_{timestamp}\"\n target_dir_images = os.path.join(target_dir, \"images\")\n\n # Clean up if somehow that folder already exists\n if os.path.exists(target_dir):\n shutil.rmtree(target_dir)\n os.makedirs(target_dir)\n os.makedirs(target_dir_images)\n\n image_paths = []\n\n # --- Handle images ---\n if input_images is not None:\n for file_data in input_images:\n if isinstance(file_data, dict) and \"name\" in file_data:\n file_path = file_data[\"name\"]\n else:\n file_path = file_data\n dst_path = os.path.join(target_dir_images, os.path.basename(file_path))\n shutil.copy(file_path, dst_path)\n image_paths.append(dst_path)\n\n # --- Handle video ---\n if input_video is not None:\n if isinstance(input_video, dict) and \"name\" in input_video:\n video_path = input_video[\"name\"]\n else:\n video_path = input_video\n\n vs = cv2.VideoCapture(video_path)\n fps = vs.get(cv2.CAP_PROP_FPS)\n frame_interval = int(fps * 1) # 1 frame/sec\n\n count = 0\n video_frame_num = 0\n while True:\n gotit, frame = vs.read()\n if not gotit:\n break\n count += 1\n if count % frame_interval == 0:\n image_path = os.path.join(target_dir_images, f\"{video_frame_num:06}.png\")\n cv2.imwrite(image_path, frame)\n image_paths.append(image_path)\n video_frame_num += 1\n\n # Sort final images for gallery\n image_paths = sorted(image_paths)\n\n end_time = time.time()\n print(f\"Files copied to {target_dir_images}; took {end_time - start_time:.3f} seconds\")\n return target_dir, image_paths", "creation_date": "2025-03-16T23:04:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_gradio.py", "stars": 9709, "label": 0} +{"function": "def update_gallery_on_upload(input_video, input_images):\n \"\"\"\n 
Whenever the user uploads or changes files, immediately handle them\n and show them in the gallery. Returns (None, target_dir, image_paths, status_message).\n If nothing is uploaded, returns None for every output.\n \"\"\"\n if not input_video and not input_images:\n return None, None, None, None\n target_dir, image_paths = handle_uploads(input_video, input_images)\n return None, target_dir, image_paths, \"Upload complete. Click 'Reconstruct' to begin 3D processing.\"", "creation_date": "2025-03-16T23:04:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_gradio.py", "stars": 9709, "label": 0}
+{"function": "def gradio_demo(\n target_dir,\n conf_thres=3.0,\n frame_filter=\"All\",\n mask_black_bg=False,\n mask_white_bg=False,\n show_cam=True,\n mask_sky=False,\n prediction_mode=\"Pointmap Regression\",\n):\n \"\"\"\n Perform reconstruction using the already-created target_dir/images.\n \"\"\"\n if not os.path.isdir(target_dir) or target_dir == \"None\":\n return None, \"No valid target directory found. Please upload first.\", None, None\n\n start_time = time.time()\n gc.collect()\n torch.cuda.empty_cache()\n\n # Prepare frame_filter dropdown\n target_dir_images = os.path.join(target_dir, \"images\")\n all_files = sorted(os.listdir(target_dir_images)) if os.path.isdir(target_dir_images) else []\n all_files = [f\"{i}: {filename}\" for i, filename in enumerate(all_files)]\n frame_filter_choices = [\"All\"] + all_files\n\n print(\"Running run_model...\")\n with torch.no_grad():\n predictions = run_model(target_dir, model)\n\n # Save predictions\n prediction_save_path = os.path.join(target_dir, \"predictions.npz\")\n np.savez(prediction_save_path, **predictions)\n\n # Handle None frame_filter\n if frame_filter is None:\n frame_filter = \"All\"\n\n # Build a GLB file name\n glbfile = os.path.join(\n target_dir,\n f\"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_maskb{mask_black_bg}_maskw{mask_white_bg}_cam{show_cam}_sky{mask_sky}_pred{prediction_mode.replace(' ', '_')}.glb\",\n )\n\n # Convert predictions to GLB\n glbscene = predictions_to_glb(\n predictions,\n conf_thres=conf_thres,\n filter_by_frames=frame_filter,\n mask_black_bg=mask_black_bg,\n mask_white_bg=mask_white_bg,\n show_cam=show_cam,\n mask_sky=mask_sky,\n target_dir=target_dir,\n prediction_mode=prediction_mode,\n )\n glbscene.export(file_obj=glbfile)\n\n # Cleanup\n del predictions\n gc.collect()\n torch.cuda.empty_cache()\n\n end_time = time.time()\n print(f\"Total time: {end_time - start_time:.2f} seconds (including IO)\")\n log_msg = f\"Reconstruction Success ({len(all_files)} frames).
Waiting for visualization.\"\n\n return glbfile, log_msg, gr.Dropdown(choices=frame_filter_choices, value=frame_filter, interactive=True)", "creation_date": "2025-03-16T23:04:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_gradio.py", "stars": 9709, "label": 0} +{"function": "def clear_fields():\n \"\"\"\n Clears the 3D viewer, the stored target_dir, and empties the gallery.\n \"\"\"\n return None", "creation_date": "2025-03-16T23:04:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_gradio.py", "stars": 9709, "label": 0} +{"function": "def update_log():\n \"\"\"\n Display a quick log message while waiting.\n \"\"\"\n return \"Loading and Reconstructing...\"", "creation_date": "2025-03-16T23:04:25Z", "repo": "facebookresearch/vggt", "file_path": "demo_gradio.py", "stars": 9709, "label": 0} +{"function": "def basic_request():\n \"\"\"Fixture providing a basic request model for testing.\"\"\"\n return RequestModel(\n name=\"Test Request\", method=\"GET\", url=\"https://example.com/api\"\n )", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_simple_get_request():\n \"\"\"Test a simple GET request with no parameters.\"\"\"\n request = RequestModel(\n name=\"Simple GET\", method=\"GET\", url=\"https://example.com/api\"\n )\n\n expected = \"curl \\\\\\n 'https://example.com/api'\"\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_post_request_with_body():\n \"\"\"Test a POST request with a JSON body.\"\"\"\n request = RequestModel(\n name=\"POST with body\",\n method=\"POST\",\n url=\"https://example.com/api/users\",\n body=RequestBody(content='{\"name\": \"John Doe\", \"email\": \"john@example.com\"}'),\n )\n\n expected = 'curl \\\\\\n -X POST \\\\\\n -d \\'{\"name\": \"John Doe\", \"email\": \"john@example.com\"}\\' \\\\\\n \\'https://example.com/api/users\\''\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_headers():\n \"\"\"Test a request with custom headers.\"\"\"\n request = RequestModel(\n name=\"Request with headers\",\n method=\"GET\",\n url=\"https://example.com/api\",\n headers=[\n Header(name=\"Content-Type\", value=\"application/json\"),\n Header(name=\"Authorization\", value=\"Bearer token123\"),\n Header(name=\"Disabled-Header\", value=\"value\", enabled=False),\n ],\n )\n\n expected = \"curl \\\\\\n -H 'Content-Type: application/json' \\\\\\n -H 'Authorization: Bearer token123' \\\\\\n 'https://example.com/api'\"\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_query_params():\n \"\"\"Test a request with query parameters.\"\"\"\n request = RequestModel(\n name=\"Request with query params\",\n method=\"GET\",\n url=\"https://example.com/api/search\",\n params=[\n QueryParam(name=\"q\", value=\"test query\"),\n QueryParam(name=\"page\", value=\"1\"),\n QueryParam(name=\"disabled\", value=\"true\", enabled=False),\n ],\n )\n\n expected = \"curl \\\\\\n 'https://example.com/api/search?q=test+query&page=1'\"\n assert request.to_curl() == expected", "creation_date": 
"2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_existing_query_params():\n \"\"\"Test a request with existing query parameters in the URL.\"\"\"\n request = RequestModel(\n name=\"Request with existing query params\",\n method=\"GET\",\n url=\"https://example.com/api/search?existing=value\",\n params=[\n QueryParam(name=\"q\", value=\"test query\"),\n QueryParam(name=\"page\", value=\"1\"),\n ],\n )\n\n expected = (\n \"curl \\\\\\n 'https://example.com/api/search?existing=value&q=test+query&page=1'\"\n )\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_url_fragment():\n \"\"\"Test a request with a URL fragment.\"\"\"\n request = RequestModel(\n name=\"Request with URL fragment\",\n method=\"GET\",\n url=\"https://example.com/api/docs#section1\",\n params=[QueryParam(name=\"version\", value=\"1.0\")],\n )\n\n expected = \"curl \\\\\\n 'https://example.com/api/docs?version=1.0#section1'\"\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_basic_auth():\n \"\"\"Test a request with basic authentication.\"\"\"\n request = RequestModel(\n name=\"Request with basic auth\",\n method=\"GET\",\n url=\"https://example.com/api/secure\",\n auth=Auth.basic_auth(\"username\", \"password\"),\n )\n\n expected = (\n \"curl \\\\\\n -u 'username:password' \\\\\\n 'https://example.com/api/secure'\"\n )\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_digest_auth():\n \"\"\"Test a request with digest authentication.\"\"\"\n request = RequestModel(\n name=\"Request with digest auth\",\n method=\"GET\",\n url=\"https://example.com/api/secure\",\n auth=Auth.digest_auth(\"username\", \"password\"),\n )\n\n expected = \"curl \\\\\\n --digest -u 'username:password' \\\\\\n 'https://example.com/api/secure'\"\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": "def test_request_with_cookies():\n \"\"\"Test a request with cookies.\"\"\"\n request = RequestModel(\n name=\"Request with cookies\",\n method=\"GET\",\n url=\"https://example.com/api\",\n cookies=[\n Cookie(name=\"session\", value=\"abc123\"),\n Cookie(name=\"preference\", value=\"dark-mode\"),\n Cookie(name=\"disabled\", value=\"value\", enabled=False),\n ],\n )\n\n expected = \"curl \\\\\\n --cookie 'session=abc123' \\\\\\n --cookie 'preference=dark-mode' \\\\\\n 'https://example.com/api'\"\n assert request.to_curl() == expected", "creation_date": "2025-03-01T15:34:17Z", "repo": "darrenburns/posting", "file_path": "tests/test_curl_export.py", "stars": 9438, "label": 0} +{"function": " def __init__(self):\n self.browser = None", "creation_date": "2024-12-28T13:56:48Z", "repo": "chengazhen/cursor-auto-free", "file_path": "browser_utils.py", "stars": 9353, "label": 0} +{"function": " def init_browser(self, user_agent=None):\n \"\"\"\u521d\u59cb\u5316\u6d4f\u89c8\u5668\"\"\"\n co = 
self._get_browser_options(user_agent)\n self.browser = Chromium(co)\n return self.browser", "creation_date": "2024-12-28T13:56:48Z", "repo": "chengazhen/cursor-auto-free", "file_path": "browser_utils.py", "stars": 9353, "label": 0}
+{"function": " def _get_browser_options(self, user_agent=None):\n \"\"\"Get the browser options\"\"\"\n co = ChromiumOptions()\n try:\n extension_path = self._get_extension_path(\"turnstilePatch\")\n co.add_extension(extension_path)\n except FileNotFoundError as e:\n logging.warning(f\"Warning: {e}\")\n\n browser_path = os.getenv(\"BROWSER_PATH\")\n if browser_path:\n co.set_paths(browser_path=browser_path)\n\n co.set_pref(\"credentials_enable_service\", False)\n co.set_argument(\"--hide-crash-restore-bubble\")\n proxy = os.getenv(\"BROWSER_PROXY\")\n if proxy:\n co.set_proxy(proxy)\n\n co.auto_port()\n if user_agent:\n co.set_user_agent(user_agent)\n\n co.headless(\n os.getenv(\"BROWSER_HEADLESS\", \"True\").lower() == \"true\"\n ) # use headless mode in production\n\n # Special handling for macOS\n if sys.platform == \"darwin\":\n co.set_argument(\"--no-sandbox\")\n co.set_argument(\"--disable-gpu\")\n\n return co", "creation_date": "2024-12-28T13:56:48Z", "repo": "chengazhen/cursor-auto-free", "file_path": "browser_utils.py", "stars": 9353, "label": 0}
+{"function": " def _get_extension_path(self, exname='turnstilePatch'):\n \"\"\"Get the extension path\"\"\"\n root_dir = os.getcwd()\n extension_path = os.path.join(root_dir, exname)\n\n if hasattr(sys, \"_MEIPASS\"):\n extension_path = os.path.join(sys._MEIPASS, exname)\n\n if not os.path.exists(extension_path):\n raise FileNotFoundError(f\"Extension not found: {extension_path}\")\n\n return extension_path", "creation_date": "2024-12-28T13:56:48Z", "repo": "chengazhen/cursor-auto-free", "file_path": "browser_utils.py", "stars": 9353, "label": 0}
+{"function": " def quit(self):\n \"\"\"Close the browser\"\"\"\n if self.browser:\n try:\n self.browser.quit()\n except:\n pass", "creation_date": "2024-12-28T13:56:48Z", "repo": "chengazhen/cursor-auto-free", "file_path": "browser_utils.py", "stars": 9353, "label": 0}
+{"function": "def print_logo():\n print(\"\\033[96m\" + CURSOR_LOGO + \"\\033[0m\")\n print(\"\\033[93m\" + \"Building Cursor Keep Alive...\".center(56) + \"\\033[0m\\n\")", "creation_date": "2024-12-28T12:08:49Z", "repo": "chengazhen/cursor-auto-free", "file_path": "build.py", "stars": 9353, "label": 0}
+{"function": "def progress_bar(progress, total, prefix=\"\", length=50):\n filled = int(length * progress // total)\n bar = \"\u2588\" * filled + \"\u2591\" * (length - filled)\n percent = f\"{100 * progress / total:.1f}\"\n print(f\"\\r{prefix} |{bar}| {percent}% Complete\", end=\"\", flush=True)\n if progress == total:\n print()", "creation_date": "2024-12-28T12:08:49Z", "repo": "chengazhen/cursor-auto-free", "file_path": "build.py", "stars": 9353, "label": 0}
+{"function": "def simulate_progress(message, duration=1.0, steps=20):\n print(f\"\\033[94m{message}\\033[0m\")\n for i in range(steps + 1):\n time.sleep(duration / steps)\n progress_bar(i, steps, prefix=\"Progress:\", length=40)", "creation_date": "2024-12-28T12:08:49Z", "repo": "chengazhen/cursor-auto-free", "file_path": "build.py", "stars": 9353, "label": 0}
+{"function": "def filter_output(output):\n \"\"\"Keep only the important lines of the output\"\"\"\n if not output:\n return \"\"\n important_lines = []\n for line in output.split(\"\\n\"):\n # Only keep
lines containing specific keywords\n if any(\n keyword in line.lower()\n for keyword in [\"error:\", \"failed:\", \"completed\", \"directory:\"]\n ):\n important_lines.append(line)\n return \"\\n\".join(important_lines)", "creation_date": "2024-12-28T12:08:49Z", "repo": "chengazhen/cursor-auto-free", "file_path": "build.py", "stars": 9353, "label": 0} +{"function": "def build():\n # Clear screen\n os.system(\"cls\" if platform.system().lower() == \"windows\" else \"clear\")\n\n # Print logo\n print_logo()\n\n system = platform.system().lower()\n spec_file = os.path.join(\"CursorKeepAlive.spec\")\n\n # if system not in [\"darwin\", \"windows\"]:\n # print(f\"\\033[91mUnsupported operating system: {system}\\033[0m\")\n # return\n\n output_dir = f\"dist/{system if system != 'darwin' else 'mac'}\"\n\n # Create output directory\n os.makedirs(output_dir, exist_ok=True)\n simulate_progress(\"Creating output directory...\", 0.5)\n\n # Run PyInstaller with loading animation\n pyinstaller_command = [\n \"pyinstaller\",\n spec_file,\n \"--distpath\",\n output_dir,\n \"--workpath\",\n f\"build/{system}\",\n \"--noconfirm\",\n ]\n\n loading = LoadingAnimation()\n try:\n simulate_progress(\"Running PyInstaller...\", 2.0)\n loading.start(\"Building in progress\")\n result = subprocess.run(\n pyinstaller_command, check=True, capture_output=True, text=True\n )\n loading.stop()\n\n if result.stderr:\n filtered_errors = [\n line\n for line in result.stderr.split(\"\\n\")\n if any(\n keyword in line.lower()\n for keyword in [\"error:\", \"failed:\", \"completed\", \"directory:\"]\n )\n ]\n if filtered_errors:\n print(\"\\033[93mBuild Warnings/Errors:\\033[0m\")\n print(\"\\n\".join(filtered_errors))\n\n except subprocess.CalledProcessError as e:\n loading.stop()\n print(f\"\\033[91mBuild failed with error code {e.returncode}\\033[0m\")\n if e.stderr:\n print(\"\\033[91mError Details:\\033[0m\")\n print(e.stderr)\n return\n except FileNotFoundError:\n loading.stop()\n print(\n \"\\033[91mError: Please ensure PyInstaller is installed (pip install pyinstaller)\\033[0m\"\n )\n return\n except KeyboardInterrupt:\n loading.stop()\n print(\"\\n\\033[91mBuild cancelled by user\\033[0m\")\n return\n finally:\n loading.stop()\n\n # Copy config file\n if os.path.exists(\"config.ini.example\"):\n simulate_progress(\"Copying configuration file...\", 0.5)\n if system == \"windows\":\n subprocess.run(\n [\"copy\", \"config.ini.example\", f\"{output_dir}\\\\config.ini\"], shell=True\n )\n else:\n subprocess.run([\"cp\", \"config.ini.example\", f\"{output_dir}/config.ini\"])\n\n # Copy .env.example file\n if os.path.exists(\".env.example\"):\n simulate_progress(\"Copying environment file...\", 0.5)\n if system == \"windows\":\n subprocess.run([\"copy\", \".env.example\", f\"{output_dir}\\\\.env\"], shell=True)\n else:\n subprocess.run([\"cp\", \".env.example\", f\"{output_dir}/.env\"])\n\n print(\n f\"\\n\\033[92mBuild completed successfully! 
Output directory: {output_dir}\\033[0m\"\n )", "creation_date": "2024-12-28T12:08:49Z", "repo": "chengazhen/cursor-auto-free", "file_path": "build.py", "stars": 9353, "label": 0}
+{"function": "def patched_torch_load(*args, **kwargs):\n if 'map_location' not in kwargs:\n kwargs['map_location'] = map_location\n return torch_load_original(*args, **kwargs)", "creation_date": "2025-05-29T00:43:44Z", "repo": "resemble-ai/chatterbox", "file_path": "example_for_mac.py", "stars": 9330, "label": 0}
+{"function": "def set_seed(seed: int):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n random.seed(seed)\n np.random.seed(seed)", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "gradio_tts_app.py", "stars": 9330, "label": 0}
+{"function": "def load_model():\n model = ChatterboxTTS.from_pretrained(DEVICE)\n return model", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "gradio_tts_app.py", "stars": 9330, "label": 0}
+{"function": "def generate(model, text, audio_prompt_path, exaggeration, temperature, seed_num, cfgw, min_p, top_p, repetition_penalty):\n if model is None:\n model = ChatterboxTTS.from_pretrained(DEVICE)\n\n if seed_num != 0:\n set_seed(int(seed_num))\n\n wav = model.generate(\n text,\n audio_prompt_path=audio_prompt_path,\n exaggeration=exaggeration,\n temperature=temperature,\n cfg_weight=cfgw,\n min_p=min_p,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n )\n return (model.sr, wav.squeeze(0).numpy())", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "gradio_tts_app.py", "stars": 9330, "label": 0}
+{"function": "def generate(audio, target_voice_path):\n wav = model.generate(\n audio, target_voice_path=target_voice_path,\n )\n return model.sr, wav.squeeze(0).numpy()", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "gradio_vc_app.py", "stars": 9330, "label": 0}
+{"function": "def punc_norm(text: str) -> str:\n \"\"\"\n Quick cleanup function for punctuation from LLMs, or for text\n containing chars not seen often in the dataset\n \"\"\"\n if len(text) == 0:\n return \"You need to add some text for me to talk.\"\n\n # Capitalise first letter\n if text[0].islower():\n text = text[0].upper() + text[1:]\n\n # Remove multiple space chars\n text = \" \".join(text.split())\n\n # Replace uncommon/llm punc\n punc_to_replace = [\n (\"...\", \", \"),\n (\"\u2026\", \", \"),\n (\":\", \",\"),\n (\" - \", \", \"),\n (\";\", \", \"),\n (\"\u2014\", \"-\"),\n (\"\u2013\", \"-\"),\n (\" ,\", \",\"),\n (\"\u201c\", \"\\\"\"),\n (\"\u201d\", \"\\\"\"),\n (\"\u2018\", \"'\"),\n (\"\u2019\", \"'\"),\n ]\n for old_char_sequence, new_char in punc_to_replace:\n text = text.replace(old_char_sequence, new_char)\n\n # Add full stop if no ending punc\n text = text.rstrip(\" \")\n sentence_enders = {\".\", \"!\", \"?\", \"-\", \",\"}\n if not any(text.endswith(p) for p in sentence_enders):\n text += \".\"\n\n return text", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "src/chatterbox/tts.py", "stars": 9330, "label": 0}
+{"function": " def to(self, device):\n self.t3 = self.t3.to(device=device)\n for k, v in self.gen.items():\n if torch.is_tensor(v):\n self.gen[k] = v.to(device=device)\n return self", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "src/chatterbox/tts.py", "stars": 9330, "label": 0}
+{"function": " def save(self, fpath:
Path):\n arg_dict = dict(\n t3=self.t3.__dict__,\n gen=self.gen\n )\n torch.save(arg_dict, fpath)", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "src/chatterbox/tts.py", "stars": 9330, "label": 0} +{"function": " def load(cls, fpath, map_location=\"cpu\"):\n if isinstance(map_location, str):\n map_location = torch.device(map_location)\n kwargs = torch.load(fpath, map_location=map_location, weights_only=True)\n return cls(T3Cond(**kwargs['t3']), kwargs['gen'])", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "src/chatterbox/tts.py", "stars": 9330, "label": 0} +{"function": " def __init__(\n self,\n t3: T3,\n s3gen: S3Gen,\n ve: VoiceEncoder,\n tokenizer: EnTokenizer,\n device: str,\n conds: Conditionals = None,\n ):\n self.sr = S3GEN_SR # sample rate of synthesized audio\n self.t3 = t3\n self.s3gen = s3gen\n self.ve = ve\n self.tokenizer = tokenizer\n self.device = device\n self.conds = conds\n self.watermarker = perth.PerthImplicitWatermarker()", "creation_date": "2025-05-28T15:57:27Z", "repo": "resemble-ai/chatterbox", "file_path": "src/chatterbox/tts.py", "stars": 9330, "label": 0} +{"function": "def run(\n weights=ROOT / 'yolo.pt', # weights path\n imgsz=640, # inference size (pixels)\n batch_size=1, # batch size\n data=ROOT / 'data/coco.yaml', # dataset.yaml path\n device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu\n half=False, # use FP16 half-precision inference\n test=False, # test exports only\n pt_only=False, # test PyTorch only\n hard_fail=False, # throw error on benchmark failure\n):\n y, t = [], time.time()\n device = select_device(device)\n model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.\n for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU)\n try:\n assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported\n assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML\n if 'cpu' in device.type:\n assert cpu, 'inference not supported on CPU'\n if 'cuda' in device.type:\n assert gpu, 'inference not supported on GPU'\n\n # Export\n if f == '-':\n w = weights # PyTorch format\n else:\n w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others\n assert suffix in str(w), 'export failed'\n\n # Validate\n if model_type == SegmentationModel:\n result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)\n metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))\n else: # DetectionModel:\n result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)\n metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls))\n speed = result[2][1] # times (preprocess, inference, postprocess)\n y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference\n except Exception as e:\n if hard_fail:\n assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'\n LOGGER.warning(f'WARNING \u26a0\ufe0f Benchmark failure for {name}: {e}')\n y.append([name, None, None, None]) # mAP, t_inference\n if pt_only and i == 0:\n break # break after PyTorch\n\n # Print results\n LOGGER.info('\\n')\n parse_opt()\n notebook_init() # print system info\n c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']\n 
py = pd.DataFrame(y, columns=c)\n LOGGER.info(f'\\nBenchmarks complete ({time.time() - t:.2f}s)')\n LOGGER.info(str(py if map else py.iloc[:, :2]))\n if hard_fail and isinstance(hard_fail, str):\n metrics = py['mAP50-95'].array # values to compare to floor\n floor = eval(hard_fail) # minimum metric floor to pass\n assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'\n return py", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "benchmarks.py", "stars": 9309, "label": 0} +{"function": "def test(\n weights=ROOT / 'yolo.pt', # weights path\n imgsz=640, # inference size (pixels)\n batch_size=1, # batch size\n data=ROOT / 'data/coco128.yaml', # dataset.yaml path\n device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu\n half=False, # use FP16 half-precision inference\n test=False, # test exports only\n pt_only=False, # test PyTorch only\n hard_fail=False, # throw error on benchmark failure\n):\n y, t = [], time.time()\n device = select_device(device)\n for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable)\n try:\n w = weights if f == '-' else \\\n export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights\n assert suffix in str(w), 'export failed'\n y.append([name, True])\n except Exception:\n y.append([name, False]) # mAP, t_inference\n\n # Print results\n LOGGER.info('\\n')\n parse_opt()\n notebook_init() # print system info\n py = pd.DataFrame(y, columns=['Format', 'Export'])\n LOGGER.info(f'\\nExports complete ({time.time() - t:.2f}s)')\n LOGGER.info(str(py))\n return py", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "benchmarks.py", "stars": 9309, "label": 0} +{"function": "def parse_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='weights path')\n parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')\n parser.add_argument('--batch-size', type=int, default=1, help='batch size')\n parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\n parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')\n parser.add_argument('--test', action='store_true', help='test exports only')\n parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')\n parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')\n opt = parser.parse_args()\n opt.data = check_yaml(opt.data) # check YAML\n print_args(vars(opt))\n return opt", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "benchmarks.py", "stars": 9309, "label": 0} +{"function": "def main(opt):\n test(**vars(opt)) if opt.test else run(**vars(opt))", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "benchmarks.py", "stars": 9309, "label": 0} +{"function": "def run(\n weights=ROOT / 'yolo.pt', # model path or triton URL\n source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam)\n data=ROOT / 'data/coco.yaml', # dataset.yaml path\n imgsz=(640, 640), # inference size (height, width)\n conf_thres=0.25, # confidence threshold\n iou_thres=0.45, # NMS IOU threshold\n max_det=1000, # maximum detections per image\n device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu\n view_img=False, # show results\n save_txt=False, # save results to *.txt\n save_conf=False, # save confidences in --save-txt labels\n save_crop=False, # save cropped prediction boxes\n nosave=False, # do not save images/videos\n classes=None, # filter by class: --class 0, or --class 0 2 3\n agnostic_nms=False, # class-agnostic NMS\n augment=False, # augmented inference\n visualize=False, # visualize features\n update=False, # update all models\n project=ROOT / 'runs/detect', # save results to project/name\n name='exp', # save results to project/name\n exist_ok=False, # existing project/name ok, do not increment\n line_thickness=3, # bounding box thickness (pixels)\n hide_labels=False, # hide labels\n hide_conf=False, # hide confidences\n half=False, # use FP16 half-precision inference\n dnn=False, # use OpenCV DNN for ONNX inference\n vid_stride=1, # video frame-rate stride\n):\n source = str(source)\n save_img = not nosave and not source.endswith('.txt') # save inference images\n is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)\n is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))\n webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)\n screenshot = source.lower().startswith('screen')\n if is_url and is_file:\n source = check_file(source) # download\n\n # Directories\n save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run\n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir\n\n # Load model\n device = select_device(device)\n model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n stride, names, pt = model.stride, model.names, model.pt\n imgsz = check_img_size(imgsz, s=stride) # check image size\n\n # Dataloader\n bs = 1 # batch_size\n if webcam:\n view_img = check_imshow(warn=True)\n dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n bs = len(dataset)\n elif screenshot:\n dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)\n else:\n dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n vid_path, vid_writer = [None] * bs, [None] * bs\n\n # Run 
inference\n model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup\n seen, windows, dt = 0, [], (Profile(), Profile(), Profile())\n for path, im, im0s, vid_cap, s in dataset:\n with dt[0]:\n im = torch.from_numpy(im).to(model.device)\n im = im.half() if model.fp16 else im.float() # uint8 to fp16/32\n im /= 255 # 0 - 255 to 0.0 - 1.0\n if len(im.shape) == 3:\n im = im[None] # expand for batch dim\n\n # Inference\n with dt[1]:\n visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False\n pred = model(im, augment=augment, visualize=visualize)\n\n # NMS\n with dt[2]:\n pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)\n\n # Second-stage classifier (optional)\n # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)\n\n # Process predictions\n for i, det in enumerate(pred): # per image\n seen += 1\n if webcam: # batch_size >= 1\n p, im0, frame = path[i], im0s[i].copy(), dataset.count\n s += f'{i}: '\n else:\n p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)\n\n p = Path(p) # to Path\n save_path = str(save_dir / p.name) # im.jpg\n txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt\n s += '%gx%g ' % im.shape[2:] # print string\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n imc = im0.copy() if save_crop else im0 # for save_crop\n annotator = Annotator(im0, line_width=line_thickness, example=str(names))\n if len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()\n\n # Print results\n for c in det[:, 5].unique():\n n = (det[:, 5] == c).sum() # detections per class\n s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \" # add to string\n\n # Write results\n for *xyxy, conf, cls in reversed(det):\n if save_txt: # Write to file\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\n line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format\n with open(f'{txt_path}.txt', 'a') as f:\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\n\n if save_img or save_crop or view_img: # Add bbox to image\n c = int(cls) # integer class\n label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')\n annotator.box_label(xyxy, label, color=colors(c, True))\n if save_crop:\n save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)\n\n # Stream results\n im0 = annotator.result()\n if view_img:\n if platform.system() == 'Linux' and p not in windows:\n windows.append(p)\n cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)\n cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])\n cv2.imshow(str(p), im0)\n cv2.waitKey(1) # 1 millisecond\n\n # Save results (image with detections)\n if save_img:\n if dataset.mode == 'image':\n cv2.imwrite(save_path, im0)\n else: # 'video' or 'stream'\n if vid_path[i] != save_path: # new video\n vid_path[i] = save_path\n if isinstance(vid_writer[i], cv2.VideoWriter):\n vid_writer[i].release() # release previous video writer\n if vid_cap: # video\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n else: # stream\n fps, w, h = 30, im0.shape[1], im0.shape[0]\n save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos\n vid_writer[i] = 
cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))\n vid_writer[i].write(im0)\n\n # Print time (inference-only)\n LOGGER.info(f\"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms\")\n\n # Print results\n t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image\n LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)\n if save_txt or save_img:\n s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\n LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n if update:\n strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "detect.py", "stars": 9309, "label": 0} +{"function": "def parse_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path or triton URL')\n parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')\n parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')\n parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')\n parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')\n parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')\n parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')\n parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n parser.add_argument('--view-img', action='store_true', help='show results')\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\n parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')\n parser.add_argument('--nosave', action='store_true', help='do not save images/videos')\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\n parser.add_argument('--augment', action='store_true', help='augmented inference')\n parser.add_argument('--visualize', action='store_true', help='visualize features')\n parser.add_argument('--update', action='store_true', help='update all models')\n parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')\n parser.add_argument('--name', default='exp', help='save results to project/name')\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\n parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')\n parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')\n parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')\n parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')\n parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')\n parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')\n opt = parser.parse_args()\n 
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand\n print_args(vars(opt))\n return opt", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "detect.py", "stars": 9309, "label": 0} +{"function": "def main(opt):\n # check_requirements(exclude=('tensorboard', 'thop'))\n run(**vars(opt))", "creation_date": "2024-02-18T11:12:37Z", "repo": "WongKinYiu/yolov9", "file_path": "detect.py", "stars": 9309, "label": 0} +{"function": "def run(\n weights=ROOT / 'yolo.pt', # model path or triton URL\n source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam)\n data=ROOT / 'data/coco.yaml', # dataset.yaml path\n imgsz=(640, 640), # inference size (height, width)\n conf_thres=0.25, # confidence threshold\n iou_thres=0.45, # NMS IOU threshold\n max_det=1000, # maximum detections per image\n device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu\n view_img=False, # show results\n save_txt=False, # save results to *.txt\n save_conf=False, # save confidences in --save-txt labels\n save_crop=False, # save cropped prediction boxes\n nosave=False, # do not save images/videos\n classes=None, # filter by class: --class 0, or --class 0 2 3\n agnostic_nms=False, # class-agnostic NMS\n augment=False, # augmented inference\n visualize=False, # visualize features\n update=False, # update all models\n project=ROOT / 'runs/detect', # save results to project/name\n name='exp', # save results to project/name\n exist_ok=False, # existing project/name ok, do not increment\n line_thickness=3, # bounding box thickness (pixels)\n hide_labels=False, # hide labels\n hide_conf=False, # hide confidences\n half=False, # use FP16 half-precision inference\n dnn=False, # use OpenCV DNN for ONNX inference\n vid_stride=1, # video frame-rate stride\n):\n source = str(source)\n save_img = not nosave and not source.endswith('.txt') # save inference images\n is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)\n is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))\n webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)\n screenshot = source.lower().startswith('screen')\n if is_url and is_file:\n source = check_file(source) # download\n\n # Directories\n save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run\n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir\n\n # Load model\n device = select_device(device)\n model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)\n stride, names, pt = model.stride, model.names, model.pt\n imgsz = check_img_size(imgsz, s=stride) # check image size\n\n # Dataloader\n bs = 1 # batch_size\n if webcam:\n view_img = check_imshow(warn=True)\n dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n bs = len(dataset)\n elif screenshot:\n dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)\n else:\n dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)\n vid_path, vid_writer = [None] * bs, [None] * bs\n\n # Run inference\n model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup\n seen, windows, dt = 0, [], (Profile(), Profile(), Profile())\n for path, im, im0s, vid_cap, s in dataset:\n with dt[0]:\n im = torch.from_numpy(im).to(model.device)\n im = im.half() if model.fp16 else im.float() # uint8 to fp16/32\n im /= 255 # 0 - 255 to 0.0 - 1.0\n if len(im.shape) == 3:\n im = im[None] # expand for batch 
dim\n\n # Inference\n with dt[1]:\n visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False\n pred = model(im, augment=augment, visualize=visualize)\n pred = pred[0][1]\n\n # NMS\n with dt[2]:\n pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)\n\n # Second-stage classifier (optional)\n # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)\n\n # Process predictions\n for i, det in enumerate(pred): # per image\n seen += 1\n if webcam: # batch_size >= 1\n p, im0, frame = path[i], im0s[i].copy(), dataset.count\n s += f'{i}: '\n else:\n p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)\n\n p = Path(p) # to Path\n save_path = str(save_dir / p.name) # im.jpg\n txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt\n s += '%gx%g ' % im.shape[2:] # print string\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n imc = im0.copy() if save_crop else im0 # for save_crop\n annotator = Annotator(im0, line_width=line_thickness, example=str(names))\n if len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()\n\n # Print results\n for c in det[:, 5].unique():\n n = (det[:, 5] == c).sum() # detections per class\n s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \" # add to string\n\n # Write results\n for *xyxy, conf, cls in reversed(det):\n if save_txt: # Write to file\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\n line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format\n with open(f'{txt_path}.txt', 'a') as f:\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\n\n if save_img or save_crop or view_img: # Add bbox to image\n c = int(cls) # integer class\n label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')\n annotator.box_label(xyxy, label, color=colors(c, True))\n if save_crop:\n save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)\n\n # Stream results\n im0 = annotator.result()\n if view_img:\n if platform.system() == 'Linux' and p not in windows:\n windows.append(p)\n cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)\n cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])\n cv2.imshow(str(p), im0)\n cv2.waitKey(1) # 1 millisecond\n\n # Save results (image with detections)\n if save_img:\n if dataset.mode == 'image':\n cv2.imwrite(save_path, im0)\n else: # 'video' or 'stream'\n if vid_path[i] != save_path: # new video\n vid_path[i] = save_path\n if isinstance(vid_writer[i], cv2.VideoWriter):\n vid_writer[i].release() # release previous video writer\n if vid_cap: # video\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n else: # stream\n fps, w, h = 30, im0.shape[1], im0.shape[0]\n save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos\n vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))\n vid_writer[i].write(im0)\n\n # Print time (inference-only)\n LOGGER.info(f\"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms\")\n\n # Print results\n t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image\n LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % 
t)\n if save_txt or save_img:\n s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\n LOGGER.info(f\"Results saved to {colorstr('bold', save_dir)}{s}\")\n if update:\n strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)", "creation_date": "2024-03-07T03:50:23Z", "repo": "WongKinYiu/yolov9", "file_path": "detect_dual.py", "stars": 9309, "label": 0} +{"function": "def parse_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path or triton URL')\n parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')\n parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')\n parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')\n parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')\n parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')\n parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')\n parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n parser.add_argument('--view-img', action='store_true', help='show results')\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\n parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')\n parser.add_argument('--nosave', action='store_true', help='do not save images/videos')\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\n parser.add_argument('--augment', action='store_true', help='augmented inference')\n parser.add_argument('--visualize', action='store_true', help='visualize features')\n parser.add_argument('--update', action='store_true', help='update all models')\n parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')\n parser.add_argument('--name', default='exp', help='save results to project/name')\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\n parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')\n parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')\n parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')\n parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')\n parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')\n parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')\n opt = parser.parse_args()\n opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand\n print_args(vars(opt))\n return opt", "creation_date": "2024-03-07T03:50:23Z", "repo": "WongKinYiu/yolov9", "file_path": "detect_dual.py", "stars": 9309, "label": 0} +{"function": "def main(opt):\n # check_requirements(exclude=('tensorboard', 'thop'))\n run(**vars(opt))", "creation_date": "2024-03-07T03:50:23Z", "repo": "WongKinYiu/yolov9", "file_path": 
"detect_dual.py", "stars": 9309, "label": 0} +{"function": "async def computer():\n \"\"\"Shared Computer instance for all test cases.\"\"\"\n # Create a remote Linux computer with C/ua\n computer = Computer(\n os_type=\"linux\",\n api_key=os.getenv(\"CUA_API_KEY\"),\n name=str(os.getenv(\"CUA_CONTAINER_NAME\")),\n provider_type=VMProviderType.CLOUD,\n )\n \n # Create a local macOS computer with C/ua\n # computer = Computer()\n \n # Connect to host computer\n # computer = Computer(use_host_computer_server=True)\n \n try:\n await computer.run()\n yield computer\n finally:\n await computer.disconnect()", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_file_exists(computer):\n tmp_path = \"test_file_exists.txt\"\n # Ensure file does not exist\n if await computer.interface.file_exists(tmp_path):\n await computer.interface.delete_file(tmp_path)\n exists = await computer.interface.file_exists(tmp_path)\n assert exists is False, f\"File {tmp_path} should not exist\"\n # Create file and check again\n await computer.interface.write_text(tmp_path, \"hello\")\n exists = await computer.interface.file_exists(tmp_path)\n assert exists is True, f\"File {tmp_path} should exist\"\n await computer.interface.delete_file(tmp_path)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_directory_exists(computer):\n tmp_dir = \"test_directory_exists\"\n if await computer.interface.directory_exists(tmp_dir):\n # Remove all files in directory before removing directory\n files = await computer.interface.list_dir(tmp_dir)\n for fname in files:\n await computer.interface.delete_file(f\"{tmp_dir}/{fname}\")\n # Remove the directory itself\n await computer.interface.delete_dir(tmp_dir)\n exists = await computer.interface.directory_exists(tmp_dir)\n assert exists is False, f\"Directory {tmp_dir} should not exist\"\n await computer.interface.create_dir(tmp_dir)\n exists = await computer.interface.directory_exists(tmp_dir)\n assert exists is True, f\"Directory {tmp_dir} should exist\"\n # Cleanup: remove files and directory\n files = await computer.interface.list_dir(tmp_dir)\n for fname in files:\n await computer.interface.delete_file(f\"{tmp_dir}/{fname}\")\n await computer.interface.delete_dir(tmp_dir)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_list_dir(computer):\n tmp_dir = \"test_list_dir\"\n if not await computer.interface.directory_exists(tmp_dir):\n await computer.interface.create_dir(tmp_dir)\n files = [\"foo.txt\", \"bar.txt\"]\n for fname in files:\n await computer.interface.write_text(f\"{tmp_dir}/{fname}\", \"hi\")\n result = await computer.interface.list_dir(tmp_dir)\n assert set(result) >= set(files), f\"Directory {tmp_dir} should contain files {files}\"\n for fname in files:\n await computer.interface.delete_file(f\"{tmp_dir}/{fname}\")\n await computer.interface.delete_dir(tmp_dir)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_read_write_text(computer):\n tmp_path = \"test_rw_text.txt\"\n content = \"sample text\"\n await computer.interface.write_text(tmp_path, content)\n read = await computer.interface.read_text(tmp_path)\n assert read == content, \"File content should match\"\n await 
computer.interface.delete_file(tmp_path)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_delete_file(computer):\n tmp_path = \"test_delete_file.txt\"\n await computer.interface.write_text(tmp_path, \"bye\")\n exists = await computer.interface.file_exists(tmp_path)\n assert exists is True, \"File should exist\"\n await computer.interface.delete_file(tmp_path)\n exists = await computer.interface.file_exists(tmp_path)\n assert exists is False, \"File should not exist\"", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_create_dir(computer):\n tmp_dir = \"test_create_dir\"\n if await computer.interface.directory_exists(tmp_dir):\n await computer.interface.delete_dir(tmp_dir)\n await computer.interface.create_dir(tmp_dir)\n exists = await computer.interface.directory_exists(tmp_dir)\n assert exists is True, \"Directory should exist\"\n await computer.interface.delete_dir(tmp_dir)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_read_bytes_basic(computer):\n \"\"\"Test basic read_bytes functionality.\"\"\"\n tmp_path = \"test_read_bytes.bin\"\n test_data = b\"Hello, World! This is binary data \\x00\\x01\\x02\\x03\"\n \n # Write binary data using write_text (assuming it handles bytes)\n await computer.interface.write_text(tmp_path, test_data.decode('latin-1'))\n \n # Read all bytes\n read_data = await computer.interface.read_bytes(tmp_path)\n assert read_data == test_data, \"Binary data should match\"\n \n await computer.interface.delete_file(tmp_path)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_read_bytes_with_offset_and_length(computer):\n \"\"\"Test read_bytes with offset and length parameters.\"\"\"\n tmp_path = \"test_read_bytes_offset.bin\"\n test_data = b\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \n # Write test data\n await computer.interface.write_text(tmp_path, test_data.decode('latin-1'))\n \n # Test reading with offset only\n read_data = await computer.interface.read_bytes(tmp_path, offset=5)\n expected = test_data[5:]\n assert read_data == expected, f\"Data from offset 5 should match. Got: {read_data}, Expected: {expected}\"\n \n # Test reading with offset and length\n read_data = await computer.interface.read_bytes(tmp_path, offset=10, length=5)\n expected = test_data[10:15]\n assert read_data == expected, f\"Data from offset 10, length 5 should match. Got: {read_data}, Expected: {expected}\"\n \n # Test reading from beginning with length\n read_data = await computer.interface.read_bytes(tmp_path, offset=0, length=10)\n expected = test_data[:10]\n assert read_data == expected, f\"Data from beginning, length 10 should match. 
Got: {read_data}, Expected: {expected}\"\n \n await computer.interface.delete_file(tmp_path)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "async def test_get_file_size(computer):\n \"\"\"Test get_file_size functionality.\"\"\"\n tmp_path = \"test_file_size.txt\"\n test_content = \"A\" * 1000 # 1000 bytes\n \n await computer.interface.write_text(tmp_path, test_content)\n \n file_size = await computer.interface.get_file_size(tmp_path)\n assert file_size == 1000, f\"File size should be 1000 bytes, got {file_size}\"\n \n await computer.interface.delete_file(tmp_path)", "creation_date": "2025-06-10T15:47:54Z", "repo": "trycua/cua", "file_path": "tests/files.py", "stars": 8965, "label": 0} +{"function": "def extract_url_content(url):\n downloaded = trafilatura.fetch_url(url)\n content = trafilatura.extract(downloaded)\n \n return {\"url\":url, \"content\":content}", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "free_ask_internet.py", "stars": 8710, "label": 0} +{"function": "def search_web_ref(query:str, debug=False):\n \n content_list = []\n\n try:\n\n safe_string = urllib.parse.quote_plus(\":all !general \" + query)\n\n response = requests.get('http://searxng:8080?q=' + safe_string + '&format=json')\n response.raise_for_status()\n search_results = response.json()\n \n if debug:\n print(\"JSON Response:\")\n pprint(search_results)\n pedding_urls = []\n\n conv_links = []\n\n if search_results.get('results'):\n for item in search_results.get('results')[0:9]:\n name = item.get('title')\n snippet = item.get('content')\n url = item.get('url')\n pedding_urls.append(url)\n\n if url:\n url_parsed = urlparse(url)\n domain = url_parsed.netloc\n icon_url = url_parsed.scheme + '://' + url_parsed.netloc + '/favicon.ico'\n site_name = tldextract.extract(url).domain\n \n conv_links.append({\n 'site_name':site_name,\n 'icon_url':icon_url,\n 'title':name,\n 'url':url,\n 'snippet':snippet\n })\n\n results = []\n futures = []\n\n executor = ThreadPoolExecutor(max_workers=10) \n for url in pedding_urls:\n futures.append(executor.submit(extract_url_content,url))\n try:\n for future in futures:\n res = future.result(timeout=5)\n results.append(res)\n except concurrent.futures.TimeoutError:\n print(\"\u4efb\u52a1\u6267\u884c\u8d85\u65f6\")\n executor.shutdown(wait=False,cancel_futures=True)\n\n for content in results:\n if content and content.get('content'):\n \n item_dict = {\n \"url\":content.get('url'),\n \"content\": content.get('content'),\n \"length\":len(content.get('content'))\n }\n content_list.append(item_dict)\n if debug:\n print(\"URL: {}\".format(url))\n print(\"=================\")\n \n return conv_links,content_list\n except Exception as ex:\n raise ex", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "free_ask_internet.py", "stars": 8710, "label": 0} +{"function": "def gen_prompt(question,content_list, lang=\"zh-CN\", context_length_limit=11000,debug=False):\n \n limit_len = (context_length_limit - 2000)\n if len(question) > limit_len:\n question = question[0:limit_len]\n \n ref_content = [ item.get(\"content\") for item in content_list]\n \n answer_language = ' Simplified Chinese '\n if lang == \"zh-CN\":\n answer_language = ' Simplified Chinese '\n if lang == \"zh-TW\":\n answer_language = ' Traditional Chinese '\n if lang == \"en-US\":\n answer_language = ' English '\n\n\n if len(ref_content) > 0:\n \n if False:\n prompts = 
'''\n \u60a8\u662f\u4e00\u4f4d\u7531 nash_su \u5f00\u53d1\u7684\u5927\u578b\u8bed\u8a00\u4eba\u5de5\u667a\u80fd\u52a9\u624b\u3002\u60a8\u5c06\u88ab\u63d0\u4f9b\u4e00\u4e2a\u7528\u6237\u95ee\u9898\uff0c\u5e76\u9700\u8981\u64b0\u5199\u4e00\u4e2a\u6e05\u6670\u3001\u7b80\u6d01\u4e14\u51c6\u786e\u7684\u7b54\u6848\u3002\u63d0\u4f9b\u4e86\u4e00\u7ec4\u4e0e\u95ee\u9898\u76f8\u5173\u7684\u4e0a\u4e0b\u6587\uff0c\u6bcf\u4e2a\u90fd\u4ee5[[citation:x]]\u8fd9\u6837\u7684\u7f16\u53f7\u5f00\u5934\uff0cx\u4ee3\u8868\u4e00\u4e2a\u6570\u5b57\u3002\u8bf7\u5728\u9002\u5f53\u7684\u60c5\u51b5\u4e0b\u5728\u53e5\u5b50\u672b\u5c3e\u5f15\u7528\u4e0a\u4e0b\u6587\u3002\u7b54\u6848\u5fc5\u987b\u6b63\u786e\u3001\u7cbe\u786e\uff0c\u5e76\u4ee5\u4e13\u5bb6\u7684\u4e2d\u7acb\u548c\u804c\u4e1a\u8bed\u6c14\u64b0\u5199\u3002\u8bf7\u5c06\u7b54\u6848\u9650\u5236\u57282000\u4e2a\u6807\u8bb0\u5185\u3002\u4e0d\u8981\u63d0\u4f9b\u4e0e\u95ee\u9898\u65e0\u5173\u7684\u4fe1\u606f\uff0c\u4e5f\u4e0d\u8981\u91cd\u590d\u3002\u5982\u679c\u7ed9\u51fa\u7684\u4e0a\u4e0b\u6587\u4fe1\u606f\u4e0d\u8db3\uff0c\u8bf7\u5728\u76f8\u5173\u4e3b\u9898\u540e\u5199\u4e0a\u201c\u4fe1\u606f\u7f3a\u5931\uff1a\u201d\u3002\u8bf7\u6309\u7167\u5f15\u7528\u7f16\u53f7[citation:x]\u7684\u683c\u5f0f\u5728\u7b54\u6848\u4e2d\u5bf9\u5e94\u90e8\u5206\u5f15\u7528\u4e0a\u4e0b\u6587\u3002\u5982\u679c\u4e00\u53e5\u8bdd\u6e90\u81ea\u591a\u4e2a\u4e0a\u4e0b\u6587\uff0c\u8bf7\u5217\u51fa\u6240\u6709\u76f8\u5173\u7684\u5f15\u7528\u7f16\u53f7\uff0c\u4f8b\u5982[citation:3][citation:5]\uff0c\u4e0d\u8981\u5c06\u5f15\u7528\u96c6\u4e2d\u5728\u6700\u540e\u8fd4\u56de\uff0c\u800c\u662f\u5728\u7b54\u6848\u5bf9\u5e94\u90e8\u5206\u5217\u51fa\u3002\u9664\u975e\u662f\u4ee3\u7801\u3001\u7279\u5b9a\u7684\u540d\u79f0\u6216\u5f15\u7528\u7f16\u53f7\uff0c\u7b54\u6848\u7684\u8bed\u8a00\u5e94\u4e0e\u95ee\u9898\u76f8\u540c\u3002\u4ee5\u4e0b\u662f\u4e0a\u4e0b\u6587\u7684\u5185\u5bb9\u96c6\uff1a\n ''' + \"\\n\\n\" + \"```\" \n ref_index = 1\n\n for ref_text in ref_content:\n \n prompts = prompts + \"\\n\\n\" + \" [citation:{}] \".format(str(ref_index)) + ref_text\n ref_index += 1\n\n if len(prompts) >= limit_len:\n prompts = prompts[0:limit_len] \n prompts = prompts + '''\n ```\n \u8bb0\u4f4f\uff0c\u4e0d\u8981\u4e00\u5b57\u4e0d\u5dee\u7684\u91cd\u590d\u4e0a\u4e0b\u6587\u5185\u5bb9. \u56de\u7b54\u5fc5\u987b\u4f7f\u7528\u7b80\u4f53\u4e2d\u6587\uff0c\u5982\u679c\u56de\u7b54\u5f88\u957f\uff0c\u8bf7\u5c3d\u91cf\u7ed3\u6784\u5316\u3001\u5206\u6bb5\u843d\u603b\u7ed3\u3002\u8bf7\u6309\u7167\u5f15\u7528\u7f16\u53f7[citation:x]\u7684\u683c\u5f0f\u5728\u7b54\u6848\u4e2d\u5bf9\u5e94\u90e8\u5206\u5f15\u7528\u4e0a\u4e0b\u6587\u3002\u5982\u679c\u4e00\u53e5\u8bdd\u6e90\u81ea\u591a\u4e2a\u4e0a\u4e0b\u6587\uff0c\u8bf7\u5217\u51fa\u6240\u6709\u76f8\u5173\u7684\u5f15\u7528\u7f16\u53f7\uff0c\u4f8b\u5982[citation:3][citation:5]\uff0c\u4e0d\u8981\u5c06\u5f15\u7528\u96c6\u4e2d\u5728\u6700\u540e\u8fd4\u56de\uff0c\u800c\u662f\u5728\u7b54\u6848\u5bf9\u5e94\u90e8\u5206\u5217\u51fa\u3002\u4e0b\u9762\u662f\u7528\u6237\u95ee\u9898\uff1a\n ''' + question \n else:\n prompts = '''\n You are a large language AI assistant develop by nash_su. You are given a user question, and please write clean, concise and accurate answer to the question. You will be given a set of related contexts to the question, each starting with a reference number like [[citation:x]], where x is a number. 
Please use the context and cite the context at the end of each sentence if applicable.\n Your answer must be correct, accurate and written by an expert using an unbiased and professional tone. Please limit to 1024 tokens. Do not give any information that is not related to the question, and do not repeat. Say \"information is missing on\" followed by the related topic, if the given context do not provide sufficient information.\n\n Please cite the contexts with the reference numbers, in the format [citation:x]. If a sentence comes from multiple contexts, please list all applicable citations, like [citation:3][citation:5]. Other than code and specific names and citations, your answer must be written in the same language as the question.\n Here are the set of contexts:\n ''' + \"\\n\\n\" + \"```\" \n ref_index = 1\n\n for ref_text in ref_content:\n \n prompts = prompts + \"\\n\\n\" + \" [citation:{}] \".format(str(ref_index)) + ref_text\n ref_index += 1\n\n if len(prompts) >= limit_len:\n prompts = prompts[0:limit_len] \n prompts = prompts + '''\n ```\n Above is the reference contexts. Remember, don't repeat the context word for word. Answer in ''' + answer_language + '''. If the response is lengthy, structure it in paragraphs and summarize where possible. Cite the context using the format [citation:x] where x is the reference number. If a sentence originates from multiple contexts, list all relevant citation numbers, like [citation:3][citation:5]. Don't cluster the citations at the end but include them in the answer where they correspond.\n Remember, don't blindly repeat the contexts verbatim. And here is the user question:\n ''' + question \n \n \n else:\n prompts = question\n\n if debug:\n print(prompts)\n print(\"\u603b\u957f\u5ea6\uff1a\"+ str(len(prompts)))\n return prompts", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "free_ask_internet.py", "stars": 8710, "label": 0} +{"function": "def chat(prompt, model:str,llm_auth_token:str,llm_base_url:str,using_custom_llm=False,stream=True, debug=False):\n openai.base_url = \"http://127.0.0.1:3040/v1/\"\n\n if model == \"gpt3.5\":\n openai.base_url = \"http://llm-freegpt35:3040/v1/\"\n \n if model == \"kimi\":\n openai.base_url = \"http://llm-kimi:8000/v1/\"\n if model == \"glm4\":\n openai.base_url = \"http://llm-glm4:8000/v1/\"\n if model == \"qwen\":\n openai.base_url = \"http://llm-qwen:8000/v1/\"\n \n\n if llm_auth_token == '':\n llm_auth_token = \"CUSTOM\"\n \n openai.api_key = llm_auth_token\n\n if using_custom_llm:\n openai.base_url = llm_base_url\n openai.api_key = llm_auth_token\n\n\n total_content = \"\"\n for chunk in openai.chat.completions.create(\n model=model,\n messages=[{\n \"role\": \"user\",\n \"content\": prompt\n }],\n stream=True,\n max_tokens=1024,temperature=0.2\n ):\n stream_resp = chunk.dict()\n token = stream_resp[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n if token:\n \n total_content += token\n yield token\n if debug:\n print(total_content)", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "free_ask_internet.py", "stars": 8710, "label": 0} +{"function": "def ask_internet(query:str, debug=False):\n \n content_list = search_web_ref(query,debug=debug)\n if debug:\n print(content_list)\n prompt = gen_prompt(query,content_list,context_length_limit=6000,debug=debug)\n total_token = \"\"\n \n for token in chat(prompt=prompt):\n # for token in daxianggpt.chat(prompt=prompt):\n if token:\n total_token += token\n yield token\n yield 
\"\\n\\n\"\n # \u662f\u5426\u8fd4\u56de\u53c2\u8003\u8d44\u6599\n if True:\n yield \"---\"\n yield \"\\n\"\n yield \"\u53c2\u8003\u8d44\u6599:\\n\"\n count = 1\n for url_content in content_list:\n url = url_content.get('url')\n yield \"*[{}. {}]({})*\".format(str(count),url,url ) \n yield \"\\n\"\n count += 1", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "free_ask_internet.py", "stars": 8710, "label": 0} +{"function": "async def list_models():\n global model_args\n model_card = ModelCard(id=\"gpt-3.5-turbo\")\n return ModelList(data=[model_card])", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "server.py", "stars": 8710, "label": 0} +{"function": "async def create_chat_completion(request: ChatCompletionRequest):\n global model, tokenizer\n print(request)\n if request.messages[-1].role != \"user\":\n raise HTTPException(status_code=400, detail=\"Invalid request\")\n query = request.messages[-1].content\n\n\n generate = predict(query, \"\", request.model)\n return EventSourceResponse(generate, media_type=\"text/event-stream\")", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "server.py", "stars": 8710, "label": 0} +{"function": "def predict(query: str, history: None, model_id: str):\n choice_data = ChatCompletionResponseStreamChoice(\n index=0,\n delta=DeltaMessage(role=\"assistant\"),\n finish_reason=None\n )\n chunk = ChatCompletionResponse(model=model_id, choices=[\n choice_data], object=\"chat.completion.chunk\")\n yield \"{}\".format(chunk.json(exclude_unset=True))\n new_response = \"\"\n current_length = 0\n for token in free_ask_internet.ask_internet(query=query):\n \n new_response += token\n if len(new_response) == current_length:\n continue\n\n new_text = new_response[current_length:]\n current_length = len(new_response)\n\n choice_data = ChatCompletionResponseStreamChoice(\n index=0,\n delta=DeltaMessage(content=new_text,role=\"assistant\"),\n finish_reason=None\n )\n chunk = ChatCompletionResponse(model=model_id, choices=[\n choice_data], object=\"chat.completion.chunk\")\n yield \"{}\".format(chunk.json(exclude_unset=True))\n\n choice_data = ChatCompletionResponseStreamChoice(\n index=0,\n delta=DeltaMessage(),\n finish_reason=\"stop\"\n )\n chunk = ChatCompletionResponse(model=model_id, choices=[\n choice_data], object=\"chat.completion.chunk\")\n yield \"{}\".format(chunk.json(exclude_unset=True))\n yield '[DONE]'", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "server.py", "stars": 8710, "label": 0} +{"function": "async def get_search_refs(request: QueryRequest):\n\n global search_results\n search_results = []\n search_item_list = []\n if request.ask_type == \"search\":\n search_links,search_results = free_ask_internet.search_web_ref(request.query)\n for search_item in search_links:\n snippet = search_item.get(\"snippet\")\n url = search_item.get(\"url\")\n icon_url = search_item.get(\"icon_url\")\n site_name = search_item.get(\"site_name\")\n title = search_item.get(\"title\")\n \n\n si = SearchItem(snippet=snippet,url=url,icon_url=icon_url,site_name=site_name,title=title)\n\n search_item_list.append(si)\n\n resp = SearchResp(code=0,msg=\"success\",data=search_item_list)\n \n return resp", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "server.py", "stars": 8710, "label": 0} +{"function": "def generator(prompt:str, model:str, llm_auth_token:str,llm_base_url:str, 
using_custom_llm=False,is_failed=False):\n if is_failed:\n yield \"\u641c\u7d22\u5931\u8d25\uff0c\u6ca1\u6709\u8fd4\u56de\u7ed3\u679c\"\n else:\n total_token = \"\"\n for token in free_ask_internet.chat(prompt=prompt,model=model,llm_auth_token=llm_auth_token,llm_base_url=llm_base_url,using_custom_llm=using_custom_llm,stream=True):\n total_token += token\n yield token", "creation_date": "2024-04-05T07:15:41Z", "repo": "nashsu/FreeAskInternet", "file_path": "server.py", "stars": 8710, "label": 0} +{"function": "def get_api():\n token = input(\"Write token? \").strip()\n api = HfApi(token=token)\n return api", "creation_date": "2025-02-04T13:32:06Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/export_quantized.py", "stars": 8639, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser('export_quantized')\n parser.add_argument('hf_repo')\n parser.add_argument('new_hf_repo', nargs='?', default=None)\n\n args = parser.parse_args()\n api = get_api()\n\n repo = args.hf_repo\n\n print(\"Downloading base model.\")\n info = loaders.CheckpointInfo.from_hf_repo(args.hf_repo)\n print(\"Creating model.\")\n model = info.get_moshi(fuse_lora=True, device='cuda')\n print(\"Quantizing model.\")\n replace_linear_with_qlinear(model)\n\n if args.new_hf_repo is None:\n new_repo = repo.rsplit('-', 1)[0] + '-q8'\n else:\n new_repo = args.new_hf_repo\n if not api.repo_exists(new_repo):\n api.create_repo(new_repo, repo_type='model')\n print(\"Repo created.\")\n\n to_copy = ['README.md']\n for file in to_copy:\n if not api.file_exists(repo, file):\n continue\n if not api.file_exists(new_repo, file):\n print(\"File\", file, \"is missing\")\n old_file = hf_hub_download(repo, file)\n api.upload_file(\n path_or_fileobj=old_file,\n path_in_repo=file,\n repo_id=new_repo,\n repo_type=\"model\")\n with tempfile.NamedTemporaryFile(suffix='.safetensors', delete=True) as file:\n save_file(model.state_dict(), file.name)\n size = Path(file.name).stat().st_size / 1e9\n print(f\"Checkpoint size: {size:.1f}GB\")\n old_name, old_ext = info.moshi_weights.name.rsplit('.', 1)\n new_name = old_name + '.q8.' + old_ext\n api.upload_file(\n path_or_fileobj=file.name,\n path_in_repo=new_name,\n repo_id=new_repo,\n repo_type=\"model\")\n config = json.load(open(hf_hub_download(repo, 'config.json')))\n config['moshi_name'] = new_name\n config['quantize'] = True\n if not config['mimi_name'].startswith('hf://'):\n config['mimi_name'] = f'hf://{repo}/{config[\"mimi_name\"]}'\n if not config['tokenizer_name'].startswith('hf://'):\n config['tokenizer_name'] = f'hf://{repo}/{config[\"tokenizer_name\"]}'\n with tempfile.NamedTemporaryFile(mode='w') as file:\n json.dump(config, file, indent=2)\n file.flush()\n api.upload_file(\n path_or_fileobj=file.name,\n path_in_repo='config.json',\n repo_id=new_repo,\n repo_type=\"model\")", "creation_date": "2025-02-04T13:32:06Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/export_quantized.py", "stars": 8639, "label": 0} +{"function": "def get_api():\n token = input(\"Write token? 
\").strip()\n api = HfApi(token=token)\n return api", "creation_date": "2025-04-24T16:32:52Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/export_torch.py", "stars": 8639, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser('export_quantized')\n parser.add_argument(\"--tokenizer\", type=str, help=\"Path to a local tokenizer file.\")\n parser.add_argument(\"--moshi-weight\", type=str, help=\"Path to a local checkpoint file for Moshi.\")\n parser.add_argument(\"--mimi-weight\", type=str, help=\"Path to a local checkpoint file for Mimi.\")\n parser.add_argument(\"--hf-repo\", type=str, default=loaders.DEFAULT_REPO,\n help=\"HF repo to look into, defaults Moshiko. \"\n \"Use this to select a different pre-trained model.\")\n parser.add_argument(\"--config\", \"--lm-config\", dest=\"config\", type=str, help=\"The config as a json file.\")\n parser.add_argument('new_hf_repo')\n\n args = parser.parse_args()\n api = get_api()\n\n info = loaders.CheckpointInfo.from_hf_repo(\n args.hf_repo, moshi_weights=args.moshi_weight, mimi_weights=args.mimi_weight,\n tokenizer=args.tokenizer, config_path=args.config)\n\n if not api.repo_exists(args.new_hf_repo):\n api.create_repo(args.new_hf_repo, repo_type='model', private=True)\n print(\"Repo created.\")\n\n config = info.raw_config\n assert config is not None\n config['mimi_name'] = info.mimi_weights.name\n config['moshi_name'] = info.moshi_weights.name\n config['tokenizer_name'] = info.tokenizer.name\n for file in [info.mimi_weights, info.moshi_weights, info.tokenizer]:\n if not api.file_exists(args.new_hf_repo, file.name):\n print(\"Uploading file\", file)\n api.upload_file(\n path_or_fileobj=file,\n path_in_repo=file.name,\n repo_id=args.new_hf_repo,\n repo_type=\"model\")\n with tempfile.NamedTemporaryFile(mode='w') as file:\n json.dump(config, file, indent=2)\n file.flush()\n api.upload_file(\n path_or_fileobj=file.name,\n path_in_repo='config.json',\n repo_id=args.new_hf_repo,\n repo_type=\"model\")", "creation_date": "2025-04-24T16:32:52Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/export_torch.py", "stars": 8639, "label": 0} +{"function": "def import_model(in_path: Path, out_path: Path, silent: bool = False) -> None:\n with safe_open(in_path, framework=\"pt\", device=\"cpu\") as f:\n tensors = {key: f.get_tensor(key) for key in f.keys()}\n model = {\n \"text_emb.weight\": tensors[\"model.embed_tokens.weight\"],\n \"text_linear.weight\": tensors[\"lm_head.weight\"],\n \"out_norm.weight\": tensors[\"model.norm.weight\"],\n }\n n_layers = -1\n for key in tensors.keys():\n if key.startswith(\"model.layers.\"):\n layer_idx = int(key.split(\".\")[2])\n n_layers = max(layer_idx, n_layers)\n n_layers += 1\n if not silent:\n print(f\"found {n_layers} layers\")\n for layer_idx in range(n_layers):\n dst_prefix = f\"transformer.layers.{layer_idx}.\"\n src_prefix = f\"model.layers.{layer_idx}.\"\n _model = {\n \"norm1.weight\": \"input_layernorm.weight\",\n \"norm2.weight\": \"post_attention_layernorm.weight\",\n \"self_attn.out_proj.weight\": \"self_attn.o_proj.weight\",\n \"gating.linear_out.weight\": \"mlp.down_proj.weight\",\n }\n for dst, src in _model.items():\n model[dst_prefix + dst] = tensors[src_prefix + src]\n gate_proj = tensors[src_prefix + \"mlp.gate_proj.weight\"]\n up_proj = tensors[src_prefix + \"mlp.up_proj.weight\"]\n linear_in = torch.cat([gate_proj, up_proj], dim=0)\n model[dst_prefix + \"gating.linear_in.weight\"] = linear_in\n q = tensors[src_prefix + \"self_attn.q_proj.weight\"]\n k = 
tensors[src_prefix + \"self_attn.k_proj.weight\"]\n v = tensors[src_prefix + \"self_attn.v_proj.weight\"]\n in_proj = torch.cat([q, k, v], dim=0)\n model[dst_prefix + \"self_attn.in_proj.weight\"] = in_proj\n\n save_file(model, out_path)", "creation_date": "2025-01-13T20:50:31Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/import_helium_mlx.py", "stars": 8639, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--checkpoint\",\n type=str,\n default=\"kyutai/helium-1-preview-2b\",\n help=\"the transformers checkpoint to import\",\n )\n parser.add_argument(\"--out\", type=str, help=\"the mlx safetensors file to generate\")\n parser.add_argument(\n \"-s\", \"--silent\", action=\"store_true\", help=\"Only prints the checkpoint name\"\n )\n args = parser.parse_args()\n\n ckpt_path = Path(args.checkpoint)\n if not ckpt_path.exists():\n ckpt_path = hf_hub_download(\n repo_id=args.checkpoint, filename=\"model.safetensors\"\n )\n out_path = Path(args.out)\n if not out_path.exists():\n import_model(ckpt_path, out_path, silent=args.silent)\n print(out_path)", "creation_date": "2025-01-13T20:50:31Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/import_helium_mlx.py", "stars": 8639, "label": 0} +{"function": "def import_model(\n in_path: str,\n out_path: str,\n) -> None:\n pkg = torch.load(in_path, map_location=torch.device(\"cpu\"))\n if 'xp.cfg' in pkg:\n cfg = pkg['xp.cfg']\n else:\n cfg = omegaconf.OmegaConf.load(Path(in_path).parent / '.hydra/config.yaml')\n\n model = pkg[\"fsdp_best_state\"][\"model\"]\n\n # Asumming same size of both streams n_q.\n in_n_q = cfg.compression_model_n_q * 2\n out_n_q = cfg.compression_model_n_q\n print(f\"in_n_q: {in_n_q}, out_n_q: {out_n_q}\")\n schedule = cfg.transformer_lm.get('depformer_weights_per_step_schedule', None)\n if schedule is None:\n schedule = list(range(in_n_q))\n\n num_weights = max(schedule) + 1\n schedule = schedule[:out_n_q]\n kept_weights = max(schedule) + 1\n print(f\"Number of dep weights: {num_weights}, keeping {kept_weights}\")\n\n for idx in range(cfg.transformer_lm.depformer_num_layers):\n in_proj_key = f\"depformer.layers.{idx}.self_attn.in_proj_weight\"\n in_proj = model[in_proj_key]\n in_proj = in_proj.view(num_weights, -1, *in_proj.shape[1:])\n model[in_proj_key] = in_proj[:kept_weights].view(-1, *in_proj.shape[2:]).contiguous()\n out_proj_key = f\"depformer.layers.{idx}.self_attn.out_proj.weight\"\n out_proj = model[out_proj_key]\n out_proj = out_proj.view(num_weights, -1, *out_proj.shape[1:])\n model[out_proj_key] = out_proj[:kept_weights].view(-1, *out_proj.shape[2:]).contiguous()\n\n # For mimi inference, we trim the depformer layer that are unused.\n for dep_idx in range(out_n_q - 1, in_n_q - 1):\n del model[f\"depformer_emb.{dep_idx}.weight\"]\n if cfg.transformer_lm.get('depformer_low_rank_embeddings'):\n del model[f\"depformer_emb.{dep_idx}.low_rank.weight\"]\n for dep_idx in range(out_n_q, in_n_q):\n del model[f\"linears.{dep_idx}.weight\"]\n for real_idx in range(kept_weights, num_weights):\n model.pop(f\"depformer_in.{real_idx}.weight\")\n for idx in range(cfg.transformer_lm.depformer_num_layers):\n model.pop(f\"depformer.layers.{idx}.gating.{real_idx}.linear_in.weight\")\n model.pop(f\"depformer.layers.{idx}.gating.{real_idx}.linear_out.weight\")\n\n schedule = schedule[:out_n_q]\n\n save_file(model, out_path)", "creation_date": "2025-02-05T17:03:33Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/import_lightformer.py", "stars": 8639, "label": 0} 
+{"function": "def main():\n parser = argparse.ArgumentParser(\n prog=\"moshi_import\", description=\"Imports moshi checkpoints\"\n )\n parser.add_argument(\"checkpoint\", help=\"The checkpoint to be imported.\")\n parser.add_argument(\"out\", help=\"The safetensors out file.\")\n args = parser.parse_args()\n\n out_path = Path(args.out)\n\n if out_path.exists():\n print(\"file already exists\")\n else:\n import_model(args.checkpoint, out_path)\n print(out_path)", "creation_date": "2025-02-05T17:03:33Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/import_lightformer.py", "stars": 8639, "label": 0} +{"function": "def import_model(\n in_path: Path,\n out_path: Path,\n weights_per_step_schedule: list[int] | None = None,\n silent: bool = False,\n max_out_n_q: int | None = None,\n) -> None:\n if in_path.suffix == \".safetensors\":\n tch_model = load_file(in_path)\n else:\n pkg = torch.load(in_path, map_location=torch.device(\"cpu\"), weights_only=False)\n tch_model = pkg[\"fsdp_best_state\"][\"model\"]\n\n in_n_q: int | None = None\n for idx in range(999):\n name = f\"emb.{idx}.weight\"\n if name not in tch_model:\n in_n_q = idx\n break\n out_n_q: int | None = None\n for idx in range(999):\n name = f\"linears.{idx}.weight\"\n if name not in tch_model:\n out_n_q = idx\n break\n assert in_n_q is not None\n assert out_n_q is not None\n if not silent:\n print(f\"in_n_q: {in_n_q}, out_n_q: {out_n_q}\")\n\n if weights_per_step_schedule is not None:\n if len(weights_per_step_schedule) != out_n_q:\n raise ValueError(\"inconsistent weights_per_step_schedule\", len(weights_per_step_schedule), out_n_q)\n\n depformer_layers: int | None = None\n for idx in range(999):\n if f\"depformer.layers.{idx}.self_attn.in_proj_weight\" not in tch_model:\n depformer_layers = idx\n break\n assert depformer_layers is not None\n if not silent:\n print(f\"depformer layers: {depformer_layers}\")\n\n model = {}\n for name in [\"text_emb.weight\", \"text_linear.weight\"]:\n model[name] = tch_model[name]\n for name in tch_model.keys():\n if name.startswith(\"condition_provider.conditioners\"):\n model[name] = tch_model[name]\n model[\"out_norm.weight\"] = tch_model[\"out_norm.alpha\"][0, 0]\n for idx in range(in_n_q):\n src_name = f\"emb.{idx}.weight\"\n dst_name = f\"audio_embs.{idx}.weight\"\n model[dst_name] = tch_model[src_name]\n\n for k, v in sorted(tch_model.items()):\n print(k, v.shape, v.dtype)\n if k.startswith(\"transformer\"):\n if k.endswith(\".alpha\"):\n v = v[0, 0]\n k = k.replace(\".alpha\", \".weight\")\n k = k.replace(\".in_proj_weight\", \".in_proj.weight\")\n model[k] = v\n\n # Only export the first slices of the depformer (main).\n if max_out_n_q is not None:\n exported_out_n_q = min(max_out_n_q, out_n_q)\n print(f\"only exporting the first {exported_out_n_q} depformer layers\")\n else:\n exported_out_n_q = out_n_q\n\n max_df_steps = out_n_q\n if weights_per_step_schedule is not None:\n max_df_steps = max(weights_per_step_schedule) + 1\n\n for idx in range(exported_out_n_q):\n if weights_per_step_schedule is not None:\n tch_idx = weights_per_step_schedule[idx]\n else:\n tch_idx = idx\n\n base = f\"depformer.slices.{idx}.\"\n model[base + \"linear_in.weight\"] = tch_model[f\"depformer_in.{tch_idx}.weight\"].clone()\n model[base + \"linear_out.weight\"] = tch_model[f\"linears.{idx}.weight\"]\n if idx == 0:\n model[base + \"emb.weight\"] = tch_model[\"depformer_text_emb.weight\"]\n if \"depformer_text_emb.low_rank.weight\" in tch_model:\n model[base + \"emb.low_rank.weight\"] = 
tch_model[\"depformer_text_emb.low_rank.weight\"].clone()\n else:\n model[base + \"emb.weight\"] = tch_model[f\"depformer_emb.{idx-1}.weight\"].clone()\n if f\"depformer_emb.{idx-1}.low_rank.weight\" in tch_model:\n model[base + \"emb.low_rank.weight\"] = tch_model[f\"depformer_emb.{idx-1}.low_rank.weight\"].clone()\n\n for layer_idx in range(depformer_layers):\n layer = base + f\"transformer.layers.{layer_idx}.\"\n # WARNING: note that this uses in_proj_weight vs out_proj.weight\n model[layer + \"self_attn.in_proj.weight\"] = (\n tch_model[f\"depformer.layers.{layer_idx}.self_attn.in_proj_weight\"]\n .chunk(max_df_steps)[tch_idx]\n .clone()\n )\n model[layer + \"self_attn.out_proj.weight\"] = (\n tch_model[f\"depformer.layers.{layer_idx}.self_attn.out_proj.weight\"]\n .chunk(max_df_steps)[tch_idx]\n .clone()\n )\n model[layer + \"norm1.weight\"] = tch_model[\n f\"depformer.layers.{layer_idx}.norm1.alpha\"\n ][0, 0].clone()\n model[layer + \"norm2.weight\"] = tch_model[\n f\"depformer.layers.{layer_idx}.norm2.alpha\"\n ][0, 0].clone()\n model[layer + \"gating.linear_in.weight\"] = tch_model[\n f\"depformer.layers.{layer_idx}.gating.{tch_idx}.linear_in.weight\"\n ].clone()\n model[layer + \"gating.linear_out.weight\"] = tch_model[\n f\"depformer.layers.{layer_idx}.gating.{tch_idx}.linear_out.weight\"\n ].clone()\n\n save_file(model, out_path)", "creation_date": "2024-08-31T09:23:13Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/import_mlx.py", "stars": 8639, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"checkpoint\", type=str, help=\"the pytorch checkpoint to import\")\n parser.add_argument(\"out\", type=str, help=\"the mlx safetensors file to generate\")\n parser.add_argument(\n \"-s\", \"--silent\", action=\"store_true\", help=\"only prints the checkpoint name\"\n )\n parser.add_argument(\"--wpss\", type=str, help=\"weights per step schedule config\")\n parser.add_argument(\n \"--max-out-n-q\",\n type=int,\n help=\"limit the number of depformer layers that are exported\",\n )\n args = parser.parse_args()\n\n wpss = None\n if args.wpss is not None:\n if args.wpss == \"hibiki-2b\":\n wpss = [0, 1, 2, 3, 4, 5, 6, 7] + [8] * 8 + [9] * 16\n else:\n raise ValueError(f\"unknown wpss {args.wpss}\")\n\n ckpt_path = Path(args.checkpoint)\n out_path = Path(args.out)\n if not out_path.exists():\n import_model(\n ckpt_path,\n out_path,\n weights_per_step_schedule=wpss,\n silent=args.silent,\n max_out_n_q=args.max_out_n_q\n )\n print(out_path)", "creation_date": "2024-08-31T09:23:13Z", "repo": "kyutai-labs/moshi", "file_path": "scripts/import_mlx.py", "stars": 8639, "label": 0} +{"function": "def predict(image, audio, pose_weight, face_weight, lip_weight, face_expand_ratio, progress=gr.Progress(track_tqdm=True)):\n \"\"\"\n Create a gradio interface with the configs.\n \"\"\"\n _ = progress\n config = {\n 'source_image': image,\n 'driving_audio': audio,\n 'pose_weight': pose_weight,\n 'face_weight': face_weight,\n 'lip_weight': lip_weight,\n 'face_expand_ratio': face_expand_ratio,\n 'config': 'configs/inference/default.yaml',\n 'checkpoint': None,\n 'output': \".cache/output.mp4\"\n }\n args = argparse.Namespace()\n for key, value in config.items():\n setattr(args, key, value)\n return inference_process(args)", "creation_date": "2024-06-20T06:01:24Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/app.py", "stars": 8520, "label": 0} +{"function": "def setup_directories(video_path: Path) -> dict:\n \"\"\"\n Setup directories 
for storing processed files.\n\n Args:\n video_path (Path): Path to the video file.\n\n Returns:\n dict: A dictionary containing paths for various directories.\n \"\"\"\n base_dir = video_path.parent.parent\n dirs = {\n \"face_mask\": base_dir / \"face_mask\",\n \"sep_pose_mask\": base_dir / \"sep_pose_mask\",\n \"sep_face_mask\": base_dir / \"sep_face_mask\",\n \"sep_lip_mask\": base_dir / \"sep_lip_mask\",\n \"face_emb\": base_dir / \"face_emb\",\n \"audio_emb\": base_dir / \"audio_emb\"\n }\n\n for path in dirs.values():\n path.mkdir(parents=True, exist_ok=True)\n\n return dirs", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/data_preprocess.py", "stars": 8520, "label": 0} +{"function": "def process_single_video(video_path: Path,\n output_dir: Path,\n image_processor: ImageProcessorForDataProcessing,\n audio_processor: AudioProcessor,\n step: int) -> None:\n \"\"\"\n Process a single video file.\n\n Args:\n video_path (Path): Path to the video file.\n output_dir (Path): Directory to save the output.\n image_processor (ImageProcessorForDataProcessing): Image processor object.\n audio_processor (AudioProcessor): Audio processor object.\n gpu_status (bool): Whether to use GPU for processing.\n \"\"\"\n assert video_path.exists(), f\"Video path {video_path} does not exist\"\n dirs = setup_directories(video_path)\n logging.info(f\"Processing video: {video_path}\")\n\n try:\n if step == 1:\n images_output_dir = output_dir / 'images' / video_path.stem\n images_output_dir.mkdir(parents=True, exist_ok=True)\n images_output_dir = convert_video_to_images(\n video_path, images_output_dir)\n logging.info(f\"Images saved to: {images_output_dir}\")\n\n audio_output_dir = output_dir / 'audios'\n audio_output_dir.mkdir(parents=True, exist_ok=True)\n audio_output_path = audio_output_dir / f'{video_path.stem}.wav'\n audio_output_path = extract_audio_from_videos(\n video_path, audio_output_path)\n logging.info(f\"Audio extracted to: {audio_output_path}\")\n\n face_mask, _, sep_pose_mask, sep_face_mask, sep_lip_mask = image_processor.preprocess(\n images_output_dir)\n cv2.imwrite(\n str(dirs[\"face_mask\"] / f\"{video_path.stem}.png\"), face_mask)\n cv2.imwrite(str(dirs[\"sep_pose_mask\"] /\n f\"{video_path.stem}.png\"), sep_pose_mask)\n cv2.imwrite(str(dirs[\"sep_face_mask\"] /\n f\"{video_path.stem}.png\"), sep_face_mask)\n cv2.imwrite(str(dirs[\"sep_lip_mask\"] /\n f\"{video_path.stem}.png\"), sep_lip_mask)\n else:\n images_dir = output_dir / \"images\" / video_path.stem\n audio_path = output_dir / \"audios\" / f\"{video_path.stem}.wav\"\n _, face_emb, _, _, _ = image_processor.preprocess(images_dir)\n torch.save(face_emb, str(\n dirs[\"face_emb\"] / f\"{video_path.stem}.pt\"))\n audio_emb, _ = audio_processor.preprocess(audio_path)\n torch.save(audio_emb, str(\n dirs[\"audio_emb\"] / f\"{video_path.stem}.pt\"))\n except Exception as e:\n logging.error(f\"Failed to process video {video_path}: {e}\")", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/data_preprocess.py", "stars": 8520, "label": 0} +{"function": "def process_all_videos(input_video_list: List[Path], output_dir: Path, step: int) -> None:\n \"\"\"\n Process all videos in the input list.\n\n Args:\n input_video_list (List[Path]): List of video paths to process.\n output_dir (Path): Directory to save the output.\n gpu_status (bool): Whether to use GPU for processing.\n \"\"\"\n face_analysis_model_path = 
\"pretrained_models/face_analysis\"\n landmark_model_path = \"pretrained_models/face_analysis/models/face_landmarker_v2_with_blendshapes.task\"\n audio_separator_model_file = \"pretrained_models/audio_separator/Kim_Vocal_2.onnx\"\n wav2vec_model_path = 'pretrained_models/wav2vec/wav2vec2-base-960h'\n\n audio_processor = AudioProcessor(\n 16000,\n 25,\n wav2vec_model_path,\n False,\n os.path.dirname(audio_separator_model_file),\n os.path.basename(audio_separator_model_file),\n os.path.join(output_dir, \"vocals\"),\n ) if step==2 else None\n\n image_processor = ImageProcessorForDataProcessing(\n face_analysis_model_path, landmark_model_path, step)\n\n for video_path in tqdm(input_video_list, desc=\"Processing videos\"):\n process_single_video(video_path, output_dir,\n image_processor, audio_processor, step)", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/data_preprocess.py", "stars": 8520, "label": 0} +{"function": "def get_video_paths(source_dir: Path, parallelism: int, rank: int) -> List[Path]:\n \"\"\"\n Get paths of videos to process, partitioned for parallel processing.\n\n Args:\n source_dir (Path): Source directory containing videos.\n parallelism (int): Level of parallelism.\n rank (int): Rank for distributed processing.\n\n Returns:\n List[Path]: List of video paths to process.\n \"\"\"\n video_paths = [item for item in sorted(\n source_dir.iterdir()) if item.is_file() and item.suffix == '.mp4']\n return [video_paths[i] for i in range(len(video_paths)) if i % parallelism == rank]", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/data_preprocess.py", "stars": 8520, "label": 0} +{"function": "def collect_video_folder_paths(root_path: Path) -> list:\n \"\"\"\n Collect all video folder paths from the root path.\n\n Args:\n root_path (Path): The root directory containing video folders.\n\n Returns:\n list: List of video folder paths.\n \"\"\"\n return [frames_dir.resolve() for frames_dir in root_path.iterdir() if frames_dir.is_dir()]", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/extract_meta_info_stage1.py", "stars": 8520, "label": 0} +{"function": "def construct_meta_info(frames_dir_path: Path) -> dict:\n \"\"\"\n Construct meta information for a given frames directory.\n\n Args:\n frames_dir_path (Path): The path to the frames directory.\n\n Returns:\n dict: A dictionary containing the meta information for the frames directory, or None if the required files do not exist.\n \"\"\"\n mask_path = str(frames_dir_path).replace(\"images\", \"face_mask\") + \".png\"\n face_emb_path = str(frames_dir_path).replace(\"images\", \"face_emb\") + \".pt\"\n\n if not os.path.exists(mask_path):\n print(f\"Mask path not found: {mask_path}\")\n return None\n\n if torch.load(face_emb_path) is None:\n print(f\"Face emb is None: {face_emb_path}\")\n return None\n\n return {\n \"image_path\": str(frames_dir_path),\n \"mask_path\": mask_path,\n \"face_emb\": face_emb_path,\n }", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/extract_meta_info_stage1.py", "stars": 8520, "label": 0} +{"function": "def main():\n \"\"\"\n Main function to extract meta info for training.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-r\", \"--root_path\", type=str,\n required=True, help=\"Root path of the video directories\")\n parser.add_argument(\"-n\", \"--dataset_name\", 
type=str,\n required=True, help=\"Name of the dataset\")\n parser.add_argument(\"--meta_info_name\", type=str,\n help=\"Name of the meta information file\")\n\n args = parser.parse_args()\n\n if args.meta_info_name is None:\n args.meta_info_name = args.dataset_name\n\n image_dir = Path(args.root_path) / \"images\"\n output_dir = Path(\"./data\")\n output_dir.mkdir(exist_ok=True)\n\n # Collect all video folder paths\n frames_dir_paths = collect_video_folder_paths(image_dir)\n\n meta_infos = []\n for frames_dir_path in frames_dir_paths:\n meta_info = construct_meta_info(frames_dir_path)\n if meta_info:\n meta_infos.append(meta_info)\n\n output_file = output_dir / f\"{args.meta_info_name}_stage1.json\"\n with output_file.open(\"w\", encoding=\"utf-8\") as f:\n json.dump(meta_infos, f, indent=4)\n\n print(f\"Final data count: {len(meta_infos)}\")", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/extract_meta_info_stage1.py", "stars": 8520, "label": 0} +{"function": "def get_video_paths(root_path: Path, extensions: list) -> list:\n \"\"\"\n Get a list of video paths from the root path with the specified extensions.\n\n Args:\n root_path (Path): The root directory containing video files.\n extensions (list): List of file extensions to include.\n\n Returns:\n list: List of video file paths.\n \"\"\"\n return [str(path.resolve()) for path in root_path.iterdir() if path.suffix in extensions]", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/extract_meta_info_stage2.py", "stars": 8520, "label": 0} +{"function": "def file_exists(file_path: str) -> bool:\n \"\"\"\n Check if a file exists.\n\n Args:\n file_path (str): The path to the file.\n\n Returns:\n bool: True if the file exists, False otherwise.\n \"\"\"\n return os.path.exists(file_path)", "creation_date": "2024-06-27T06:54:01Z", "repo": "fudan-generative-vision/hallo", "file_path": "scripts/extract_meta_info_stage2.py", "stars": 8520, "label": 0} +{"function": "def exception_hook(exctype, value, tb):\n logger.error(\"\".join(traceback.format_exception(exctype, value, tb)))\n sys.__excepthook__(exctype, value, tb) # \u8c03\u7528\u9ed8\u8ba4\u7684\u5f02\u5e38\u5904\u7406", "creation_date": "2024-10-04T03:13:20Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "main.py", "stars": 8310, "label": 0} +{"function": "def create_temp_dir():\n \"\"\"\u521b\u5efa\u4e34\u65f6\u76ee\u5f55\u7528\u4e8e\u5b58\u50a8\u5904\u7406\u6587\u4ef6\"\"\"\n temp_dir = Path(\"temp\")\n temp_dir.mkdir(exist_ok=True)\n return temp_dir", "creation_date": "2025-01-08T18:32:33Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "streamlit_app.py", "stars": 8310, "label": 0} +{"function": "def format_time(milliseconds):\n \"\"\"\u5c06\u6beb\u79d2\u8f6c\u6362\u4e3a\u65f6:\u5206:\u79d2.\u6beb\u79d2\u683c\u5f0f\"\"\"\n total_seconds = milliseconds / 1000\n hours = int(total_seconds // 3600)\n minutes = int((total_seconds % 3600) // 60)\n seconds = int(total_seconds % 60)\n ms = int((total_seconds * 1000) % 1000)\n\n if hours > 0:\n return f\"{hours:02d}:{minutes:02d}:{seconds:02d}.{ms:03d}\"\n else:\n return f\"{minutes:02d}:{seconds:02d}.{ms:03d}\"", "creation_date": "2025-01-08T18:32:33Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "streamlit_app.py", "stars": 8310, "label": 0} +{"function": "def format_duration(milliseconds):\n \"\"\"\u5c06\u6beb\u79d2\u8f6c\u6362\u4e3a\u65f6\u5206\u79d2\u7684\u663e\u793a\u683c\u5f0f\"\"\"\n total_seconds = milliseconds 
/ 1000\n hours = int(total_seconds // 3600)\n minutes = int((total_seconds % 3600) // 60)\n seconds = int(total_seconds % 60)\n\n if hours > 0:\n return f\"{hours:02d}\u65f6{minutes:02d}\u5206{seconds:02d}\u79d2\"\n elif minutes > 0:\n return f\"{minutes:02d}\u5206{seconds:02d}\u79d2\"\n else:\n return f\"{seconds:02d}\u79d2\"", "creation_date": "2025-01-08T18:32:33Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "streamlit_app.py", "stars": 8310, "label": 0} +{"function": "def asr_page():\n st.title(\"\ud83c\udfaf ASR \u89c6\u9891\u5b57\u5e55\u8bc6\u522b\")\n st.markdown(\"---\")\n\n # \u521d\u59cb\u5316session state\n if \"srt_content\" not in st.session_state:\n st.session_state.srt_content = None\n if \"subtitle_path\" not in st.session_state:\n st.session_state.subtitle_path = None\n if \"asr_data\" not in st.session_state:\n st.session_state.asr_data = None\n if \"translated_asr_data\" not in st.session_state:\n st.session_state.translated_asr_data = None\n\n temp_dir = create_temp_dir()\n\n # \u521b\u5efa\u4e24\u5217\u5e03\u5c40\n col1, col2 = st.columns([1, 1])\n\n with col1:\n st.markdown(\"### \ud83d\udcfa \u89c6\u9891\u9884\u89c8\")\n video_file = st.file_uploader(\n label=\"\u4e0a\u4f20\u89c6\u9891\u6587\u4ef6\",\n type=[\"mp4\", \"mov\", \"avi\", \"mkv\", \"flv\"],\n key=\"asr_video\",\n accept_multiple_files=False,\n label_visibility=\"collapsed\",\n help=\"\u652f\u6301\u7684\u89c6\u9891\u683c\u5f0f: MP4, MOV, AVI, MKV, WMV, FLV, WebM, M4V\",\n )\n video_placeholder = st.empty()\n\n if video_file:\n video_path = temp_dir / video_file.name\n # \u68c0\u67e5\u6587\u4ef6\u662f\u5426\u5df2\u5b58\u5728\uff0c\u907f\u514d\u91cd\u590d\u5199\u5165\n if not video_path.exists():\n with open(video_path, \"wb\") as f:\n f.write(video_file.getbuffer())\n logger.info(f\"\u89c6\u9891\u6587\u4ef6\u5df2\u4fdd\u5b58\u5230: {video_path}\")\n\n video_placeholder.video(\n video_file,\n subtitles=(\n st.session_state.subtitle_path\n if st.session_state.subtitle_path\n else None\n ),\n )\n\n with col2:\n st.markdown(\"### \ud83c\udfaf \u64cd\u4f5c\u9762\u677f\")\n if video_file is not None:\n st.success(\"\u2705 \u89c6\u9891\u4e0a\u4f20\u6210\u529f\uff01\")\n\n if st.button(\"\ud83d\ude80 \u5f00\u59cb\u8bc6\u522b\", use_container_width=True):\n with st.spinner(\"\u23f3 \u6b63\u5728\u5904\u7406\u4e2d...\"):\n try:\n logger.info(f\"\u5f00\u59cb\u5904\u7406\u89c6\u9891\u6587\u4ef6: {video_file.name}\")\n # \u8f6c\u6362\u4e3a\u97f3\u9891\n audio_path = temp_dir / f\"{video_path.stem}.wav\"\n logger.info(f\"\u5f00\u59cb\u5c06\u89c6\u9891\u8f6c\u6362\u4e3a\u97f3\u9891: {audio_path}\")\n is_success = video2audio(str(video_path), str(audio_path))\n\n if not is_success:\n logger.error(\"\u97f3\u9891\u8f6c\u6362\u5931\u8d25\")\n st.error(\"\u97f3\u9891\u8f6c\u6362\u5931\u8d25\")\n return\n\n logger.info(\"\u5f00\u59cbASR\u8bc6\u522b\")\n # \u4f7f\u7528BcutASR\u8fdb\u884c\u8bc6\u522b\n asr = BcutASR(str(audio_path))\n asr_data = asr.run()\n logger.info(\"ASR\u8bc6\u522b\u5b8c\u6210\")\n\n st.session_state.srt_content = asr_data.to_srt()\n st.session_state.asr_data = asr_data\n\n # \u4fdd\u5b58\u5b57\u5e55\u6587\u4ef6\n subtitle_path = temp_dir / f\"{video_path.stem}.srt\"\n logger.info(f\"\u4fdd\u5b58\u5b57\u5e55\u6587\u4ef6\u5230: {subtitle_path}\")\n with open(subtitle_path, \"w\", encoding=\"utf-8\") as f:\n f.write(st.session_state.srt_content)\n\n st.session_state.subtitle_path = str(subtitle_path)\n\n # \u4f7f\u7528\u4e4b\u524d\u521b\u5efa\u7684\u5bb9\u5668\u66f4\u65b0\u89c6\u9891\u663e\u793a\n 
video_placeholder.video(\n video_file, subtitles=st.session_state.subtitle_path\n )\n\n logger.info(\"\u5b57\u5e55\u8bc6\u522b\u5168\u6d41\u7a0b\u5b8c\u6210\")\n st.success(\"\u2728 \u8bc6\u522b\u5b8c\u6210\uff01\")\n\n # \u663e\u793a\u5b57\u5e55\u7edf\u8ba1\u4fe1\u606f\n if st.session_state.asr_data:\n st.markdown(\"### \ud83d\udcca \u5b57\u5e55\u7edf\u8ba1\")\n segments = st.session_state.asr_data.segments\n total_segments = len(segments)\n total_duration = sum(\n seg.end_time - seg.start_time for seg in segments\n )\n total_chars = sum(len(seg.text.strip()) for seg in segments)\n avg_segment_duration = (\n total_duration / total_segments\n if total_segments > 0\n else 0\n )\n\n col_stats1, col_stats2, col_stats3 = st.columns(3)\n with col_stats1:\n st.metric(\"\u5b57\u5e55\u6bb5\u843d\u6570\", f\"{total_segments} \u6bb5\")\n with col_stats2:\n st.metric(\"\u603b\u65f6\u957f\", format_duration(total_duration))\n with col_stats3:\n st.metric(\"\u603b\u5b57\u6570\", f\"{total_chars} \u5b57\")\n\n except Exception as e:\n logger.exception(f\"\u5904\u7406\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u9519\u8bef: {str(e)}\")\n st.error(f\"\u5904\u7406\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u9519\u8bef: {str(e)}\")\n finally:\n # \u6e05\u7406\u97f3\u9891\u6587\u4ef6\n if \"audio_path\" in locals() and audio_path.exists():\n logger.info(f\"\u6e05\u7406\u4e34\u65f6\u97f3\u9891\u6587\u4ef6: {audio_path}\")\n os.remove(audio_path)\n\n # \u5982\u679c\u6709\u5b57\u5e55\u5185\u5bb9\uff0c\u663e\u793a\u9884\u89c8\u548c\u4e0b\u8f7d\u533a\u57df\n if st.session_state.srt_content and st.session_state.asr_data:\n st.markdown(\"---\")\n # \u521b\u5efa\u5b57\u5e55\u9884\u89c8\u533a\u57df\n with st.expander(\"\ud83d\udcdd \u5b57\u5e55\u9884\u89c8\", expanded=True):\n # \u6dfb\u52a0\u641c\u7d22\u6846\u548c\u8fc7\u6ee4\u9009\u9879\n search_term = st.text_input(\n \"\ud83d\udd0d \u641c\u7d22\u5b57\u5e55\u5185\u5bb9\",\n key=\"subtitle_search\",\n placeholder=\"\u8f93\u5165\u5173\u952e\u8bcd\u8fdb\u884c\u641c\u7d22...\",\n )\n\n # \u5c06\u5b57\u5e55\u5185\u5bb9\u8f6c\u6362\u4e3aDataFrame\u683c\u5f0f\u663e\u793a\n segments = st.session_state.asr_data.segments\n df = pd.DataFrame(\n [\n {\n \"\u5e8f\u53f7\": i + 1,\n \"\u5f00\u59cb\u65f6\u95f4\": format_time(seg.start_time),\n \"\u7ed3\u675f\u65f6\u95f4\": format_time(seg.end_time),\n \"\u65f6\u957f(\u79d2)\": round(\n (seg.end_time - seg.start_time) / 1000, 1\n ),\n \"\u5b57\u5e55\u6587\u672c\": seg.text.strip(),\n }\n for i, seg in enumerate(segments)\n ]\n )\n\n # \u5e94\u7528\u8fc7\u6ee4\u6761\u4ef6\n if search_term:\n df = df[\n df[\"\u5b57\u5e55\u6587\u672c\"].str.contains(\n search_term, case=False, na=False\n )\n ]\n\n # \u4f7f\u7528\u81ea\u5b9a\u4e49\u6837\u5f0f\u663e\u793a\u6570\u636e\n st.dataframe(\n df,\n use_container_width=True,\n height=400,\n hide_index=True,\n column_config={\n \"\u5e8f\u53f7\": st.column_config.NumberColumn(\n \"\u5e8f\u53f7\", help=\"\u5b57\u5e55\u6bb5\u843d\u5e8f\u53f7\", format=\"%d\", width=\"small\"\n ),\n \"\u5f00\u59cb\u65f6\u95f4\": st.column_config.TextColumn(\n \"\u5f00\u59cb\u65f6\u95f4\", help=\"\u5b57\u5e55\u5f00\u59cb\u65f6\u95f4\", width=\"small\"\n ),\n \"\u7ed3\u675f\u65f6\u95f4\": st.column_config.TextColumn(\n \"\u7ed3\u675f\u65f6\u95f4\", help=\"\u5b57\u5e55\u7ed3\u675f\u65f6\u95f4\", width=\"small\"\n ),\n \"\u65f6\u957f(\u79d2)\": st.column_config.NumberColumn(\n \"\u65f6\u957f(\u79d2)\",\n help=\"\u5b57\u5e55\u6301\u7eed\u65f6\u95f4\",\n format=\"%.1f\",\n width=\"small\",\n ),\n \"\u5b57\u5e55\u6587\u672c\": 
st.column_config.TextColumn(\n \"\u5b57\u5e55\u6587\u672c\", help=\"\u8bc6\u522b\u51fa\u7684\u5b57\u5e55\u5185\u5bb9\", width=\"medium\"\n ),\n },\n )\n\n # \u4e0b\u8f7d\u6309\u94ae\u533a\u57df\n st.markdown(\"### \ud83d\udcbe \u5bfc\u51fa\u5b57\u5e55\")\n st.download_button(\n label=\"\ud83d\udce5 \u4e0b\u8f7d SRT \u5b57\u5e55\u6587\u4ef6\",\n data=st.session_state.srt_content,\n file_name=f\"{video_file.name.rsplit('.', 1)[0]}.srt\",\n mime=\"text/plain\",\n use_container_width=True,\n )", "creation_date": "2025-01-08T18:32:33Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "streamlit_app.py", "stars": 8310, "label": 0} +{"function": "def translation_page():\n st.title(\"\ud83c\udf0f \u5b57\u5e55\u7ffb\u8bd1\")\n st.markdown(\"---\")\n\n # \u521d\u59cb\u5316session state\n if \"translated_content\" not in st.session_state:\n st.session_state.translated_content = None\n if \"current_subtitle_file\" not in st.session_state:\n st.session_state.current_subtitle_file = None\n if \"translation_done\" not in st.session_state:\n st.session_state.translation_done = False\n\n temp_dir = create_temp_dir()\n\n # \u4f7f\u7528\u5bb9\u5668\u5e03\u5c40\n with st.container():\n subtitle_file = st.file_uploader(\n label=\"\u4e0a\u4f20\u5b57\u5e55\u6587\u4ef6\",\n type=[\"srt\", \"ass\", \"vtt\"],\n key=\"trans_subtitle\",\n label_visibility=\"visible\",\n help=\"\u652f\u6301 SRT\u3001ASS\u3001VTT \u683c\u5f0f\u7684\u5b57\u5e55\u6587\u4ef6\",\n )\n\n target_language = st.selectbox(\n \"\u9009\u62e9\u8981\u7ffb\u8bd1\u6210\u7684\u76ee\u6807\u8bed\u8a00\",\n [\n \"\u82f1\u8bed\",\n \"\u7b80\u4f53\u4e2d\u6587\",\n \"\u7e41\u4f53\u4e2d\u6587\",\n \"\u65e5\u672c\u8a9e\",\n \"\u97e9\u8bed\",\n \"\u7ca4\u8bed\",\n \"\u6cd5\u8bed\",\n \"\u5fb7\u8bed\",\n \"\u897f\u73ed\u7259\u8bed\",\n \"\u4fc4\u8bed\",\n \"\u8461\u8404\u7259\u8bed\",\n \"\u571f\u8033\u5176\u8bed\",\n ],\n index=0,\n help=\"\u9009\u62e9\u8981\u5c06\u5b57\u5e55\u7ffb\u8bd1\u6210\u7684\u76ee\u6807\u8bed\u8a00\",\n )\n\n # \u5982\u679c\u4e0a\u4f20\u4e86\u65b0\u6587\u4ef6\uff0c\u6e05\u7406\u65e7\u6587\u4ef6\u548c\u72b6\u6001\n if (\n subtitle_file is not None\n and subtitle_file != st.session_state.current_subtitle_file\n ):\n if st.session_state.current_subtitle_file:\n old_path = temp_dir / st.session_state.current_subtitle_file.name\n if os.path.exists(old_path):\n os.remove(old_path)\n st.session_state.current_subtitle_file = subtitle_file\n st.session_state.translation_done = False\n st.session_state.translated_content = None\n st.session_state.translated_asr_data = None\n\n if subtitle_file is not None:\n subtitle_path = temp_dir / subtitle_file.name\n with open(subtitle_path, \"wb\") as f:\n f.write(subtitle_file.getbuffer())\n\n # \u663e\u793a\u539f\u59cb\u5b57\u5e55\u9884\u89c8\n with st.expander(\"\u539f\u59cb\u5b57\u5e55\u9884\u89c8\"):\n asr_data = ASRData.from_subtitle_file(str(subtitle_path))\n st.session_state.asr_data = asr_data\n subtitle_json = st.session_state.asr_data.to_json()\n df = pd.DataFrame(\n [\n {\n \"\u5f00\u59cb\u65f6\u95f4\": format_time(v[\"start_time\"]),\n \"\u7ed3\u675f\u65f6\u95f4\": format_time(v[\"end_time\"]),\n \"\u539f\u6587\": v[\"original_subtitle\"],\n \"\u8bd1\u6587\": v[\"translated_subtitle\"],\n }\n for k, v in subtitle_json.items()\n ]\n )\n\n st.dataframe(df, use_container_width=True)\n\n # \u5f00\u59cb\u7ffb\u8bd1\u6309\u94ae\n if st.button(\"\u5f00\u59cb\u7ffb\u8bd1\", use_container_width=True):\n with st.spinner(\"\u6b63\u5728\u7ffb\u8bd1\u4e2d...\"):\n try:\n 
logger.info(f\"\u5f00\u59cb\u7ffb\u8bd1\u5b57\u5e55\u6587\u4ef6: {subtitle_file.name}\")\n # \u8bfb\u53d6\u5b57\u5e55\u6587\u4ef6\n asr_data = ASRData.from_subtitle_file(str(subtitle_path))\n\n logger.info(f\"\u76ee\u6807\u8bed\u8a00: {target_language}\")\n # \u521b\u5efa\u4f18\u5316\u5668\u5b9e\u4f8b\uff08\u7528\u4e8e\u7ffb\u8bd1\uff09\n translator = TranslatorFactory.create_translator(\n translator_type=TranslatorType.BING,\n target_language=target_language,\n )\n\n # \u51c6\u5907\u5b57\u5e55\u6570\u636e\n subtitle_json = {\n str(k): v[\"original_subtitle\"]\n for k, v in asr_data.to_json().items()\n }\n logger.info(f\"\u5f85\u7ffb\u8bd1\u5b57\u5e55\u6bb5\u843d\u6570: {len(subtitle_json)}\")\n\n # \u6267\u884c\u7ffb\u8bd1\n logger.info(\"\u5f00\u59cb\u591a\u7ebf\u7a0b\u7ffb\u8bd1\")\n asr_data = translator.translate_subtitle(asr_data)\n logger.info(\"\u7ffb\u8bd1\u5b8c\u6210\")\n\n # \u4fdd\u5b58\u7ffb\u8bd1\u540e\u7684\u5b57\u5e55\n st.session_state.translated_content = asr_data.to_srt()\n st.session_state.translated_asr_data = asr_data\n st.session_state.translation_done = True\n\n logger.info(\"\u5b57\u5e55\u7ffb\u8bd1\u5168\u6d41\u7a0b\u5b8c\u6210\")\n st.success(\"\u7ffb\u8bd1\u5b8c\u6210\uff01\")\n\n except Exception as e:\n logger.exception(f\"\u7ffb\u8bd1\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u9519\u8bef: {str(e)}\")\n st.error(f\"\u7ffb\u8bd1\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u9519\u8bef: {str(e)}\")\n\n # \u5982\u679c\u7ffb\u8bd1\u5b8c\u6210\uff0c\u663e\u793a\u7ed3\u679c\u548c\u4e0b\u8f7d\u6309\u94ae\n if (\n st.session_state.translation_done\n and st.session_state.translated_asr_data is not None\n ):\n # \u663e\u793a\u7ffb\u8bd1\u540e\u7684\u9884\u89c8\n st.subheader(\"\u7ffb\u8bd1\u7ed3\u679c\u9884\u89c8\")\n subtitle_json = st.session_state.translated_asr_data.to_json()\n df = pd.DataFrame(\n [\n {\n \"\u5f00\u59cb\u65f6\u95f4\": format_time(v[\"start_time\"]),\n \"\u7ed3\u675f\u65f6\u95f4\": format_time(v[\"end_time\"]),\n \"\u539f\u6587\": v[\"original_subtitle\"],\n \"\u8bd1\u6587\": v[\"translated_subtitle\"],\n }\n for k, v in subtitle_json.items()\n ]\n )\n\n st.dataframe(df, use_container_width=True)\n\n # \u63d0\u4f9b\u4e0b\u8f7d\u6309\u94ae\n st.download_button(\n label=\"\u4e0b\u8f7d\u7ffb\u8bd1\u540e\u7684\u5b57\u5e55\",\n data=st.session_state.translated_content,\n file_name=f\"translated_{subtitle_file.name}\",\n mime=\"text/plain\",\n use_container_width=True,\n )", "creation_date": "2025-01-08T18:32:33Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "streamlit_app.py", "stars": 8310, "label": 0} +{"function": "def main():\n logger.info(\"\u5e94\u7528\u7a0b\u5e8f\u542f\u52a8\")\n # \u4fa7\u8fb9\u680f\u8bbe\u8ba1\n st.sidebar.markdown(\n \"\"\"\n # \ud83c\udfa5 \u5361\u5361\u5b57\u5e55\u52a9\u624b\n ---\n ### \ud83d\udee0\ufe0f \u529f\u80fd\u5217\u8868\n \"\"\"\n )\n\n # \u521b\u5efa\u7f8e\u5316\u540e\u7684\u5bfc\u822a\u9009\u9879\n page = st.sidebar.radio(\"\", options=[\"\ud83c\udfaf ASR \u5b57\u5e55\u8bc6\u522b\", \"\ud83c\udf0f \u5b57\u5e55\u7ffb\u8bd1\"], index=0)\n\n logger.info(f\"\u7528\u6237\u9009\u62e9\u9875\u9762: {page}\")\n # \u6839\u636e\u9009\u62e9\u663e\u793a\u4e0d\u540c\u7684\u9875\u9762\n if \"ASR\" in page:\n asr_page()\n else:\n translation_page()", "creation_date": "2025-01-08T18:32:33Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "streamlit_app.py", "stars": 8310, "label": 0} +{"function": " def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.setObjectName(\"batchProcessInterface\")\n 
self.setWindowTitle(self.tr(\"\u6279\u91cf\u5904\u7406\"))\n self.setAcceptDrops(True)\n self.batch_thread = BatchProcessThread()\n\n self.init_ui()\n self.setup_connections()", "creation_date": "2024-10-30T00:56:24Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "app/view/batch_process_interface.py", "stars": 8310, "label": 0} +{"function": " def init_ui(self):\n # \u521b\u5efa\u4e3b\u5e03\u5c40\n main_layout = QVBoxLayout(self)\n main_layout.setContentsMargins(16, 16, 16, 16)\n main_layout.setSpacing(8)\n\n # \u9876\u90e8\u63a7\u5236\u533a\u57df\n top_layout = QHBoxLayout()\n top_layout.setSpacing(8)\n\n # \u4efb\u52a1\u7c7b\u578b\u9009\u62e9\n self.task_type_combo = ComboBox()\n self.task_type_combo.addItems([str(task_type) for task_type in BatchTaskType])\n self.task_type_combo.setCurrentText(str(BatchTaskType.FULL_PROCESS))\n\n # \u63a7\u5236\u6309\u94ae\n self.add_file_btn = PushButton(\"\u6dfb\u52a0\u6587\u4ef6\", icon=FIF.ADD)\n self.start_all_btn = PushButton(\"\u5f00\u59cb\u5904\u7406\", icon=FIF.PLAY)\n self.clear_btn = PushButton(\"\u6e05\u7a7a\u5217\u8868\", icon=FIF.DELETE)\n\n # \u6dfb\u52a0\u5230\u9876\u90e8\u5e03\u5c40\n top_layout.addWidget(self.task_type_combo)\n top_layout.addWidget(self.add_file_btn)\n top_layout.addWidget(self.clear_btn)\n\n top_layout.addStretch()\n top_layout.addWidget(self.start_all_btn)\n\n # \u521b\u5efa\u4efb\u52a1\u8868\u683c\n self.task_table = TableWidget()\n self.task_table.setColumnCount(3)\n self.task_table.setHorizontalHeaderLabels([\"\u6587\u4ef6\u540d\", \"\u8fdb\u5ea6\", \"\u72b6\u6001\"])\n\n # \u8bbe\u7f6e\u8868\u683c\u6837\u5f0f\n self.task_table.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)\n self.task_table.horizontalHeader().setSectionResizeMode(1, QHeaderView.Fixed)\n self.task_table.horizontalHeader().setSectionResizeMode(2, QHeaderView.Fixed)\n self.task_table.setColumnWidth(1, 250) # \u8fdb\u5ea6\u6761\u5217\u5bbd\n self.task_table.setColumnWidth(2, 160) # \u72b6\u6001\u5217\u5bbd\n\n # \u8bbe\u7f6e\u884c\u9ad8\n self.task_table.verticalHeader().setDefaultSectionSize(40) # \u8bbe\u7f6e\u9ed8\u8ba4\u884c\u9ad8\n\n # \u8bbe\u7f6e\u8868\u683c\u8fb9\u6846\n self.task_table.setBorderVisible(True)\n self.task_table.setBorderRadius(12)\n\n # \u8bbe\u7f6e\u8868\u683c\u4e0d\u53ef\u7f16\u8f91\n self.task_table.setEditTriggers(QTableWidget.NoEditTriggers)\n\n # \u8bbe\u7f6e\u8868\u683c\u5927\u5c0f\u7b56\u7565\n self.task_table.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.task_table.setMinimumHeight(300) # \u8bbe\u7f6e\u6700\u5c0f\u9ad8\u5ea6\n\n # \u8fde\u63a5\u53cc\u51fb\u4fe1\u53f7\n self.task_table.doubleClicked.connect(self.on_table_double_clicked)\n\n # \u6dfb\u52a0\u5230\u4e3b\u5e03\u5c40\n main_layout.addLayout(top_layout)\n main_layout.addWidget(self.task_table)\n\n # \u8fde\u63a5\u4fe1\u53f7\n self.add_file_btn.clicked.connect(self.on_add_file_clicked)\n self.start_all_btn.clicked.connect(self.start_all_tasks)\n self.clear_btn.clicked.connect(self.clear_tasks)\n self.task_type_combo.currentTextChanged.connect(self.on_task_type_changed)", "creation_date": "2024-10-30T00:56:24Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "app/view/batch_process_interface.py", "stars": 8310, "label": 0} +{"function": " def setup_connections(self):\n # \u6279\u5904\u7406\u7ebf\u7a0b\u4fe1\u53f7\u8fde\u63a5\n self.batch_thread.task_progress.connect(self.update_task_progress)\n self.batch_thread.task_error.connect(self.on_task_error)\n 
self.batch_thread.task_completed.connect(self.on_task_completed)\n\n # \u8868\u683c\u53f3\u952e\u83dc\u5355\n self.task_table.setContextMenuPolicy(Qt.CustomContextMenu)\n self.task_table.customContextMenuRequested.connect(self.show_context_menu)", "creation_date": "2024-10-30T00:56:24Z", "repo": "WEIFENG2333/VideoCaptioner", "file_path": "app/view/batch_process_interface.py", "stars": 8310, "label": 0} +{"function": "def get_configs_dir() -> Path:\n return (Path(__file__).parent.parent / \"configs\").resolve()", "creation_date": "2024-10-23T18:47:47Z", "repo": "oumi-ai/oumi", "file_path": "tests/__init__.py", "stars": 8273, "label": 0} +{"function": "def get_testdata_dir() -> Path:\n return (Path(__file__).parent / \"testdata\").resolve()", "creation_date": "2024-10-23T18:47:47Z", "repo": "oumi-ai/oumi", "file_path": "tests/__init__.py", "stars": 8273, "label": 0} +{"function": "def get_notebooks_dir() -> Path:\n return (Path(__file__).parent.parent / \"notebooks\").resolve()", "creation_date": "2024-10-23T18:47:47Z", "repo": "oumi-ai/oumi", "file_path": "tests/__init__.py", "stars": 8273, "label": 0} +{"function": "def root_testdata_dir() -> Path:\n return Path(__file__).parent / \"testdata\"", "creation_date": "2024-10-22T18:39:26Z", "repo": "oumi-ai/oumi", "file_path": "tests/conftest.py", "stars": 8273, "label": 0} +{"function": "def setup_logging():\n \"\"\"Fixture to set up logging for all tests.\n\n We want to propagate to the root logger so that\n pytest caplog can capture logs, and we can test\n logging for the default oumi logger.\n \"\"\"\n logger = get_logger(\"oumi\")\n logger.propagate = True\n return logger", "creation_date": "2024-10-22T18:39:26Z", "repo": "oumi-ai/oumi", "file_path": "tests/conftest.py", "stars": 8273, "label": 0} +{"function": "def retain_logging_level():\n \"\"\"Fixture to preserve the logging level between tests.\"\"\"\n logger = get_logger(\"oumi\")\n # Store the current log level\n log_level = logger.level\n yield\n # Rehydrate the log level\n logger.setLevel(log_level)", "creation_date": "2024-10-22T18:39:26Z", "repo": "oumi-ai/oumi", "file_path": "tests/conftest.py", "stars": 8273, "label": 0} +{"function": "def single_turn_conversation():\n return Conversation(\n messages=[\n Message(role=Role.USER, content=\"Hello\"),\n Message(role=Role.ASSISTANT, content=\"Hi there!\"),\n ]\n )", "creation_date": "2024-10-22T18:39:26Z", "repo": "oumi-ai/oumi", "file_path": "tests/conftest.py", "stars": 8273, "label": 0} +{"function": "def requires_gpus(count: int = 1, min_gb: float = 0.0) -> pytest.MarkDecorator:\n \"\"\"Decorator to skip a test if the required number of GPUs is not available.\n\n Args:\n count (int): The number of GPUs required for the test. Defaults to 1.\n min_gb: Min required GPU VRAM in GB-s. 
Has no effect if zero or negative.\n\n Returns:\n pytest.MarkDecorator: A decorator that skips the test if the required\n number of GPUs is not available.\n \"\"\"\n\n if not torch.cuda.is_available():\n return pytest.mark.skip(reason=\"CUDA is not available\")\n\n gpu_count = torch.cuda.device_count()\n\n error_message = \"\"\n if gpu_count < count:\n error_message = (\n f\"Not enough GPUs to run the test: requires '{count}',\"\n f\" got '{torch.cuda.device_count()}'\"\n )\n elif min_gb > 0.0:\n eps = 1e-2 # relative tolerance\n for device_idx in range(gpu_count):\n _, total_memory = torch.cuda.mem_get_info(device_idx)\n total_memory_gb = float(total_memory) / float(1024 * 1024 * 1024)\n if total_memory_gb < min_gb * (1 - eps):\n device_name = torch.cuda.get_device_name(device_idx)\n error_message = (\n \"Not enough GPU memory to run the test: \"\n f\"requires {min_gb:.3f}GB, got {total_memory_gb:.3f}GB. \"\n f\"GPU: {device_name}\"\n ) + (f\" ({device_idx + 1} of {gpu_count})\" if gpu_count > 1 else \"\")\n\n return pytest.mark.skipif(len(error_message) > 0, reason=error_message)", "creation_date": "2024-10-23T18:47:47Z", "repo": "oumi-ai/oumi", "file_path": "tests/markers.py", "stars": 8273, "label": 0} +{"function": "def requires_cuda_initialized() -> pytest.MarkDecorator:\n if not torch.cuda.is_available():\n return pytest.mark.skip(reason=\"CUDA is not available\")\n\n if not torch.cuda.is_initialized():\n torch.cuda.init()\n\n return pytest.mark.skipif(\n not torch.cuda.is_initialized(), reason=\"CUDA is not initialized\"\n )", "creation_date": "2024-10-23T18:47:47Z", "repo": "oumi-ai/oumi", "file_path": "tests/markers.py", "stars": 8273, "label": 0} +{"function": "def requires_cuda_not_available() -> pytest.MarkDecorator:\n return pytest.mark.skipif(torch.cuda.is_available(), reason=\"CUDA is available\")", "creation_date": "2024-10-23T18:47:47Z", "repo": "oumi-ai/oumi", "file_path": "tests/markers.py", "stars": 8273, "label": 0} +{"function": "def test_encoder_initialization():\n \"\"\"Test encoder initialization\"\"\"\n encoder = MemvidEncoder()\n assert encoder.chunks == []\n assert encoder.index_manager is not None", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_encoder.py", "stars": 8174, "label": 0} +{"function": "def test_add_chunks():\n \"\"\"Test adding chunks\"\"\"\n encoder = MemvidEncoder()\n chunks = [\"chunk1\", \"chunk2\", \"chunk3\"]\n \n encoder.add_chunks(chunks)\n assert len(encoder.chunks) == 3\n assert encoder.chunks == chunks", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_encoder.py", "stars": 8174, "label": 0} +{"function": "def test_add_text():\n \"\"\"Test adding text with auto-chunking\"\"\"\n encoder = MemvidEncoder()\n text = \"This is a test. 
\" * 50 # 800 characters\n \n encoder.add_text(text, chunk_size=100, overlap=20)\n assert len(encoder.chunks) > 1\n assert all(chunk for chunk in encoder.chunks) # No empty chunks", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_encoder.py", "stars": 8174, "label": 0} +{"function": "def test_build_video():\n \"\"\"Test video building (integration test)\"\"\"\n encoder = MemvidEncoder()\n chunks = [\n \"Test chunk 1: Important information\",\n \"Test chunk 2: More data here\",\n \"Test chunk 3: Final piece of info\"\n ]\n encoder.add_chunks(chunks)\n \n with tempfile.TemporaryDirectory() as temp_dir:\n video_file = os.path.join(temp_dir, \"test.mp4\")\n index_file = os.path.join(temp_dir, \"test_index.json\")\n \n # Build video\n stats = encoder.build_video(video_file, index_file, show_progress=False)\n \n # Check files exist\n assert os.path.exists(video_file)\n assert os.path.exists(index_file)\n assert os.path.exists(index_file.replace('.json', '.faiss'))\n \n # Check stats\n assert stats[\"total_chunks\"] == 3\n assert stats[\"total_frames\"] == 3\n assert stats[\"video_size_mb\"] > 0\n assert stats[\"duration_seconds\"] > 0", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_encoder.py", "stars": 8174, "label": 0} +{"function": "def test_encoder_stats():\n \"\"\"Test encoder statistics\"\"\"\n encoder = MemvidEncoder()\n chunks = [\"short\", \"medium length chunk\", \"this is a longer chunk with more text\"]\n encoder.add_chunks(chunks)\n \n stats = encoder.get_stats()\n assert stats[\"total_chunks\"] == 3\n assert stats[\"total_characters\"] == sum(len(c) for c in chunks)\n assert stats[\"avg_chunk_size\"] > 0", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_encoder.py", "stars": 8174, "label": 0} +{"function": "def test_clear():\n \"\"\"Test clearing encoder\"\"\"\n encoder = MemvidEncoder()\n encoder.add_chunks([\"test1\", \"test2\"])\n \n encoder.clear()\n assert encoder.chunks == []\n assert encoder.get_stats()[\"total_chunks\"] == 0", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_encoder.py", "stars": 8174, "label": 0} +{"function": "def setup_test_memory():\n \"\"\"Create test video and index\"\"\"\n encoder = MemvidEncoder()\n chunks = [\n \"Quantum computing uses qubits for parallel processing\",\n \"Machine learning models require large datasets\",\n \"Neural networks mimic brain structure\",\n \"Cloud computing provides scalable resources\",\n \"Blockchain ensures data immutability\"\n ]\n encoder.add_chunks(chunks)\n \n with tempfile.TemporaryDirectory() as temp_dir:\n video_file = os.path.join(temp_dir, \"test.mp4\")\n index_file = os.path.join(temp_dir, \"test_index.json\")\n \n encoder.build_video(video_file, index_file, show_progress=False)\n \n yield video_file, index_file, chunks", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_retriever.py", "stars": 8174, "label": 0} +{"function": "def test_retriever_initialization(setup_test_memory):\n \"\"\"Test retriever initialization\"\"\"\n video_file, index_file, chunks = setup_test_memory\n \n retriever = MemvidRetriever(video_file, index_file)\n assert retriever.video_file == video_file\n assert retriever.total_frames == len(chunks)", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_retriever.py", "stars": 8174, "label": 0} +{"function": "def 
test_search(setup_test_memory):\n \"\"\"Test semantic search\"\"\"\n video_file, index_file, chunks = setup_test_memory\n retriever = MemvidRetriever(video_file, index_file)\n \n # Search for quantum\n results = retriever.search(\"quantum physics\", top_k=3)\n assert len(results) <= 3\n assert any(\"quantum\" in result.lower() for result in results)\n \n # Search for AI\n results = retriever.search(\"artificial intelligence\", top_k=3)\n assert len(results) <= 3\n assert any(\"neural\" in result.lower() or \"machine\" in result.lower() for result in results)", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_retriever.py", "stars": 8174, "label": 0} +{"function": "def test_search_with_metadata(setup_test_memory):\n \"\"\"Test search with metadata\"\"\"\n video_file, index_file, chunks = setup_test_memory\n retriever = MemvidRetriever(video_file, index_file)\n \n results = retriever.search_with_metadata(\"blockchain\", top_k=2)\n assert len(results) <= 2\n \n if results:\n result = results[0]\n assert \"text\" in result\n assert \"score\" in result\n assert \"chunk_id\" in result\n assert \"frame\" in result\n assert result[\"score\"] > 0", "creation_date": "2025-05-27T16:02:35Z", "repo": "Olow304/memvid", "file_path": "tests/test_retriever.py", "stars": 8174, "label": 0} +{"function": "def _upsample_like(src, tar):\n src = F.interpolate(src, size=tar.shape[2:], mode=\"bilinear\")\n return src", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def __init__(self, in_ch=3, out_ch=3, dirate=1, stride=1):\n super(REBNCONV, self).__init__()\n\n self.conv_s1 = nn.Conv2d(\n in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, stride=stride\n )\n self.bn_s1 = nn.BatchNorm2d(out_ch)\n self.relu_s1 = nn.ReLU(inplace=True)", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def forward(self, x):\n hx = x\n xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))\n\n return xout", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def __init__(self, in_ch=3, mid_ch=12, out_ch=3, img_size=512):\n super(RSU7, self).__init__()\n\n self.in_ch = in_ch\n self.mid_ch = mid_ch\n self.out_ch = out_ch\n\n self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) ## 1 -> 1/2\n\n self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)", 
"creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def forward(self, x):\n b, c, h, w = x.shape\n\n hx = x\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n hx = self.pool4(hx4)\n\n hx5 = self.rebnconv5(hx)\n hx = self.pool5(hx5)\n\n hx6 = self.rebnconv6(hx)\n\n hx7 = self.rebnconv7(hx6)\n\n hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))\n hx6dup = _upsample_like(hx6d, hx5)\n\n hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))\n hx5dup = _upsample_like(hx5d, hx4)\n\n hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))\n hx4dup = _upsample_like(hx4d, hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))\n hx3dup = _upsample_like(hx3d, hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n hx2dup = _upsample_like(hx2d, hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n return hx1d + hxin", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU6, self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def forward(self, x):\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n hx = self.pool4(hx4)\n\n hx5 = self.rebnconv5(hx)\n\n hx6 = self.rebnconv6(hx5)\n\n hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))\n hx5dup = _upsample_like(hx5d, hx4)\n\n hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))\n hx4dup = _upsample_like(hx4d, hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))\n hx3dup = _upsample_like(hx3d, hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n hx2dup = _upsample_like(hx2d, hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n return hx1d + hxin", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU5, self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n self.pool1 = nn.MaxPool2d(2, stride=2, 
ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def forward(self, x):\n hx = x\n\n hxin = self.rebnconvin(hx)\n\n hx1 = self.rebnconv1(hxin)\n hx = self.pool1(hx1)\n\n hx2 = self.rebnconv2(hx)\n hx = self.pool2(hx2)\n\n hx3 = self.rebnconv3(hx)\n hx = self.pool3(hx3)\n\n hx4 = self.rebnconv4(hx)\n\n hx5 = self.rebnconv5(hx4)\n\n hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))\n hx4dup = _upsample_like(hx4d, hx3)\n\n hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))\n hx3dup = _upsample_like(hx3d, hx2)\n\n hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n hx2dup = _upsample_like(hx2d, hx1)\n\n hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n return hx1d + hxin", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": " def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n super(RSU4, self).__init__()\n\n self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)", "creation_date": "2024-05-07T14:24:40Z", "repo": "lllyasviel/IC-Light", "file_path": "briarmbg.py", "stars": 8132, "label": 0} +{"function": "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run a task with a given configuration file.\")\n parser.add_argument('--config', type=str, required=True, help='Path to the configuration file.')\n return parser.parse_args()", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/formula_detection.py", "stars": 8109, "label": 0} +{"function": "def main(config_path):\n config = load_config(config_path)\n task_instances = initialize_tasks_and_models(config)\n\n # get input and output path from config\n input_data = config.get('inputs', None)\n result_path = config.get('outputs', 'outputs'+'/'+TASK_NAME)\n\n # formula_detection_task\n model_formula_detection = task_instances[TASK_NAME]\n\n # for image detection\n detection_results = model_formula_detection.predict_images(input_data, result_path)\n\n # for pdf detection\n # detection_results = model_formula_detection.predict_pdfs(input_data, result_path)\n\n # print(detection_results)\n print(f'The predicted results can be found at {result_path}')", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/formula_detection.py", "stars": 8109, "label": 0} 
+{"function": "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run a task with a given configuration file.\")\n parser.add_argument('--config', type=str, required=True, help='Path to the configuration file.')\n return parser.parse_args()", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/formula_recognition.py", "stars": 8109, "label": 0} +{"function": "def main(config_path):\n config = load_config(config_path)\n task_instances = initialize_tasks_and_models(config)\n\n # get input and output path from config\n input_data = config.get('inputs', None)\n result_path = config.get('outputs', 'outputs'+'/'+TASK_NAME)\n\n # formula_detection_task\n model_formula_recognition = task_instances[TASK_NAME]\n\n # for image detection\n recognition_results = model_formula_recognition.predict(input_data, result_path)\n\n\n print('Recognition results are as follows:')\n for id, math in enumerate(recognition_results):\n print(str(id+1)+': ', math)", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/formula_recognition.py", "stars": 8109, "label": 0} +{"function": "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run a task with a given configuration file.\")\n parser.add_argument('--config', type=str, required=True, help='Path to the configuration file.')\n return parser.parse_args()", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/layout_detection.py", "stars": 8109, "label": 0} +{"function": "def main(config_path):\n config = load_config(config_path)\n task_instances = initialize_tasks_and_models(config)\n\n # get input and output path from config\n input_data = config.get('inputs', None)\n result_path = config.get('outputs', 'outputs'+'/'+TASK_NAME)\n\n # layout_detection_task\n model_layout_detection = task_instances[TASK_NAME]\n\n # for image detection\n detection_results = model_layout_detection.predict_images(input_data, result_path)\n\n # for pdf detection\n # detection_results = model_layout_detection.predict_pdfs(input_data, result_path)\n\n # print(detection_results)\n print(f'The predicted results can be found at {result_path}')", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/layout_detection.py", "stars": 8109, "label": 0} +{"function": "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run a task with a given configuration file.\")\n parser.add_argument('--config', type=str, required=True, help='Path to the configuration file.')\n return parser.parse_args()", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/ocr.py", "stars": 8109, "label": 0} +{"function": "def main(config_path):\n config = load_config(config_path)\n task_instances = initialize_tasks_and_models(config)\n\n # get input and output path from config\n input_data = config.get('inputs', None)\n result_path = config.get('outputs', 'outputs'+'/'+TASK_NAME)\n visualize = config.get('visualize', False)\n\n # formula_detection_task\n task = task_instances[TASK_NAME]\n\n detection_results = task.process(input_data, save_dir=result_path, visualize=visualize)\n\n print(f'Task done, results can be found at {result_path}')", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/ocr.py", "stars": 8109, "label": 0} +{"function": "def parse_args():\n parser = 
argparse.ArgumentParser(description=\"Run a task with a given configuration file.\")\n parser.add_argument('--config', type=str, required=True, help='Path to the configuration file.')\n return parser.parse_args()", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/run_task.py", "stars": 8109, "label": 0} +{"function": "def main(config_path):\n config = load_config(config_path)\n task_instances = initialize_tasks_and_models(config)\n\n # \u4ece\u914d\u7f6e\u6587\u4ef6\u4e2d\u83b7\u53d6\u8f93\u5165\u6570\u636e\u8def\u5f84\n input_data = config.get('inputs', None)\n result_path = config.get('outputs', 'outputs')\n\n # formula_detection_task\n model_formula_detection = task_instances['formula_detection']\n detection_results = model_formula_detection.predict(input_data, result_path)\n print(detection_results)", "creation_date": "2024-10-04T13:22:18Z", "repo": "opendatalab/PDF-Extract-Kit", "file_path": "scripts/run_task.py", "stars": 8109, "label": 0} +{"function": "def pytest_addoption(parser):\n parser.addoption(\n \"--base-url\",\n action=\"store\",\n help=\"Base URL for OpenAI compatible API\",\n )\n parser.addoption(\n \"--api-key\",\n action=\"store\",\n help=\"API key to use for the provider\",\n )\n parser.addoption(\n \"--provider\",\n action=\"store\",\n help=\"Provider to use for testing\",\n )\n parser.addoption(\n \"--model\",\n action=\"store\",\n help=\"Model to use for testing\",\n )", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/conftest.py", "stars": 7907, "label": 0} +{"function": "def pytest_json_runtest_metadata(item, call):\n \"\"\"Add model and case_id to pytest-json report metadata.\"\"\"\n metadata = {}\n nodeid = item.nodeid\n\n # 1. Extract model from callspec if available\n model = item.callspec.params.get(\"model\") if hasattr(item, \"callspec\") else None\n if model:\n metadata[\"model\"] = model\n else:\n # Fallback: Try parsing from nodeid (less reliable)\n match_model = re.search(r\"\\[(.*?)-\", nodeid)\n if match_model:\n model = match_model.group(1) # Store model even if found via fallback\n metadata[\"model\"] = model\n else:\n print(f\"Warning: Could not determine model for test {nodeid}\")\n model = None # Ensure model is None if not found\n\n # 2. 
Extract case_id using the known model string if possible\n if model:\n # Construct a regex pattern to find the case_id *after* the model name and a hyphen.\n # Escape the model name in case it contains regex special characters.\n pattern = re.escape(model) + r\"-(.*?)\\]$\"\n match_case = re.search(pattern, nodeid)\n if match_case:\n case_id = match_case.group(1)\n metadata[\"case_id\"] = case_id\n else:\n # Fallback if the pattern didn't match (e.g., nodeid format unexpected)\n # Try the old less specific regex as a last resort.\n match_case_fallback = re.search(r\"-(.*?)\\]$\", nodeid)\n if match_case_fallback:\n case_id = match_case_fallback.group(1)\n metadata[\"case_id\"] = case_id\n print(f\"Warning: Used fallback regex to parse case_id from nodeid {nodeid}\")\n else:\n print(f\"Warning: Could not parse case_id from nodeid {nodeid} even with fallback.\")\n if \"case\" in (item.callspec.params if hasattr(item, \"callspec\") else {}):\n metadata[\"case_id\"] = \"parsing_failed\"\n elif \"case\" in (item.callspec.params if hasattr(item, \"callspec\") else {}):\n # Cannot reliably parse case_id without model, but we know it's a case test.\n # Try the generic fallback regex.\n match_case_fallback = re.search(r\"-(.*?)\\]$\", nodeid)\n if match_case_fallback:\n case_id = match_case_fallback.group(1)\n metadata[\"case_id\"] = case_id\n print(f\"Warning: Used fallback regex to parse case_id from nodeid {nodeid} (model unknown)\")\n else:\n print(f\"Warning: Could not parse case_id from nodeid {nodeid} (model unknown)\")\n metadata[\"case_id\"] = \"parsing_failed_no_model\"\n # else: Not a test with a model or case param we need to handle.\n\n return metadata", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/conftest.py", "stars": 7907, "label": 0} +{"function": "def run_tests(provider, keyword=None):\n \"\"\"Run pytest for a specific provider and save results\"\"\"\n print(f\"Running tests for provider: {provider}\")\n\n timestamp = int(time.time())\n # Use a constant filename for the final result and temp file\n result_file = RESULTS_DIR / f\"{provider}.json\"\n temp_json_file = RESULTS_DIR / f\"temp_{provider}.json\"\n\n # Determine project root directory relative to this script\n project_root = Path(__file__).parent.parent.parent\n\n # Run pytest with JSON output\n cmd = [\n \"python\",\n \"-m\",\n \"pytest\",\n \"tests/verifications/openai_api/test_chat_completion.py\",\n f\"--provider={provider}\",\n \"-v\",\n \"--json-report\",\n f\"--json-report-file={temp_json_file}\",\n ]\n\n # Append -k argument if provided\n if keyword:\n cmd.extend([\"-k\", keyword])\n\n try:\n # Run subprocess with cwd set to project root\n result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root)\n print(f\"Pytest exit code: {result.returncode}\")\n\n # Check if the JSON file was created\n if temp_json_file.exists():\n with open(temp_json_file) as f:\n test_results = json.load(f)\n\n test_results[\"run_timestamp\"] = timestamp\n\n # Save results to the final (overwritten) file\n with open(result_file, \"w\") as f:\n json.dump(test_results, f, indent=2)\n f.write(\"\\n\") # Add a trailing newline for precommit\n\n # Clean up temp file\n temp_json_file.unlink()\n\n print(f\"Test results saved to {result_file}\")\n return result_file\n else:\n print(f\"Error: JSON report file not created for {provider}\")\n print(f\"Command stdout: {result.stdout}\")\n print(f\"Command stderr: {result.stderr}\")\n return None\n except Exception as 
e:\n print(f\"Error running tests for {provider}: {e}\")\n return None", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/generate_report.py", "stars": 7907, "label": 0} +{"function": "def run_multiple_tests(providers_to_run: list[str], keyword: str | None):\n \"\"\"Runs tests for a list of providers.\"\"\"\n print(f\"Running tests for providers: {', '.join(providers_to_run)}\")\n for provider in providers_to_run:\n run_tests(provider.strip(), keyword=keyword)\n print(\"Finished running tests.\")", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/generate_report.py", "stars": 7907, "label": 0} +{"function": "def parse_results(\n result_file,\n) -> tuple[defaultdict[str, defaultdict[str, dict[str, bool]]], defaultdict[str, set[str]], set[str], str]:\n \"\"\"Parse a single test results file.\n\n Returns:\n Tuple containing:\n - parsed_results: DefaultDict[provider, DefaultDict[model, Dict[test_name, pass_status]]]\n - providers_in_file: DefaultDict[provider, Set[model]] found in this file.\n - tests_in_file: Set[test_name] found in this file.\n - run_timestamp: Timestamp when the test was run\n \"\"\"\n if not os.path.exists(result_file):\n print(f\"Results file does not exist: {result_file}\")\n # Return empty defaultdicts/set matching the type hint\n return defaultdict(lambda: defaultdict(dict)), defaultdict(set), set(), \"\"\n\n with open(result_file) as f:\n results = json.load(f)\n\n # Initialize results dictionary with specific types\n parsed_results: defaultdict[str, defaultdict[str, dict[str, bool]]] = defaultdict(lambda: defaultdict(dict))\n providers_in_file: defaultdict[str, set[str]] = defaultdict(set)\n tests_in_file: set[str] = set()\n # Extract provider from filename (e.g., \"openai.json\" -> \"openai\")\n provider: str = result_file.stem\n\n # Extract run timestamp from the JSON data\n run_timestamp_unix = results.get(\"run_timestamp\")\n run_timestamp_str = (\n time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(run_timestamp_unix))\n if run_timestamp_unix is not None\n else \"Unknown\"\n )\n\n # Debug: Print summary of test results\n print(f\"Test results summary for {provider}:\")\n print(f\"Total tests: {results.get('summary', {}).get('total', 0)}\")\n print(f\"Passed: {results.get('summary', {}).get('passed', 0)}\")\n print(f\"Failed: {results.get('summary', {}).get('failed', 0)}\")\n print(f\"Error: {results.get('summary', {}).get('error', 0)}\")\n print(f\"Skipped: {results.get('summary', {}).get('skipped', 0)}\")\n\n # Extract test results\n if \"tests\" not in results or not results[\"tests\"]:\n print(f\"No test results found in {result_file}\")\n # Return empty defaultdicts/set matching the type hint\n return defaultdict(lambda: defaultdict(dict)), defaultdict(set), set(), \"\"\n\n # Process the tests\n for test in results[\"tests\"]:\n test_id = test.get(\"nodeid\", \"\")\n\n if not (call_phase := test.get(\"call\")):\n continue\n call_outcome = call_phase.get(\"outcome\")\n if call_outcome not in (\"passed\", \"failed\"):\n continue\n\n # --- Extract data from metadata ---\n metadata = test.get(\"metadata\", {})\n model = metadata.get(\"model\")\n case_id = metadata.get(\"case_id\") # String ID (if provided)\n case_index = metadata.get(\"case_index\") # Integer index (if no ID provided)\n\n # Check if we have a model and at least one case identifier\n if not model or (case_id is None and case_index is None):\n print(\n f\"Warning: Missing 
'model' or case identifier ('case_id'/'case_index') metadata for test: {test_id}. Skipping.\"\n )\n continue\n\n try:\n test_name_base = test_id.split(\"::\")[1].split(\"[\")[0]\n except (IndexError, ValueError) as e:\n print(f\"Warning: Could not parse base test name for {test_id}. Error: {e}. Skipping.\")\n continue\n\n # Construct detailed test name using ID or index\n if case_id is not None:\n detailed_test_name = f\"{test_name_base} ({case_id})\"\n elif case_index == 0:\n # If case_id is missing and index is 0, assume single case, use base name only\n detailed_test_name = test_name_base\n elif case_index is not None: # case_index > 0\n # Use case_index for naming if case_id wasn't provided and index > 0\n detailed_test_name = f\"{test_name_base} (case{case_index})\"\n else:\n # This case should be prevented by the earlier check, but handle defensively\n print(f\"Error: No case identifier found for test {test_id} after initial check. Skipping.\")\n continue\n\n # Populate collections for this file\n tests_in_file.add(detailed_test_name)\n providers_in_file[provider].add(model)\n\n if call_outcome == \"passed\":\n parsed_results[provider][model][detailed_test_name] = True\n elif call_outcome == \"failed\":\n parsed_results[provider][model][detailed_test_name] = False\n\n # Final Summary Warning (Optional)\n if not parsed_results.get(provider):\n print(f\"Warning: No valid test results parsed for provider {provider} from file {result_file}\")\n\n return parsed_results, providers_in_file, tests_in_file, run_timestamp_str", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/generate_report.py", "stars": 7907, "label": 0} +{"function": "def generate_report(\n results_dict: dict[str, Any],\n providers: dict[str, set[str]],\n all_tests: set[str],\n provider_timestamps: dict[str, str],\n output_file=None,\n):\n \"\"\"Generate the markdown report.\n\n Args:\n results_dict: Aggregated results [provider][model][test_name] -> status.\n providers: Dict of all providers and their models {provider: {models}}.\n The order of keys in this dict determines the report order.\n all_tests: Set of all test names found.\n provider_timestamps: Dict of provider to timestamp when tests were run\n output_file: Optional path to save the report.\n \"\"\"\n if output_file is None:\n # Default to creating the report in the same directory as this script\n output_file = Path(__file__).parent / \"REPORT.md\"\n else:\n output_file = Path(output_file)\n\n # Convert provider model sets to sorted lists (use passed-in providers dict)\n providers_sorted = {prov: sorted(models) for prov, models in providers.items()}\n\n # Sort tests alphabetically (use passed-in all_tests set)\n sorted_tests = sorted(all_tests)\n\n # Calculate counts for each base test name\n base_test_case_counts: defaultdict[str, int] = defaultdict(int)\n base_test_name_map: dict[str, str] = {}\n for test_name in sorted_tests:\n match = re.match(r\"^(.*?)( \\([^)]+\\))?$\", test_name)\n if match:\n base_name = match.group(1).strip()\n base_test_case_counts[base_name] += 1\n base_test_name_map[test_name] = base_name\n else:\n # Should not happen with current naming, but handle defensively\n base_test_case_counts[test_name] += 1\n base_test_name_map[test_name] = test_name\n\n if not sorted_tests:\n print(\"Warning: No test results found to generate a report.\")\n # Optionally create an empty report or return early\n with open(output_file, \"w\") as f:\n f.write(\"# Test Results Report\\n\\nNo test 
results found.\\n\")\n print(f\"Generated empty report: {output_file}\")\n return\n\n report = [\"# Test Results Report\\n\"]\n report.append(f\"*Generated on: {time.strftime('%Y-%m-%d %H:%M:%S')}*\\n\")\n report.append(\"*This report was generated by running `python tests/verifications/generate_report.py`*\\n\")\n\n # Icons for pass/fail\n pass_icon = \"\u2705\"\n fail_icon = \"\u274c\"\n na_icon = \"\u26aa\"\n\n # Add emoji legend\n report.append(\"## Legend\\n\")\n report.append(f\"- {pass_icon} - Test passed\")\n report.append(f\"- {fail_icon} - Test failed\")\n report.append(f\"- {na_icon} - Test not applicable or not run for this model\")\n report.append(\"\\n\")\n\n # Add a summary section\n report.append(\"## Summary\\n\")\n\n # Count total tests and passes (use passed-in providers and all_tests)\n total_tests = 0\n passed_tests = 0\n provider_totals = {}\n for provider, models in providers_sorted.items():\n provider_passed = 0\n provider_total = 0\n if provider in results_dict:\n for model in models:\n if model in results_dict[provider]:\n model_results = results_dict[provider][model]\n for test in sorted_tests:\n if test in model_results:\n provider_total += 1\n total_tests += 1\n if model_results[test]:\n provider_passed += 1\n passed_tests += 1\n provider_totals[provider] = (provider_passed, provider_total)\n\n # Add summary table (use the order from the providers dict keys)\n report.append(\"| Provider | Pass Rate | Tests Passed | Total Tests |\")\n report.append(\"| --- | --- | --- | --- |\")\n # Iterate through providers in the order they appear in the input dict\n for provider in providers_sorted.keys():\n passed, total = provider_totals.get(provider, (0, 0))\n pass_rate = f\"{(passed / total * 100):.1f}%\" if total > 0 else \"N/A\"\n report.append(f\"| {provider.capitalize()} | {pass_rate} | {passed} | {total} |\")\n report.append(\"\\n\")\n\n for provider in providers_sorted.keys():\n provider_models = providers_sorted[provider] # Use sorted models\n if not provider_models:\n continue\n\n report.append(f\"\\n## {provider.capitalize()}\\n\")\n\n # Add timestamp when test was run\n if provider in provider_timestamps:\n report.append(f\"*Tests run on: {provider_timestamps[provider]}*\\n\")\n\n # Add test command for reproducing results\n test_cmd_all = f\"pytest tests/verifications/openai_api/test_chat_completion.py --provider={provider} -v\"\n report.append(f\"```bash\\n# Run all tests for this provider:\\n{test_cmd_all}\\n\")\n\n # Find an example test with a case ID\n example_base_test_name = None\n example_case_id = None\n # Get first test as fallback base, handle empty list\n first_test_name = sorted_tests[0] if sorted_tests else \"unknown_test\"\n\n match = re.match(r\"^(.*?) 
\\((.*?)\\)$\", first_test_name)\n if match:\n example_base_test_name = match.group(1).strip()\n example_case_id = match.group(2).strip()\n else:\n example_base_test_name = first_test_name\n\n base_name = base_test_name_map.get(first_test_name, first_test_name) # Get base name\n case_count = base_test_case_counts.get(base_name, 1) # Get count\n filter_str = f\"{example_base_test_name} and {example_case_id}\" if case_count > 1 else example_base_test_name\n\n test_cmd_specific_case = (\n f'pytest tests/verifications/openai_api/test_chat_completion.py --provider={provider} -k \"{filter_str}\"'\n )\n report.append(\n f\"# Example: Run only the '{example_case_id}' case of {example_base_test_name}:\\n{test_cmd_specific_case}\\n```\\n\"\n )\n\n # Get display names (use passed-in providers dict)\n provider_config = VERIFICATION_CONFIG.get(\"providers\", {}).get(provider, {})\n display_name_map = provider_config.get(\"model_display_names\", {})\n\n # Add Model Key Table (use provider_models)\n report.append(f\"\\n**Model Key ({provider.capitalize()})**\\n\")\n provider_key_lines = [\"| Display Name | Full Model ID |\", \"| --- | --- |\"]\n for model_id in provider_models:\n display_name = display_name_map.get(model_id, model_id)\n provider_key_lines.append(f\"| {display_name} | `{model_id}` |\")\n report.extend(provider_key_lines)\n report.append(\"\\n\")\n\n # Create results table header (use provider_models)\n display_names = [display_name_map.get(m, m) for m in provider_models]\n header = \"| Test | \" + \" | \".join(display_names) + \" |\"\n separator = \"| --- | \" + \" | \".join([\"---\"] * len(provider_models)) + \" |\"\n report.append(header)\n report.append(separator)\n\n # Get results for this provider from results_dict\n provider_results_data = results_dict.get(provider, {})\n\n # Add rows for each test (use sorted_tests)\n for test in sorted_tests:\n # Determine display name based on case count\n base_name = base_test_name_map.get(test, test) # Get base name\n case_count = base_test_case_counts.get(base_name, 1) # Get count\n display_test_name = base_name if case_count == 1 else test # Choose display name\n row = f\"| {display_test_name} |\" # Use display name\n\n for model_id in provider_models:\n if model_id in provider_results_data and test in provider_results_data[model_id]:\n result = pass_icon if provider_results_data[model_id][test] else fail_icon\n else:\n result = na_icon\n row += f\" {result} |\"\n report.append(row)\n\n # Write to file\n with open(output_file, \"w\") as f:\n f.write(\"\\n\".join(report))\n f.write(\"\\n\")\n\n print(f\"Report generated: {output_file}\")", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/generate_report.py", "stars": 7907, "label": 0} +{"function": "def main():\n parser = argparse.ArgumentParser(description=\"Generate test report\")\n parser.add_argument(\"--run-tests\", action=\"store_true\", help=\"Run tests before generating report\")\n parser.add_argument(\n \"--providers\",\n type=str,\n nargs=\"+\",\n help=\"Specify providers to include/test (comma-separated or space-separated, default: uses DEFAULT_PROVIDERS)\",\n )\n parser.add_argument(\"--output\", type=str, help=\"Output file location (default: tests/verifications/REPORT.md)\")\n parser.add_argument(\"--k\", type=str, help=\"Keyword expression to filter tests (passed to pytest -k)\")\n args = parser.parse_args()\n\n all_results = {}\n final_providers_order = {} # Dictionary to store results, preserving processing order\n 
aggregated_tests = set()\n provider_timestamps = {}\n\n # 1. Determine the desired list and order of providers\n if args.providers:\n desired_providers = []\n for provider_arg in args.providers:\n desired_providers.extend([p.strip() for p in provider_arg.split(\",\")])\n else:\n desired_providers = DEFAULT_PROVIDERS # Use default order/list\n\n # 2. Run tests if requested (using the desired provider list)\n if args.run_tests:\n run_multiple_tests(desired_providers, args.k)\n\n for provider in desired_providers:\n # Construct the expected result file path directly\n result_file = RESULTS_DIR / f\"{provider}.json\"\n\n if result_file.exists(): # Check if the specific file exists\n print(f\"Loading results for {provider} from {result_file}\")\n try:\n parsed_data = parse_results(result_file)\n parsed_results, providers_in_file, tests_in_file, run_timestamp = parsed_data\n all_results.update(parsed_results)\n aggregated_tests.update(tests_in_file)\n\n # Add models for this provider, ensuring it's added in the correct report order\n if provider in providers_in_file:\n if provider not in final_providers_order:\n final_providers_order[provider] = set()\n final_providers_order[provider].update(providers_in_file[provider])\n if run_timestamp != \"Unknown\":\n provider_timestamps[provider] = run_timestamp\n else:\n print(\n f\"Warning: Provider '{provider}' found in desired list but not within its result file data ({result_file}).\"\n )\n\n except Exception as e:\n print(f\"Error parsing results for provider {provider} from {result_file}: {e}\")\n else:\n # Only print warning if we expected results (i.e., provider was in the desired list)\n print(f\"Result file for desired provider '{provider}' not found at {result_file}. Skipping.\")\n\n # 5. Generate the report using the filtered & ordered results\n print(f\"Final Provider Order for Report: {list(final_providers_order.keys())}\")\n generate_report(all_results, final_providers_order, aggregated_tests, provider_timestamps, args.output)", "creation_date": "2025-04-09T04:21:38Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/generate_report.py", "stars": 7907, "label": 0} +{"function": "def pytest_generate_tests(metafunc):\n \"\"\"Dynamically parametrize tests based on the selected provider and config.\"\"\"\n if \"model\" in metafunc.fixturenames:\n model = metafunc.config.getoption(\"model\")\n if model:\n metafunc.parametrize(\"model\", [model])\n return\n\n provider = metafunc.config.getoption(\"provider\")\n if not provider:\n print(\"Warning: --provider not specified. Skipping model parametrization.\")\n metafunc.parametrize(\"model\", [])\n return\n\n try:\n config_data = _load_all_verification_configs()\n except (OSError, FileNotFoundError) as e:\n print(f\"ERROR loading verification configs: {e}\")\n config_data = {\"providers\": {}}\n\n provider_config = config_data.get(\"providers\", {}).get(provider)\n if provider_config:\n models = provider_config.get(\"models\", [])\n if models:\n metafunc.parametrize(\"model\", models)\n else:\n print(f\"Warning: No models found for provider '{provider}' in config.\")\n metafunc.parametrize(\"model\", []) # Parametrize empty if no models found\n else:\n print(f\"Warning: Provider '{provider}' not found in config. 
No models parametrized.\")\n metafunc.parametrize(\"model\", []) # Parametrize empty if provider not found", "creation_date": "2025-04-28T21:06:00Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/openai_api/conftest.py", "stars": 7907, "label": 0} +{"function": "def multi_image_data():\n files = [\n THIS_DIR / \"fixtures/images/vision_test_1.jpg\",\n THIS_DIR / \"fixtures/images/vision_test_2.jpg\",\n THIS_DIR / \"fixtures/images/vision_test_3.jpg\",\n ]\n encoded_files = []\n for file in files:\n with open(file, \"rb\") as image_file:\n base64_data = base64.b64encode(image_file.read()).decode(\"utf-8\")\n encoded_files.append(f\"data:image/jpeg;base64,{base64_data}\")\n return encoded_files", "creation_date": "2025-04-10T17:26:19Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/openai_api/test_chat_completion.py", "stars": 7907, "label": 0} +{"function": "def test_chat_non_streaming_basic(request, openai_client, model, provider, verification_config, case):\n test_name_base = get_base_test_name(request)\n if should_skip_test(verification_config, provider, model, test_name_base):\n pytest.skip(f\"Skipping {test_name_base} for model {model} on provider {provider} based on config.\")\n\n response = openai_client.chat.completions.create(\n model=model,\n messages=case[\"input\"][\"messages\"],\n stream=False,\n )\n assert response.choices[0].message.role == \"assistant\"\n assert case[\"output\"].lower() in response.choices[0].message.content.lower()", "creation_date": "2025-04-10T17:26:19Z", "repo": "meta-llama/llama-stack", "file_path": "tests/verifications/openai_api/test_chat_completion.py", "stars": 7907, "label": 0}