id int64 393k 2.82B | repo stringclasses 68
values | title stringlengths 1 936 | body stringlengths 0 256k ⌀ | labels stringlengths 2 508 | priority stringclasses 3
values | severity stringclasses 3
values |
|---|---|---|---|---|---|---|
2,647,380,248 | react | [Compiler Bug]: Getting false eslint error from compiler | ### What kind of issue is this?
- [ ] React Compiler core (the JS output is incorrect, or your app works incorrectly after optimization)
- [ ] babel-plugin-react-compiler (build issue installing or using the Babel plugin)
- [X] eslint-plugin-react-compiler (build issue installing or using the eslint plugin)
- [ ] react-compiler-healthcheck (build issue installing or using the healthcheck script)
### Link to repro
Not Applicable.
### Repro steps
import { useEffect, useRef } from 'react'
const usePrevious = <T>(value: T): T => {
// The ref object is a generic container whose current property is mutable ...
// ... and can hold any value, similar to an instance property on a class
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const ref: any = useRef<T>()
// Store current value in ref
useEffect(() => {
ref.current = value
}, [value]) // Only re-run if value changes
// Return previous value (happens before update in useEffect above)
return ref.current
}
export default usePrevious
**Getting false eslint error, above is a custom hook to store previous state value.**
Ref values (the `current` property) may not be accessed during render. (https://react.dev/reference/react/useRef)eslint(react-compiler/react-compiler)
### How often does this bug happen?
Every time
### What version of React are you using?
18
### What version of React Compiler are you using?
19.0.0-beta-63b359f-20241101 | Type: Bug,Status: Unconfirmed,Component: Optimizing Compiler | medium | Critical |
2,647,385,958 | pytorch | `Torch.distributed.tensor.parallel.Style.Rowwiseparallel` when send tensor, the use of the underlying operator is `aten.to.dtype_layout`, lack of distributed strategy. But after adding a policy, `AsyncCollectiveTensor` no placements error | ### 🐛 Describe the bug
```model = Transformer.from_model_args(simple_llama2_config).to("cuda")
#init model weights
model.init_weights()
#parallelize the first embedding and the last linear out projection
model = parallelize_module(
model,
tp_mesh,
{
"tok_embeddings": RowwiseParallel( # **Here's the problem**
input_layouts=Replicate(),
output_layouts=Shard(1),
),
"norm": SequenceParallel(),
"output": ColwiseParallel(
input_layouts=Shard(1),
output_layouts=Replicate()
),
}
)
```
### first error:
```Traceback (most recent call last):
File ~/nfs/project/vision_model/GeoChat/mutiSateChatInOrbit/run_demo.py:50 in main
eval_instance.eval_model()
File ~/nfs/project/vision_model/GeoChat/mutiSateChatInOrbit/dist_batch_geochat_vqa.py:348 in eval_model
output_ids = self.model.generate(final_input_tensors,
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/utils/_contextlib.py:116 in decorate_context
return func(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/transformers/generation/utils.py:1538 in generate
return self.greedy_search(
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/transformers/generation/utils.py:2362 in greedy_search
outputs = self(
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1736 in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1747 in _call_impl
return forward_call(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/hooks.py:165 in new_forward
output = old_forward(*args, **kwargs)
File ~/nfs/project/vision_model/GeoChat/geochat/model/language_model/geochat_llama.py:81 in forward
input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
File ~/nfs/project/vision_model/GeoChat/geochat/model/geochat_arch.py:187 in prepare_inputs_labels_for_multimodal
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) # 获取文本1的embedding
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1736 in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1844 in _call_impl
return inner()
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1790 in inner
result = forward_call(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/hooks.py:160 in new_forward
args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/hooks.py:290 in pre_forward
return send_to_device(args, self.execution_device), send_to_device(
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/utils/operations.py:151 in send_to_device
return honor_type(
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/utils/operations.py:83 in honor_type
return type(obj)(generator)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/utils/operations.py:152 in <genexpr>
tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/accelerate/utils/operations.py:167 in send_to_device
return tensor.to(device, non_blocking=non_blocking)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/_compile.py:32 in inner
return disable_fn(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py:632 in _fn
return fn(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/_api.py:341 in __torch_dispatch__
return DTensor._op_dispatcher.dispatch(
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/_dispatch.py:169 in dispatch
self.sharding_propagator.propagate(op_info)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/_sharding_prop.py:201 in propagate
OutputSharding, self.propagate_op_sharding(op_info.schema)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/_sharding_prop.py:46 in __call__
return self.cache(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/_sharding_prop.py:450 in propagate_op_sharding_non_cached
raise NotImplementedError(
NotImplementedError: Operator aten.to.dtype_layout does not have a sharding strategy registered.
```
### The band-aid modification error 01
#### alteration
`torch.distributed.tensor.ops._pointwise_ops` ,添加`dtype_layout`的策略
```
linear_pointwise_ops = [
aten.div.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op.
aten.div_.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op.
aten.to.dtype,
# aten.to.dtype_layout, # here is*
aten.add.Tensor,
aten.add_.Tensor,
]
```
#### error
```
File ~/nfs/project/vision_model/GeoChat/geochat/model/language_model/geochat_llama.py:81 in forward
input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
File ~/nfs/project/vision_model/GeoChat/geochat/model/geochat_arch.py:187 in prepare_inputs_labels_for_multimodal
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) # 获取文本1的embedding
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1736 in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1844 in _call_impl
return inner()
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/nn/modules/module.py:1803 in inner
hook_result = hook(self, args, result)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/_api.py:896 in <lambda>
lambda mod, inputs, outputs: output_fn(mod, outputs, device_mesh)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py:297 in _prepare_output_fn
if outputs.placements != output_layouts:
AttributeError: 'AsyncCollectiveTensor' object has no attribute 'placements'
```
### The band-aid modification error 02
#### alteration
process ` asynccollectivetensor` type: ` Torch.distributed.tensor.parallel.Style.Rowwiseparallel: 297`
```
class RowwiseParallel(ParallelStyle):
……
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
# Rowwise sharding produces partial output, depending on output layouts:
# 1. to replicate -> allreduce
# 2. to shard -> reduce_scatter
# ----------------------------------------------------------------创可贴 1
# from torch.distributed._functional_collectives import AsyncCollectiveTensor
# if isinstance(outputs, AsyncCollectiveTensor):
# # # 等待异步操作完成
# outputs.wait()
# new_outputs = outputs.clone()
# print("new_outputs",new_outputs)
# outputs = distribute_tensor(outputs, device_mesh, output_layouts)
# # outputs = distributed_outputs.clone()
# outputs = outputs.redistribute(placements=output_layouts, async_op=False)
# ----------------------------------------------------------------创可贴 2
# if isinstance(outputs, torch.distributed._functional_collectives.AsyncCollectiveTensor):
# result = outputs.trigger_wait()
# new_outputs = result.clone().detach()
# print("new_outputs",new_outputs)
# new_outputs = new_outputs.redistribute(placements=output_layouts, async_op=False)
# ----------------------------------------------------------------创可贴3
# def handle_async_tensor(async_tensor):
# # 如果是 AsyncCollectiveTensor,则等待操作完成并返回结果
# if isinstance(async_tensor, torch.distributed._functional_collectives.AsyncCollectiveTensor):
# result = async_tensor.trigger_wait()
# return result.clone().detach()
# # 否则直接返回原对象
# return async_tensor
# def prepare_output(outputs):
# from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
# flat_outputs, spec = tree_flatten(outputs)
# processed_flat_outputs = [handle_async_tensor(item) for item in flat_outputs]
# # 重新构造原来的结构
# processed_outputs = tree_unflatten(processed_flat_outputs, spec)
# # 如果 outputs 是一个张量,直接克隆;如果是更复杂的结构,递归处理
# if torch.is_tensor(processed_outputs):
# new_outputs = processed_outputs.clone()
# else:
# # 递归处理非张量类型的输出
# new_outputs = tree_map(lambda x: x.clone() if torch.is_tensor(x) else x, processed_outputs)
# return new_outputs
# new_outputs = prepare_output(outputs)
# print("new_outputs",new_outputs)
# ----------------------------------------------------------------原代码
if outputs.placements != output_layouts:
outputs = outputs.redistribute(placements=output_layouts, async_op=True)
# back to local tensor if use_local_output is True
return outputs.to_local() if use_local_output else outputs
```
#### but always: error
```
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py:264 in _prepare_output_fn
result = outputs.trigger_wait()
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py:611 in trigger_wait
out = wait_tensor(self.elem)
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py:140 in wait_tensor
return torch.ops._c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined]
File ~/nfs/anaconda3/envs/geochat/lib/python3.10/site-packages/torch/_ops.py:1116 in __call__
new_op = self._op(*args, **(kwargs or {}))
RuntimeError: Inplace update to inference tensor outside InferenceMode is not allowed.You can make a clone to get a normal tensor before doing inplace update.See https://github.com/pytorch/rfcs/pull/17 for more details.
```
### Versions
cuda11.8
python == 3.10.14,
torch==2.5.1+cu118
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o | oncall: distributed | low | Critical |
2,647,387,307 | ui | [bug]: sidebar inconsistency | ### Describe the bug
on clear installation sidebar-09 looks different than on docs and v0.

### Affected component/components
sidebar
### How to reproduce
1. create a nextjs project
2. add shadcn sidebar-09
3. go to /dashboard
### Codesandbox/StackBlitz link
https://stackblitz.com/edit/stackblitz-starters-17qxnh?file=package.json
### Logs
_No response_
### System Info
```bash
macos, browser: arc
```
### Before submitting
- [X] I've made research efforts and searched the documentation
- [X] I've searched for existing issues | bug | low | Critical |
2,647,428,671 | rust | rust_2024_prelude_collisions fails with glob of underscore import | I tried this code:
```rust
#![warn(rust_2024_prelude_collisions)]
pub mod prelude {
pub use crate::inner::FExt as _;
}
pub mod inner {
pub trait FExt {
fn into_future(self) -> StreamFuture
where
Self: Sized,
{
StreamFuture
}
}
pub struct StreamFuture;
pub struct F;
impl FExt for F {}
}
use crate::prelude::*;
pub fn f() {
let _ = crate::inner::F.into_future();
}
```
This generates a suggestion to modify the code like this:
```diff
@@ -23,5 +23,5 @@
use crate::prelude::*;
pub fn f() {
- let _ = crate::inner::F.into_future();
+ let _ = FExt::into_future(crate::inner::F);
}
```
However, this fails to compile with the following error:
```
error[E0433]: failed to resolve: use of undeclared type `FExt`
--> src/lib.rs:26:13
|
26 | let _ = FExt::into_future(crate::inner::F);
| ^^^^ use of undeclared type `FExt`
|
help: consider importing one of these traits
|
23 + use crate::inner::FExt;
|
23 + use f::FExt;
|
warning: unused import: `crate::prelude::*`
--> src/lib.rs:23:5
|
23 | use crate::prelude::*;
| ^^^^^^^^^^^^^^^^^
|
= note: `#[warn(unused_imports)]` on by default
For more information about this error, try `rustc --explain E0433`.
```
Because the `_` import does not give the trait a name.
This comes up in real-life with importing `use futures::prelude::*`.
I'm not sure what the best solution to this would be. Perhaps it could detect a similar scenario, and fully qualify the path to the trait (like `futures::StreamExt::into_future`). Or just always fully qualify it?
### Meta
`rustc --version --verbose`:
```
rustc 1.84.0-nightly (59cec72a5 2024-11-08)
binary: rustc
commit-hash: 59cec72a57af178767a7b8e7f624b06cc50f1087
commit-date: 2024-11-08
host: aarch64-apple-darwin
release: 1.84.0-nightly
LLVM version: 19.1.3
```
| A-lints,A-resolve,T-lang,T-compiler,C-bug,A-suggestion-diagnostics,D-invalid-suggestion,D-edition,S-has-mcve,A-edition-2024,I-edition-triaged | low | Critical |
2,647,436,168 | vscode | Git diff does not work over a Samba share |
Type: <b>Bug</b>
I am accessing a network drive on a Linux server, shared using Samba, using the standard GNOME GVfs client. If I navigate to the file share on the terminal (`/run/user/####/gvfs/smb-share:..../`) and run `git diff`, the expected differences are displayed. However, in VS Code, it displays every file as if it is brand new; even worse, staged files won't show anything at all.
VS Code version: Code 1.95.2 (e8653663e8840adaf45af01eab5c627a5af81807, 2024-11-07T11:07:22.054Z)
OS version: Linux x64 5.4.0-200-generic snap
Modes:
<details>
<summary>System Info</summary>
|Item|Value|
|---|---|
|CPUs|AMD Ryzen 7 2700X Eight-Core Processor (16 x 2063)|
|GPU Status|2d_canvas: enabled<br>canvas_oop_rasterization: enabled_on<br>direct_rendering_display_compositor: disabled_off_ok<br>gpu_compositing: enabled<br>multiple_raster_threads: enabled_on<br>opengl: enabled_on<br>rasterization: enabled<br>raw_draw: disabled_off_ok<br>skia_graphite: disabled_off<br>video_decode: enabled<br>video_encode: disabled_software<br>vulkan: disabled_off<br>webgl: enabled<br>webgl2: enabled<br>webgpu: disabled_off<br>webnn: disabled_off|
|Load (avg)|0, 0, 1|
|Memory (System)|15.61GB (11.27GB free)|
|Process Argv|--no-sandbox --force-user-env --crash-reporter-id d79ac254-0e9d-427a-a9ba-5adfd6c2b60d|
|Screen Reader|no|
|VM|0%|
|DESKTOP_SESSION|ubuntu|
|XDG_CURRENT_DESKTOP|Unity|
|XDG_SESSION_DESKTOP|ubuntu|
|XDG_SESSION_TYPE|x11|
</details><details><summary>Extensions (20)</summary>
Extension|Author (truncated)|Version
---|---|---
language-hugo-vscode|bud|1.3.1
githistory|don|0.6.20
xml|Dot|2.5.1
codespaces|Git|1.17.3
gc-excelviewer|Gra|4.2.62
csdevkit|ms-|1.11.14
csharp|ms-|2.50.27
vscode-dotnet-runtime|ms-|2.2.2
vscodeintellicode-csharp|ms-|2.1.11
debugpy|ms-|2024.12.0
isort|ms-|2023.10.1
python|ms-|2024.18.1
vscode-pylance|ms-|2024.11.1
hexeditor|ms-|1.11.1
wasm-wasi-core|ms-|1.0.2
vscode-yaml|red|1.15.0
ruby-lsp|Sho|0.8.13
shopify-liquid|sis|4.0.1
code-spell-checker|str|3.0.1
vscode-icons|vsc|12.9.0
</details><details>
<summary>A/B Experiments</summary>
```
vsliv368:30146709
vspor879:30202332
vspor708:30202333
vspor363:30204092
vscod805cf:30301675
binariesv615:30325510
vsaa593cf:30376535
py29gd2263:31024239
c4g48928:30535728
azure-dev_surveyone:30548225
vscrp:30673768
2i9eh265:30646982
962ge761:30959799
pythongtdpath:30769146
pythonnoceb:30805159
asynctok:30898717
pythonmypyd1:30879173
h48ei257:31000450
pythontbext0:30879054
cppperfnew:31000557
dsvsc020:30976470
pythonait:31006305
dsvsc021:30996838
0ee40948:31013168
dvdeprecation:31068756
dwnewjupyter:31046869
impr_priority:31102340
nativerepl2:31139839
refactort:31108082
pythonrstrctxt:31112756
cf971741:31144450
iacca1:31171482
notype1:31157159
5fd0e150:31155592
dwcopilot:31170013
```
</details>
<!-- generated by issue reporter --> | bug,git | low | Critical |
2,647,440,139 | godot | [4.4 dev 4] WebsocketPeer regression: connections from HTML5 stay "connecting" | ### Tested versions
- Reproducible in 4.4.dev4+
### System information
Linux Mint 22. Firefox and Google Chromium
### Issue description
HTML5/WebsocketPeer regression:
- **Error happens when connecting to a port different to port 80**
- WebsocketPeer does not connect to local golang Websocket server from **4.4-dev4 HTML5 export** build
- After the call to connect_to_url the socket get_ready_state() can only be **connecting**, when calling close() get_ready_state() stays returning **closing** permanently
- The test works correctly from editor in 4.4-dev4
- The test works correctly in 4.4-dev3 HTML5 export
### Steps to reproduce
- Setup enviroment to execute HTML5 build with 'Access-Control-Allow-Origin', '*' header
- Execute de MRP and compare the outputs from editor and HTML5
# Console Output from HTML5 4.4-dev3 or Editor 4.4-dev4 ===
#WebsocketPeer regression TEST_1
#NEW_SOCKET_STATE: Connecting
#NEW_SOCKET_STATE: Connected
#NEW_SOCKET_STATE: Clossing
#NEW_SOCKET_STATE: Closed
#WebsocketPeer regression TEST_2
#NEW_SOCKET_STATE: Connecting
#NEW_SOCKET_STATE: Connected
#NEW_SOCKET_STATE: Closed
#WebsocketPeer regression TEST_END
# Console Output from HTML5 4.4dev4
#WebsocketPeer regression TEST_1
#NEW_SOCKET_STATE: Connecting
#NEW_SOCKET_STATE: Clossing
#WebsocketPeer regression TEST_2
#NEW_SOCKET_STATE: Connecting
#NEW_SOCKET_STATE: Clossing
#WebsocketPeer regression TEST_END
### Minimal reproduction project (MRP)
[websocketpeerdev4bug.zip](https://github.com/user-attachments/files/17692221/websocketpeerdev4bug.zip)
| bug,platform:web,needs testing,topic:network | low | Critical |
2,647,472,906 | deno | deno bench does not support --inspect-brk | Version: Deno 2.0.6
I wrote a benchmark for my code, and wanted to debug it. And also point the browser's performance profiler at it.
So I tried `deno bench --inspect-brk`. This however, does not work.
```
PM> deno bench --inspect-brk
error: unexpected argument '--inspect-brk' found
tip: to pass '--inspect-brk' as a value, use '-- --inspect-brk'
Usage: deno bench [OPTIONS] [files]... [-- [SCRIPT_ARG]...]
``` | suggestion,needs discussion | low | Critical |
2,647,492,864 | pytorch | Inconsistent Model Loading with `weights_only=True` for Custom Models | ### 🚀 The feature, motivation and pitch
I'm encountering an issue with the `weights_only=True` parameter in `torch.load`, where I need to load custom model classes while ensuring repeatable results across multiple runs. My current workaround involves setting `weights_only=False` to achieve identical predictions, which raises security warnings due to potential arbitrary code execution. This issue serves to inform the PyTorch team about the challenges in achieving both secure and repeatable model loading in use cases like neural architecture search, where multiple models are saved and loaded regularly.
**Background**:
In my work with neural architecture search, achieving repeatability is essential. When loading a saved custom model using `weights_only=True`, I encounter issues with loading state and repeatability, even after adding custom classes to `torch.serialization.add_safe_globals`. Using `weights_only=False` resolves the issue, but it raises security concerns. Given the importance of security and repeatability, this feature request aims to address these limitations and propose an improvement for handling custom models in `weights_only=True` mode.
### Alternatives
The current workaround involves setting `weights_only=False`, which allows repeatability but may pose a security risk due to the potential execution of arbitrary code during unpickling. However, this approach is not ideal for secure applications, especially when working with multiple models in neural architecture search.
### Additional context
Environment details:
- PyTorch version: 2.1.0
- Python version: 3.10
- OS: Ubuntu 22.04
Example code and detailed reproduction steps are provided below to illustrate the issue and expected vs. actual behavior.
```python
import torch
import numpy as np
import random
from my_custom_package import NeuralNetwork, Trainer # Import custom model and trainer
# Set random seed for reproducibility
def set_random_seed(seed=42):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
set_random_seed(42)
# Initialize and train the model
model = NeuralNetwork(input_size=10, output_size=4, num_vars=40)
trainer = Trainer(model=model, lr=1e-4, batch_size=20, num_epochs=20)
trainer.fit(X_train, y_train, X_val, y_val)
# Save the trained model
trainer.save_model('neural_model.pth')
# Function to generate predictions
def generate_predictions(model, X_test):
with torch.no_grad():
X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
return model(X_test_tensor)
# Generate predictions with the original model
predictions = generate_predictions(model, X_test)
# Load the model with weights_only=False and evaluate
loaded_model = torch.load('neural_model.pth', weights_only=False)
loaded_model.eval()
predictions_loaded_model = generate_predictions(loaded_model, X_test)
# Load the model again with default settings and evaluate
loaded_model_2 = torch.load('neural_model.pth')
loaded_model_2.eval()
predictions_loaded_model_2 = generate_predictions(loaded_model_2, X_test)
# Check consistency of predictions
print("All Same - trained and loaded:\t", np.allclose(predictions, predictions_loaded_model))
print("All Same - loaded once, loaded twice:\t", np.allclose(predictions_loaded_model_2, predictions_loaded_model_2))
```
Thank you for your time and assistance. Any guidance or recommendations would be greatly appreciated.
cc @ezyang @gchanan @zou3519 @kadeng @msaroufim @mruberry @mikaylagawarecki | high priority,needs reproduction,module: serialization,triaged | low | Major |
2,647,495,911 | rust | Support calling functions with SIMD vectors that couldn't be used in the caller | We now lint and will eventually error on this program:
```rust
use std::mem::transmute;
#[cfg(target_arch = "x86")]
use std::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
#[target_feature(enable = "avx")]
#[allow(improper_ctypes_definitions)]
unsafe extern "C" fn with_target_feature(x: __m256) {
let val = unsafe { transmute::<_, [u32; 8]>(x) };
dbg!(val);
}
fn main() {
assert!(is_x86_feature_detected!("avx"));
// SAFETY: we checked that the `avx` feature is present.
unsafe {
with_target_feature(transmute([1; 8])); //~ ERROR: missing `avx` target feature
}
}
```
```
warning: ABI error: this function call uses a vector type that requires the `avx` target feature, which is not enabled in the caller
--> test.rs:18:9
|
18 | with_target_feature(transmute([1; 8]));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ function called here
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #116558 <https://github.com/rust-lang/rust/issues/116558>
= help: consider enabling it globally (`-C target-feature=+avx`) or locally (`#[target_feature(enable="avx")]`)
= note: `#[warn(abi_unsupported_vector_types)]` on by default
```
The lint is necessary because the way we codegen this function would be unsound (and indeed, if you run this on the [playground](https://play.rust-lang.org/?version=nightly&mode=debug&edition=2021&gist=78a91a7266b7e39fe539c1e8d8c9892d) you can see that the argument value gets corrupted). See https://github.com/rust-lang/rust/issues/116558 for more context.
However, there's no fundamental reason that we couldn't compile this code! We "just" need to generate the call to `with_target_feature` using its proper ABI, i.e., using the AVX registers. This is sound because the function anyway requires that target feature, so the caller must have already ensured that this target feature is available.
The problem is that LLVM currently simply has no way to express such a call. So we have three options:
- error (the easiest one, and what we are currently working towards)
- fix this in LLVM (also see https://github.com/llvm/llvm-project/issues/70563) -- I am told this is quite hard
- generate a shim that uses the Rust ABI (so it is not affected by these ABI shenanigans), and has the `avx` feature gate, and calls the actual callee -- not a pretty solution since the extra function call is bad for performance, and performance is the reason people manually write SIMD code to begin with
Lucky enough, this only affects non-Rust ABIs, so users should only rarely run into this.
Cc @rust-lang/wg-llvm @rust-lang/opsem @chorman0773 @veluca93 | A-LLVM,C-feature-request,A-SIMD,WG-llvm,A-ABI,T-opsem | low | Critical |
2,647,510,238 | godot | Websockets disconnect when idle on web builds | ### Tested versions
4.4.dev4
4.3
### System information
Windows 11, SteamOS
### Issue description
Using Websockets causes the connection to be closed after a while (feels like roughly half a minute) if no data is being sent.
This applies to both regular Websockets and the multiplayer functionality.
### Steps to reproduce
- Establish a Websocket connection
- Do not send any data for some time
### Minimal reproduction project (MRP)
any project with Websocket functionality on html5 | discussion,topic:network | low | Minor |
2,647,535,085 | ollama | Ollama ps to report actual number of layers instead of percentage. | Can we have Ollama to report how many layers out of total layers are offloaded to cpu instead of percentage?
I think This would be more useful information than just percentage when setting num_gpu parameter. Also you can see how many layers a model has.
Thanks! | feature request | low | Minor |
2,647,537,161 | next.js | Type error: Namespace 'React' has no exported member 'PromiseLikeOfReactNode'. | ### Link to the code that reproduces this issue
https://github.com/cryfs/cryfs-web-next/tree/feature/failing
### To Reproduce
1. Clone repository from above link, in the specified branch
2. Run "npm run build" (which will run next build)
### Current vs. Expected behavior
With next.js 13.5.4, everything works fine. But when upgrading to next.js 13.5.5, ([this commit](https://github.com/cryfs/cryfs-web-next/commit/55785a15b962f9acae70a3502e8d226943b65897)), it breaks and shows:
```
./node_modules/next/dist/client/components/error-boundary.d.ts:26:71
Type error: Namespace 'React' has no exported member 'PromiseLikeOfReactNode'.
24 | static getDerivedStateFromProps(props: ErrorBoundaryHandlerProps, state: ErrorBoundaryHandlerState): ErrorBoundaryHandlerState | null;
25 | reset: () => void;
> 26 | render(): string | number | boolean | React.ReactFragment | React.PromiseLikeOfReactNode | React.JSX.Element | null | undefined;
| ^
27 | }
28 | export declare function GlobalError({ error }: {
29 | error: any;
```
The update of the next.js version is the only change in that commit, so this seems to be a breaking change in a minor version.
How can I fix this? Ideally without upgrading next.js since we're not ready to go to Next.js 14 yet. The latest minor version on next.js 13 (13.5.7) still has the issue.
### Provide environment information
```bash
Operating System:
Platform: linux
Arch: x64
Version: #47-Ubuntu SMP PREEMPT_DYNAMIC Fri Sep 27 21:40:26 UTC 2024
Binaries:
Node: 18.19.1
npm: 10.9.0
Yarn: 1.22.19
pnpm: N/A
Relevant Packages:
next: 13.5.5
eslint-config-next: 13.5.5
react: 18.2.0
react-dom: 18.2.0
typescript: 5.0.4
Next.js Config:
output: export
```
### Which area(s) are affected? (Select all that apply)
Not sure
### Which stage(s) are affected? (Select all that apply)
next build (local)
### Additional context
works in 13.5.5-canary.2
broken in 13.5.5-canary.3 | bug | low | Critical |
2,647,539,044 | langchain | Unable to count tokens & track usage while using AgentExecutor with AzureChatOpenAI | ### Checked other resources
- [X] I added a very descriptive title to this issue.
- [X] I searched the LangChain documentation with the integrated search.
- [X] I used the GitHub search to find a similar question and didn't find it.
- [X] I am sure that this is a bug in LangChain rather than my code.
- [X] The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).
### Example Code
The following is the code I am using
```from pydantic import BaseModel, Field
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import AzureChatOpenAI
from langchain_community.callbacks.manager import get_openai_callback
from tenacity import retry, stop_after_attempt, wait_fixed
import json
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain_core.messages import AIMessage, HumanMessage
from langchain.agents import AgentExecutor
AZURE_ENDPOINT = "XXXX"
API_KEY = "XXXX"
DEPLOYMENT_NAME = "gpt_4o"
# Create the Azure Chat Model
llm = AzureChatOpenAI(
azure_endpoint=AZURE_ENDPOINT,
openai_api_version="2024-02-01",
deployment_name=DEPLOYMENT_NAME,
openai_api_key=API_KEY,
openai_api_type="azure",
max_retries= 1,
timeout = 120,
temperature=0.8,
max_tokens = 4000
)
from langchain.agents import tool
@tool
def get_word_length(word: str) -> int:
"""Returns the length of a word."""
return len(word)
tools = [get_word_length]
llm_with_tools = llm.bind_tools(tools)
MEMORY_KEY = "chat_history"
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are very powerful assistant, but don't know current events",
),
MessagesPlaceholder(variable_name=MEMORY_KEY),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
chat_history = []
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
"chat_history": lambda x: x["chat_history"]
}
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
callback_handler = TokenUsageCallbackHandler()
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
input1 = "how many letters in the word system?"
result = agent_executor.invoke({"input": input1, "chat_history": chat_history})
chat_history.extend(
[
HumanMessage(content=input1),
AIMessage(content=result["output"]),
]
)
with get_openai_callback() as cb:
agent_executor.invoke({"input": "Is it a real word?", "chat_history": chat_history})
print(f"Total Tokens: {cb.total_tokens}")
print(f"Prompt Tokens: {cb.prompt_tokens}")
print(f"Completion Tokens: {cb.completion_tokens}")
print(f"Total Cost (USD): ${cb.total_cost}")
### Error Message and Stack Trace (if applicable)
Total Tokens: 0
Prompt Tokens: 0
Completion Tokens: 0
Total Cost (USD): $0.0
### Description
I am trying to use langchain and AzureOpenAI to build an agent. However I am unable to trace the cost.
### System Info
System Information
------------------
> OS: Windows
> OS Version: 10.0.19045
> Python Version: 3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]
Package Information
-------------------
> langchain_core: 0.1.48
> langchain: 0.1.4
> langchain_community: 0.0.16
> langsmith: 0.1.52
> langchain_experimental: 0.0.49
> langchain_openai: 0.1.4
Packages not installed (Not Necessarily a Problem)
--------------------------------------------------
The following packages were not found:
> langgraph
> langserve | 🤖:bug,investigate | low | Critical |
2,647,577,515 | godot | GDScript Editor setting "Highlight Type Safe Lines" not working as intended | ### Tested versions
Reproducible in 4.3.stable
### System information
Linux Ubuntu (KDE plasma)
### Issue description
The setting "highlight type safe code" does not work as intended. When turning on the setting (which it is by default) and changing theme colors ("Line Number Color" green and "Safe Line Number Color" blue) to be better visible, this is the result of my example code.

This behaviour seems inconsistent.
Expected behaviour:
Additionally green highlighting in line 9, 10, 14
I was trying to find the original issue/pull request adding this setting feature to find the intended documented behaviour so I am not sure if I might be misunderstanding this bug report, in that case I will close the issue again.
### Steps to reproduce
1. Go to 'Editor > Editor Settings > Text Editor > Appearance > Gutters > Highlight Type Safe Lines' and enable it.
2. Go to 'Editor > Editor Settings > Text Editor > Theme > Highlighting > Line Number Color' and 'Editor > Editor Settings > Text Editor > Theme > Highlighting > Line Number Color' and change colors to something more vibrant.
3. Create following script:
```gdscript
func hello():
var basecase: int = 42
basecase = 3
var unsafe = 12
unsafe = 4
unsafe = "lol"
var veryunsafe = 24
veryunsafe = "hi"
veryunsafe = 4
```
### Minimal reproduction project (MRP)
[bug-report-type-safe-line-highlighting.zip](https://github.com/user-attachments/files/17692831/bug-report-type-safe-line-highlighting.zip)
| enhancement,discussion,topic:gdscript | low | Critical |
2,647,578,482 | godot | Editor documentation is not updating | ### Tested versions
master
### System information
win10 21H2,Vulkan API 1.2.0
### Issue description


comment out: **//Engine::get_singleton()->add_singleton(Engine::Singleton("WorkerThreadPool", worker_thread_pool));**
start editor, in docs ZdSpace singleton still existed.
Trying to comment out other singleton is the same.
but, I del dir "editor_data/cache" and restart editor, document will be updated.
### Steps to reproduce
look at up
### Minimal reproduction project (MRP)
use commit 1d3c4e192c | bug,topic:editor,documentation | low | Minor |
2,647,601,691 | ui | [feat]: Schedule Component | ### Feature description
It is a component that displays events based on their time and day and places them in the correct place in the table. This explanation is clearer in the attached video below.
https://github.com/user-attachments/assets/533af091-25de-4940-b45b-e2099f35d6c3
### Affected component/components
_No response_
### Additional Context
N/A
### Before submitting
- [X] I've made research efforts and searched the documentation
- [X] I've searched for existing issues and PRs | area: request | low | Minor |
2,647,606,954 | rust | False positive `dead_code` lint when type is only used via destructuring | ### Code
```Rust
struct Foo(usize);
fn get_thing<T>() -> T { todo!() }
pub fn foo() {
let Foo(_x) = get_thing();
// no warning if the type is explicitly used when calling get_thing
// let Foo(_x) = get_thing::<Foo>();
}
```
### Current output
```Shell
warning: struct `Foo` is never constructed
--> src/lib.rs:1:8
|
1 | struct Foo(usize);
| ^^^
|
= note: `#[warn(dead_code)]` on by default
```
### Desired output
```Shell
(nothing)
```
### Rationale and extra context
For context, the specific "get_thing" function I originally ran into this was with [`zerocopy::FromBytes::read_from_bytes`]( https://docs.rs/zerocopy/latest/zerocopy/trait.FromBytes.html#method.read_from_bytes).
### Other cases
```Rust
```
### Rust Version
```Shell
$ rustc --version --verbose
rustc 1.82.0 (f6e511eec 2024-10-15)
binary: rustc
commit-hash: f6e511eec7342f59a25f7c0534f1dbea00d01b14
commit-date: 2024-10-15
host: x86_64-unknown-linux-gnu
release: 1.82.0
LLVM version: 19.1.1
```
### Anything else?
_No response_
<!-- TRIAGEBOT_START -->
<!-- TRIAGEBOT_ASSIGN_START -->
<!-- TRIAGEBOT_ASSIGN_DATA_START$${"user":"m-ysk"}$$TRIAGEBOT_ASSIGN_DATA_END -->
<!-- TRIAGEBOT_ASSIGN_END -->
<!-- TRIAGEBOT_END --> | A-lints,A-diagnostics,T-compiler,L-dead_code | low | Minor |
2,647,608,954 | rust | The run-make/version-verbose-commit-hash test doesn't reliably detect when the commit hash is missing | We recently had pre-nightly builds without a commit hash in the verbose version info (https://github.com/rust-lang/rust/issues/132845). This is despite the fact that we have a [test](https://github.com/rust-lang/rust/blob/5c9cc0cfbb11a97d084e5a09bb47cc5566421460/tests/run-make/version-verbose-commit-hash/rmake.rs#L0-L1) fully dedicated to preventing exactly that. Clearly, something is wrong with the test.
The issue with the test is that it has `//@ needs-git-hash`, which means it gets only run when bootstrap self-reports to compiletest that a git hash is available. This means if there is a bootstrap bug where a hash is never available, the test is skipped. Oopsie. Clearly, we shouldn't trust bootstrap on whether there should be a hash or not.
I don't know what exactly the conditions are under which a hash is legitimately missing. Probably it involves a build from the released tarball, or so?
I would suggest to remove the ``//@ needs-git-hash`` directive (it is only used by that one test), and instead have the test itself decide whether it can run or not. Maybe it can just check whether a `.git` folder exists? Maybe it can check whether it runs on CI, or runs on *our* CI? I don't know all the context here, but I hope it should be possible to independently determine whether the binary is supposed to have a hash.
Or maybe the simplest answer is to change [this](https://github.com/rust-lang/rust/blob/f42fb435179a01bef886bd187576e73bad35e289/src/bootstrap/src/core/build_steps/test.rs#L2121) code to always pass `--git-hash` on CI. But it seems cleaner to me to not have to trust bootstrap when we are testing whether bootstrap did the right thing.
Cc @rust-lang/bootstrap | A-testsuite,T-bootstrap,C-bug,A-run-make | low | Critical |
2,647,650,037 | next.js | [Turbopack] No Media Queries breakpoints in Chrome Device Emulation mode | ### Link to the code that reproduces this issue
https://codesandbox.io/p/devbox/turbopack-forked-plfd58
### To Reproduce
1. Add media quires with CSS Modules
2. Start the dev server via turbopack (next dev --turbo)
3. Open the Chrome and go to the device emulation section in the DevTools
4. Toggle media queries in options

5. Created media queries breakpoints will not shown. But all media queries still work
### Current vs. Expected behavior
Current (With Turbopack):

Expected (If using without Turbopack):

### Provide environment information
```bash
Operating System:
Platform: win32
Arch: x64
Version: Windows 11 Pro
Available memory (MB): 32601
Available CPU cores: 12
Binaries:
Node: 22.11.0
npm: 10.9.0
Yarn: N/A
pnpm: 9.5.0
Relevant Packages:
next: 15.0.3 // Latest available version is detected (15.0.3).
eslint-config-next: 15.0.3
react: 19.0.0-rc-5c56b873-20241107
react-dom: 19.0.0-rc-5c56b873-20241107
typescript: 5.6.3
Next.js Config:
output: N/A
```
### Which area(s) are affected? (Select all that apply)
Turbopack
### Which stage(s) are affected? (Select all that apply)
next dev (local)
### Additional context
The problem is that Turbopack, for example, when using `@media screen and (max-width: 768px)` converts this to `@media screen and (width <= 768px)`. Because of this, Chrome doesn't show media breakpoints | bug,Turbopack,linear: turbopack | low | Major |
2,647,681,696 | vscode | Git - Built in git plugin does not work with symlink | <!-- ⚠️⚠️ Do Not Delete This! bug_report_template ⚠️⚠️ -->
<!-- Please read our Rules of Conduct: https://opensource.microsoft.com/codeofconduct/ -->
<!-- 🕮 Read our guide about submitting issues: https://github.com/microsoft/vscode/wiki/Submitting-Bugs-and-Suggestions -->
<!-- 🔎 Search existing issues to avoid creating duplicates. -->
<!-- 🧪 Test using the latest Insiders build to see if your issue has already been fixed: https://code.visualstudio.com/insiders/ -->
<!-- 💡 Instead of creating your report here, use 'Report Issue' from the 'Help' menu in VS Code to pre-fill useful information. -->
<!-- 🔧 Launch with `code --disable-extensions` to check. -->
Does this issue occur when all extensions are disabled?: No
<!-- 🪓 If you answered No above, use 'Help: Start Extension Bisect' from Command Palette to try to identify the cause. -->
<!-- 📣 Issues caused by an extension need to be reported directly to the extension publisher. The 'Help > Report Issue' dialog can assist with this. -->
Version: 1.94.2 (user setup)
Commit: 384ff7382de624fb94dbaf6da11977bba1ecd427
Date: 2024-10-09T16:08:44.566Z
Electron: 30.5.1
ElectronBuildId: 10262041
Chromium: 124.0.6367.243
Node.js: 20.16.0
V8: 12.4.254.20-electron.0
OS: Windows_NT x64 10.0.22631
Steps to Reproduce:
1. init a directory `A` with git
2. Use `ln -s` to build a soft link from `A` to `B`
3. Use vscode to open folder `B`
If I modify something, I cannot see the built in git plugin work. For example, I cannot see the color bar.
| bug,git | low | Critical |
2,647,691,479 | rust | Documentation uses HRTB lifetime before declaring it | The [`str::rsplit_once`](https://doc.rust-lang.org/nightly/std/primitive.str.html#method.rsplit_once) docs say `<P as Pattern>::Searcher<'a>: for<'a> ReverseSearcher<'a>` where the code it's documenting says `for<'a> P::Searcher<'a>: ReverseSearcher<'a>`.
This almost certainly shows up in other places but this is where I first noticed it. | T-rustdoc,C-bug,A-cross-crate-reexports,A-higher-ranked | low | Minor |
2,647,704,003 | rust | ICE: `called 'Result::unwrap_err()' on an 'Ok' value: MethodCallee ` | <!--
ICE: Rustc ./BB46933371FE1A9056B1FE9FA982D37C278851295F07AEE8A85D56116A733DD5.rs '' 'thread 'rustc' panicked at compiler/rustc_hir_typeck/src/op.rs:593:30: 'called `Result::unwrap_err()` on an `Ok` value: MethodCallee { def_id: DefId(2:3465 ~ core[c9a4]::ops::arith::Add::add), args: [&'?1 N/#0, N/#0], sig: fn(&'?1 N/#0, N/#0) -> ?4t }'', 'thread 'rustc' panicked at compiler/rustc_hir_typeck/src/op.rs:593:30: 'called `Result::unwrap_err()` on an `Ok` value: MethodCallee { def_id: DefId(2:3465 ~ core[c9a4]::ops::arith::Add::add), args: [&'?1 N/#0, N/#0], sig: fn(&'?1 N/#0, N/#0) -> ?4t }''
File: /home/gh-matthiaskrgr/im/BB46933371FE1A9056B1FE9FA982D37C278851295F07AEE8A85D56116A733DD5.rs
-->
auto-reduced (treereduce-rust):
````rust
use std::ops::Add;
pub trait Numoid
where
Self: Sized,
for<N: Numoid> &'a Self: Add<Self>,
{
}
pub fn compute<N: Numoid>(a: N, b: N) -> N {
&a + b
}
````
original:
````rust
use std::ops::Add;
pub trait Numoid where
Self: Sized,
for<N: Numoid> &'a Self: Add<Self, Output = Self>
{}
impl<N> Numoid for N where
for<'a> &'a N: Add<N, Output = N>
{}
pub fn compute<N: Numoid>(a: N, b: N) -> N
// where for<'a> &'a N: Add<N, Output = N> // redundant bound is required
{ &a + b }
````
Version information
````
rustc 1.84.0-nightly (c22887b4d 2024-11-10)
binary: rustc
commit-hash: c22887b4d97400e8e024e19fb5f724eda65ad58d
commit-date: 2024-11-10
host: x86_64-unknown-linux-gnu
release: 1.84.0-nightly
LLVM version: 19.1.3
````
Possibly related line of code:
https://github.com/rust-lang/rust/blob/c22887b4d97400e8e024e19fb5f724eda65ad58d/compiler/rustc_hir_typeck/src/op.rs#L587-L599
Command:
`/home/gh-matthiaskrgr/.rustup/toolchains/master/bin/rustc `
<details><summary><strong>Program output</strong></summary>
<p>
```
error[E0261]: use of undeclared lifetime name `'a`
--> /tmp/icemaker_global_tempdir.8MhAhK5jiuLv/rustc_testrunner_tmpdir_reporting.CRfxbVTvebpq/mvce.rs:6:21
|
6 | for<N: Numoid> &'a Self: Add<Self>,
| ^^ undeclared lifetime
|
= note: for more information on higher-ranked polymorphism, visit https://doc.rust-lang.org/nomicon/hrtb.html
help: consider making the bound lifetime-generic with a new `'a` lifetime
|
6 | for<'a, N: Numoid> &'a Self: Add<Self>,
| +++
help: consider introducing lifetime `'a` here
|
3 | pub trait Numoid<'a>
| ++++
error[E0658]: only lifetime parameters can be used in this context
--> /tmp/icemaker_global_tempdir.8MhAhK5jiuLv/rustc_testrunner_tmpdir_reporting.CRfxbVTvebpq/mvce.rs:6:9
|
6 | for<N: Numoid> &'a Self: Add<Self>,
| ^
|
= note: see issue #108185 <https://github.com/rust-lang/rust/issues/108185> for more information
= help: add `#![feature(non_lifetime_binders)]` to the crate attributes to enable
= note: this compiler was built on 2024-11-10; consider upgrading it if it is out of date
error: bounds cannot be used in this context
--> /tmp/icemaker_global_tempdir.8MhAhK5jiuLv/rustc_testrunner_tmpdir_reporting.CRfxbVTvebpq/mvce.rs:6:12
|
6 | for<N: Numoid> &'a Self: Add<Self>,
| ^^^^^^
error[E0601]: `main` function not found in crate `mvce`
--> /tmp/icemaker_global_tempdir.8MhAhK5jiuLv/rustc_testrunner_tmpdir_reporting.CRfxbVTvebpq/mvce.rs:12:2
|
12 | }
| ^ consider adding a `main` function to `/tmp/icemaker_global_tempdir.8MhAhK5jiuLv/rustc_testrunner_tmpdir_reporting.CRfxbVTvebpq/mvce.rs`
thread 'rustc' panicked at compiler/rustc_hir_typeck/src/op.rs:593:30:
called `Result::unwrap_err()` on an `Ok` value: MethodCallee { def_id: DefId(2:3465 ~ core[c9a4]::ops::arith::Add::add), args: [&'?1 N/#0, N/#0], sig: fn(&'?1 N/#0, N/#0) -> ?4t }
stack backtrace:
0: 0x71bc56a6ea2a - <std::sys::backtrace::BacktraceLock::print::DisplayBacktrace as core::fmt::Display>::fmt::h83ad5e2a379fc4af
1: 0x71bc57204126 - core::fmt::write::hd8ade6666fd2c40a
2: 0x71bc5820c551 - std::io::Write::write_fmt::h7d2b379dca789286
3: 0x71bc56a6e882 - std::sys::backtrace::BacktraceLock::print::h59e64fca617eeec6
4: 0x71bc56a70d86 - std::panicking::default_hook::{{closure}}::h16156878543f93cc
5: 0x71bc56a70bd0 - std::panicking::default_hook::hbaee2e322f8f0f8f
6: 0x71bc55ae8e89 - std[248d60aad2b34f5b]::panicking::update_hook::<alloc[51073ec307549381]::boxed::Box<rustc_driver_impl[85a2628807024745]::install_ice_hook::{closure#0}>>::{closure#0}
7: 0x71bc56a71498 - std::panicking::rust_panic_with_hook::h1585e6a773a078df
8: 0x71bc56a7126a - std::panicking::begin_panic_handler::{{closure}}::h3c7880b1e47691e7
9: 0x71bc56a6eed9 - std::sys::backtrace::__rust_end_short_backtrace::hfac10c8ba5512bd0
10: 0x71bc56a70f2c - rust_begin_unwind
11: 0x71bc534d2650 - core::panicking::panic_fmt::h8ce9768a1a123ba1
12: 0x71bc53986e46 - core::result::unwrap_failed::h51824eae6f4049d1
13: 0x71bc573eb52a - <rustc_hir_typeck[d66e89de08caab9]::fn_ctxt::FnCtxt>::check_overloaded_binop
14: 0x71bc57fe139e - <rustc_hir_typeck[d66e89de08caab9]::fn_ctxt::FnCtxt>::check_expr_with_expectation_and_args
15: 0x71bc57fd9659 - <rustc_hir_typeck[d66e89de08caab9]::fn_ctxt::FnCtxt>::check_block_with_expected
16: 0x71bc57fdf9e4 - <rustc_hir_typeck[d66e89de08caab9]::fn_ctxt::FnCtxt>::check_expr_with_expectation_and_args
17: 0x71bc5741085b - rustc_hir_typeck[d66e89de08caab9]::check::check_fn
18: 0x71bc5740632c - rustc_hir_typeck[d66e89de08caab9]::typeck
19: 0x71bc57405cd3 - rustc_query_impl[edd1d74896ea38c9]::plumbing::__rust_begin_short_backtrace::<rustc_query_impl[edd1d74896ea38c9]::query_impl::typeck::dynamic_query::{closure#2}::{closure#0}, rustc_middle[dc17d8c02b0af06a]::query::erase::Erased<[u8; 8usize]>>
20: 0x71bc578b6681 - rustc_query_system[16014b61255515c2]::query::plumbing::try_execute_query::<rustc_query_impl[edd1d74896ea38c9]::DynamicConfig<rustc_query_system[16014b61255515c2]::query::caches::VecCache<rustc_span[966ebffeac22211b]::def_id::LocalDefId, rustc_middle[dc17d8c02b0af06a]::query::erase::Erased<[u8; 8usize]>>, false, false, false>, rustc_query_impl[edd1d74896ea38c9]::plumbing::QueryCtxt, false>
21: 0x71bc578b4d8d - rustc_query_impl[edd1d74896ea38c9]::query_impl::typeck::get_query_non_incr::__rust_end_short_backtrace
22: 0x71bc578b4a07 - <rustc_middle[dc17d8c02b0af06a]::hir::map::Map>::par_body_owners::<rustc_hir_analysis[aca35e167d413358]::check_crate::{closure#4}>::{closure#0}
23: 0x71bc578b29cf - rustc_hir_analysis[aca35e167d413358]::check_crate
24: 0x71bc578a310a - rustc_interface[1e17fd868ff3e64c]::passes::run_required_analyses
25: 0x71bc5800141e - rustc_interface[1e17fd868ff3e64c]::passes::analysis
26: 0x71bc580013ef - rustc_query_impl[edd1d74896ea38c9]::plumbing::__rust_begin_short_backtrace::<rustc_query_impl[edd1d74896ea38c9]::query_impl::analysis::dynamic_query::{closure#2}::{closure#0}, rustc_middle[dc17d8c02b0af06a]::query::erase::Erased<[u8; 1usize]>>
27: 0x71bc581d5d2e - rustc_query_system[16014b61255515c2]::query::plumbing::try_execute_query::<rustc_query_impl[edd1d74896ea38c9]::DynamicConfig<rustc_query_system[16014b61255515c2]::query::caches::SingleCache<rustc_middle[dc17d8c02b0af06a]::query::erase::Erased<[u8; 1usize]>>, false, false, false>, rustc_query_impl[edd1d74896ea38c9]::plumbing::QueryCtxt, false>
28: 0x71bc581d5a0e - rustc_query_impl[edd1d74896ea38c9]::query_impl::analysis::get_query_non_incr::__rust_end_short_backtrace
29: 0x71bc580cc93a - rustc_interface[1e17fd868ff3e64c]::interface::run_compiler::<core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>, rustc_driver_impl[85a2628807024745]::run_compiler::{closure#0}>::{closure#1}
30: 0x71bc58183410 - std[248d60aad2b34f5b]::sys::backtrace::__rust_begin_short_backtrace::<rustc_interface[1e17fd868ff3e64c]::util::run_in_thread_with_globals<rustc_interface[1e17fd868ff3e64c]::util::run_in_thread_pool_with_globals<rustc_interface[1e17fd868ff3e64c]::interface::run_compiler<core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>, rustc_driver_impl[85a2628807024745]::run_compiler::{closure#0}>::{closure#1}, core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>>::{closure#0}, core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>>::{closure#0}::{closure#0}, core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>>
31: 0x71bc5818382b - <<std[248d60aad2b34f5b]::thread::Builder>::spawn_unchecked_<rustc_interface[1e17fd868ff3e64c]::util::run_in_thread_with_globals<rustc_interface[1e17fd868ff3e64c]::util::run_in_thread_pool_with_globals<rustc_interface[1e17fd868ff3e64c]::interface::run_compiler<core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>, rustc_driver_impl[85a2628807024745]::run_compiler::{closure#0}>::{closure#1}, core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>>::{closure#0}, core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>>::{closure#0}::{closure#0}, core[c9a4dec56b015974]::result::Result<(), rustc_span[966ebffeac22211b]::ErrorGuaranteed>>::{closure#1} as core[c9a4dec56b015974]::ops::function::FnOnce<()>>::call_once::{shim:vtable#0}
32: 0x71bc581842f9 - std::sys::pal::unix::thread::Thread::new::thread_start::hd6ed3da90976b7d6
33: 0x71bc5209ca94 - start_thread
at ./nptl/pthread_create.c:447:8
34: 0x71bc52129c3c - clone3
at ./misc/../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
35: 0x0 - <unknown>
error: the compiler unexpectedly panicked. this is a bug.
note: we would appreciate a bug report: https://github.com/rust-lang/rust/issues/new?labels=C-bug%2C+I-ICE%2C+T-compiler&template=ice.md
note: please make sure that you have updated to the latest nightly
note: rustc 1.84.0-nightly (c22887b4d 2024-11-10) running on x86_64-unknown-linux-gnu
query stack during panic:
#0 [typeck] type-checking `compute`
#1 [analysis] running analysis passes on this crate
end of query stack
error: aborting due to 4 previous errors
Some errors have detailed explanations: E0261, E0601, E0658.
For more information about an error, try `rustc --explain E0261`.
```
</p>
</details>
<!--
query stack:
#0 [typeck] type-checking `compute`
#1 [analysis] running analysis passes on this crate
--> | I-ICE,P-low,T-compiler,C-bug,S-bug-has-test | low | Critical |
2,647,729,488 | tauri | [bug] Run tauri in Linux container over ssh -X fails | ### Describe the bug
I am building and running it is a Linux Container (incus) using ssh with X forwarding ssh -X name@host.
I have no idea where to look for the problem, every other X application works using X forwarding.
Where do I look for the problem?
Info Watching /home/svvs/tauri-app/src-tauri for changes...
Finished `dev` profile [unoptimized + debuginfo] target(s) in 8.18s
Gtk-Message: 20:53:51.344: Failed to load module "canberra-gtk-module"
Gtk-Message: 20:53:51.347: Failed to load module "canberra-gtk-module"
libEGL warning: DRI3: failed to query the version
libEGL warning: DRI2: failed to authenticate
(tauri-app:373): Gdk-WARNING **: 20:53:51.413: The program 'tauri-app' received an X Window System error.
This probably reflects a bug in the program.
The error was 'BadRequest (invalid request code or no such operation)'.
(Details: serial 282 error_code 1 request_code 155 (unknown) minor_code 1)
(Note to programmers: normally, X errors are reported asynchronously;
that is, you will receive the error a while after causing it.
To debug your program, run it with the GDK_SYNCHRONIZE environment
variable to change this behavior. You can then get a meaningful
backtrace from your debugger if you break on the gdk_x_error() function.)
### Reproduction
I create a incus container
incus init images:debian/12 tauri
I install rust and then tauri according to the directions
Then run $ cargo tauri dev
### Expected behavior
I expect to see a tauri window popup
### Full `tauri info` output
```text
svvs@guirust:~$ cargo tauri info
[✔] Environment
- OS: Debian 12.0.0 x86_64 (X64)
✔ webkit2gtk-4.1: 2.46.3
✔ rsvg2: 2.54.7
✔ rustc: 1.82.0 (f6e511eec 2024-10-15)
✔ cargo: 1.82.0 (8f40fc59f 2024-08-21)
✔ rustup: 1.27.1 (54dd3d00f 2024-04-24)
✔ Rust toolchain: stable-x86_64-unknown-linux-gnu (environment override by RUSTUP_TOOLCHAIN)
- node: 18.19.0
- npm: 9.2.0
[-] Packages
- tauri 🦀: 2.1.0
- tauri-build 🦀: 2.0.3
- wry 🦀: 0.47.0
- tao 🦀: 0.30.6
- tauri-cli 🦀: 2.0.4
- @tauri-apps/api : 2.1.0
- @tauri-apps/cli : 2.0.5 (outdated, latest: 2.1.0)
[-] Plugins
- tauri-plugin-shell 🦀: 2.0.2
- @tauri-apps/plugin-shell : 2.0.1
[-] App
- build-type: bundle
- CSP: unset
- frontendDist: ../dist
- devUrl: http://localhost:1420/
- framework: Vue.js
- bundler: Vite
```
### Stack trace
```text
No stack trace
```
### Additional context
_No response_ | type: bug,platform: Linux,status: needs triage | low | Critical |
2,647,753,387 | neovim | Windows and Linux Remote Wezterm Rendering Bug | ### Problem
When scrolling horizontally - graphical artifacts left behind if line is longer than screen width.
Sometimes happens when scrolling vertically.
Artifacts can be seen throughout text field, but usually appear in left column where line numbers are housed.
Refer to:
https://github.com/wez/wezterm/issues/6314
After playing with configurations with `nvim --clean` - I observed less artifacts with `set nonumber` and more with `set number`
### Steps to reproduce
Open Wezterm
SSH to a remote Linux server
Open file with characters on one line longer than the width of the window in Neovim with clean or not
Scroll right until horizontal scrolling is triggered
### Expected behavior
No artifacts should be left behind on scrolling.
### Nvim version (nvim -v)
.10.2
### Vim (not Nvim) behaves the same?
no
### Operating system/version
Windows or WSL Ubuntu local, Rhel 7, 8, 9 remote via ssh
### Terminal name/version
Wezterm/20241104-083404-51c794ac and current release: 20241015-083151-9ddca7bd
### $TERM environment variable
xterm-256color
### Installation
Both Wezterm builds via Windows and Ubuntu and installed via zip or tar. Version .10.2
Vulkan and DX12 Discreet GPU. | bug,tui,display | low | Critical |
2,647,765,105 | next.js | Turbopack loader ignores resource query on aliased imports | ### Link to the code that reproduces this issue
https://github.com/sam3d/turbopack-resource-query-repro
### To Reproduce
Start the application usng `pnpm dev` (turbopack is enabled by default)
### Current vs. Expected behavior
I'd expect both a relative and aliased import to behave the same way:
```tsx
import isHello from "./example.txt?hello";
import shouldBeWorld from "@/app/example.txt?world";
export default async function Page() {
return (
<ul>
<li>isHello: {isHello}</li>
<li>shouldBeWorld: {shouldBeWorld}</li>
</ul>
);
}
```
However they do not, as the resource query is not passed to the loader when accessing with `@/app/...`:
<img width="336" alt="image" src="https://github.com/user-attachments/assets/b96cd793-3b50-4189-9d0c-e5757f78bd0b">
### Provide environment information
```bash
Operating System:
Platform: darwin
Arch: arm64
Version: Darwin Kernel Version 24.0.0: Mon Aug 12 20:51:54 PDT 2024; root:xnu-11215.1.10~2/RELEASE_ARM64_T6000
Available memory (MB): 65536
Available CPU cores: 10
Binaries:
Node: 20.11.1
npm: 10.2.4
Yarn: 1.22.19
pnpm: 9.12.3
Relevant Packages:
next: 15.0.4-canary.4 // Latest available version is detected (15.0.4-canary.4).
eslint-config-next: 15.0.4-canary.4
react: 19.0.0-rc-66855b96-20241106
react-dom: 19.0.0-rc-66855b96-20241106
typescript: 5.6.3
Next.js Config:
output: N/A
```
### Which area(s) are affected? (Select all that apply)
Turbopack
### Which stage(s) are affected? (Select all that apply)
next dev (local)
### Additional context
_No response_ | bug,Turbopack | low | Minor |
2,647,796,056 | pytorch | Code Bug: Strange/Unexpected Logic in FSDP _recursive_wrap | ### 🐛 Describe the bug
In `torch/distributed/fsdp/wrap.py`, the recursive wrap function `_recursive_wrap` is defined as below in current version:
```python
def _recursive_wrap(
module: nn.Module,
auto_wrap_policy: Callable,
wrapper_cls: Callable,
ignored_modules: Set[nn.Module],
ignored_params: Set[nn.Parameter],
only_wrap_children: bool = False,
**kwargs: Any,
) -> Tuple[nn.Module, int]:
"""
Wraps submodules of ``module`` for which ``auto_wrap_policy`` returns
``True`` with ``wrapper_cls``.
Args:
module (nn.Module): Module to recursively wrap.
auto_wrap_policy (Callable): A callable representing a policy that
determines which modules to recursively wrap with ``wrapper_cls``.
ignored_modules (Set[torch.nn.Module]): Modules to ignore when
wrapping.
ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when
wrapping; these should be the parameters contained in the modules
in ``ignored_modules``.
Returns:
(nn.Module, int):
``module`` after wrapping and the numel recursively wrapped.
"""
assert auto_wrap_policy is not None, "Must specify auto_wrap_policy."
assert wrapper_cls is not None, "Must specify wrapper_cls"
# Make sure no child is already wrapped.
for _, child in module.named_modules():
if child in ignored_modules:
continue
try:
assert not isinstance(child, cast(type, wrapper_cls))
except TypeError:
# wrapper_cls is a function as opposed to a class type, just bypass above check.
pass
# We count all params, assuming none of them are already wrapped.
nonwrapped_numel = sum(
p.numel() for p in module.parameters() if p not in ignored_params
)
assert auto_wrap_policy is not None
if auto_wrap_policy(module=module, recurse=True, nonwrapped_numel=nonwrapped_numel):
total_wrapped_numel = 0
# Iterate through the children, recursively wrap if necessary
for name, child in module.named_children():
if child in ignored_modules:
continue
wrapped_child, num_wrapped_params = _recursive_wrap(
module=child,
auto_wrap_policy=auto_wrap_policy,
wrapper_cls=wrapper_cls,
ignored_modules=ignored_modules,
ignored_params=ignored_params,
**kwargs,
)
setattr(module, name, wrapped_child)
# Keep track of how many parameters have been wrapped
total_wrapped_numel += num_wrapped_params
# decide if we need to wrap the current module,
# since the left over parameters exceed the number of params to wrap
remainder = nonwrapped_numel - total_wrapped_numel
# ---------- MARK HERE ----------
if not only_wrap_children and auto_wrap_policy(
module=module, recurse=False, nonwrapped_numel=remainder
):
# Leaf node or final wrapping of the remainder both happen here.
return _wrap(module, wrapper_cls, **kwargs), nonwrapped_numel
else:
return module, total_wrapped_numel
return module, 0
```
We can observe that: the `if` clause at the marked place, which checks the policy when `recurse=False`, is **inside** of the `if` clause which checks with `recurse=True`. Therefore, if the policy just rejects to recurse to the children modules of the current module, the current module **itself** will also not be wrapped.
This behavior seems contradicts with the expected behavior when using the default `size_based_wrap_policy`:
```python
def size_based_auto_wrap_policy(
module: nn.Module,
recurse: bool,
nonwrapped_numel: int,
# Additional custom arguments
min_num_params: int = int(1e8),
force_leaf_modules: Optional[Set[Type[nn.Module]]] = None,
exclude_wrap_modules: Optional[Set[Type[nn.Module]]] = None,
) -> bool:
"""
A size-based auto wrap policy.
Args:
module (nn.Module): Current module being considered.
recurse (bool): If ``False``, then this function must decide whether
``module`` should be wrapped as an FSDP instance or not. If
``True``, then the function is still recursing down the module
tree as a part of the DFS.
nonwrapped_numel (int): Parameter numel not yet wrapped.
min_num_params (int): Customizable policy input that controls the size
threshold over which a module is ready to be wrapped. This is in
units of numel.
force_leaf_modules (Set[Type[nn.Module]]): Set of module types to keep
as leaves, i.e. their children will never be wrapped.
exclude_wrap_modules (Set[Type[nn.Module]]): Set of module types to be
excluded in wrapping.
Returns:
Whether ``module`` should be wrapped.
"""
force_leaf_modules = (
size_based_auto_wrap_policy.FORCE_LEAF_MODULES # type: ignore[attr-defined]
if force_leaf_modules is None
else force_leaf_modules
)
exclude_wrap_modules = (
size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES # type: ignore[attr-defined]
if exclude_wrap_modules is None
else exclude_wrap_modules
)
# Keep the argument `min_num_params` for BC for now, but it represents the
# minimum non-wrapped *numel* before triggering a wrapping
min_nonwrapped_numel = min_num_params
is_large = nonwrapped_numel >= min_nonwrapped_numel
# ---------- MARK HERE ----------
if recurse:
# We should recurse if the module is big enough but not in force_leaf_modules list.
return is_large and not isinstance(module, tuple(force_leaf_modules))
else:
# If we are not recursing, determine if we should wrap.
return is_large and not isinstance(module, tuple(exclude_wrap_modules))
```
The marked line checks whether the current module is one of the `force_leaf_modules`. However, as this check always returns `False` when `recurse=True`, the check with `recurse=False` will directly be skipped, and therefore the current module will **never** be wrapped by `return _wrap(module, wrapper_cls, **kwargs), nonwrapped_numel`. This contradicts with the expected behavior that this module should be wrapped as a leaf node.
One possible correct implementation is to move out the inner if clause:
```python
def _recursive_wrap(
module: nn.Module,
auto_wrap_policy: Callable,
wrapper_cls: Callable,
ignored_modules: Set[nn.Module],
ignored_params: Set[nn.Parameter],
only_wrap_children: bool = False,
**kwargs: Any,
) -> Tuple[nn.Module, int]:
"""
Wraps submodules of ``module`` for which ``auto_wrap_policy`` returns
``True`` with ``wrapper_cls``.
Args:
module (nn.Module): Module to recursively wrap.
auto_wrap_policy (Callable): A callable representing a policy that
determines which modules to recursively wrap with ``wrapper_cls``.
ignored_modules (Set[torch.nn.Module]): Modules to ignore when
wrapping.
ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when
wrapping; these should be the parameters contained in the modules
in ``ignored_modules``.
Returns:
(nn.Module, int):
``module`` after wrapping and the numel recursively wrapped.
"""
assert auto_wrap_policy is not None, "Must specify auto_wrap_policy."
assert wrapper_cls is not None, "Must specify wrapper_cls"
# Make sure no child is already wrapped.
for _, child in module.named_modules():
if child in ignored_modules:
continue
try:
assert not isinstance(child, cast(type, wrapper_cls))
except TypeError:
# wrapper_cls is a function as opposed to a class type, just bypass above check.
pass
# We count all params, assuming none of them are already wrapped.
nonwrapped_numel = sum(
p.numel() for p in module.parameters() if p not in ignored_params
)
assert auto_wrap_policy is not None
total_wrapped_numel = 0
if auto_wrap_policy(module=module, recurse=True, nonwrapped_numel=nonwrapped_numel):
# Iterate through the children, recursively wrap if necessary
for name, child in module.named_children():
if child in ignored_modules:
continue
wrapped_child, num_wrapped_params = _recursive_wrap(
module=child,
auto_wrap_policy=auto_wrap_policy,
wrapper_cls=wrapper_cls,
ignored_modules=ignored_modules,
ignored_params=ignored_params,
**kwargs,
)
setattr(module, name, wrapped_child)
# Keep track of how many parameters have been wrapped
total_wrapped_numel += num_wrapped_params
# decide if we need to wrap the current module,
# since the left over parameters exceed the number of params to wrap
# MODIFIED HERE: moved out this if
remainder = nonwrapped_numel - total_wrapped_numel
if not only_wrap_children and auto_wrap_policy(
module=module, recurse=False, nonwrapped_numel=remainder
):
# Leaf node or final wrapping of the remainder both happen here.
return _wrap(module, wrapper_cls, **kwargs), nonwrapped_numel
else:
return module, total_wrapped_numel
return module, 0
```
### Versions
All the recent versions, including main, 2.5, 2.4, etc.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @zhaojuanmao @mrshenli @rohan-varma @chauhang | oncall: distributed,module: fsdp | low | Critical |
2,647,806,052 | vscode | Can’t use Cmd-W to close editors/windows on iPad | Hi, not sure if this is a Safari, iPadOS or VSCode issue, but as the title says, pressing Cmd-W doesn’t do anything on iPadOS (using vscode.dev as a PWA).
iPad Pro 13-inch (M4), iPadOS 18.1.
Can’t add a custom shortcut either, it doesn’t take Cmd-W (creating a shortcut for other key combos such as Cmd-E seems to work fine).
Version: 1.95.2
Commit: e8653663e8840adaf45af01eab5c627a5af81807
User Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.1 Safari/605.1.15
Embedder: codespaces
<!-- generated by web issue reporter --> | bug,keybindings,ios-ipados | low | Minor |
2,647,864,282 | godot | Layout / position of children of SubViewportContainer not editable | ### Tested versions
- Reproducible in v4.4.dev.custom_build [40e7fe519]
### System information
Godot v4.4.dev (40e7fe519) - Windows 10.0.19045 - Multi-window, 1 monitor - Vulkan (Forward+) - dedicated Radeon RX 580 Series (Advanced Micro Devices, Inc.; 31.0.21912.14) - AMD Ryzen 5 2600 Six-Core Processor (12 threads)
### Issue description
It is not possible to position other children of a SubViewportContainer other than the SubViewport using the editor. It is however possible to do so via script. If more than the SubViewport child is inside the SubViewportContainer it seems like the preview in the editor is broken as well as the layout of nested nodes in the other SubViewportContainer child node.

In the image above the selected red area should be positioned inside the `vp_container` node with its layout intact (notice the space between the left and right side of the nested nodes)
Running the project reveals the following result:

The layout of the nested nodes is lost. This can be circumvented by running
```gd
set_anchors_and_offsets_preset(Control.PRESET_FULL_RECT, Control.PRESET_MODE_MINSIZE, 2)
```
on the ``viewport_hbox`` node. This is also done in the C++ source code for the [Advanced import settings dialog](https://github.com/godotengine/godot/blob/0f5f3bc9546b46b2029fc8896dc859697f1eab97/editor/import/3d/scene_import_settings.cpp#L1621-L1778). Another option would be to save the ``viewport_hbox`` node as a separate scene and edit the layout there to full rect anchor.
Both are workarounds though. This should be possible in-editor as well. The SubviewportContainer doesn't offer these solutions though and this bug results.
### Steps to reproduce
1. Open the MRP
2. Try to move the position / layout of the children inside the ``vp_container`` node.
### Minimal reproduction project (MRP)
[MRP-SubViewportContainer-Bug.zip](https://github.com/user-attachments/files/17693924/MRP-SubViewportContainer-Bug.zip)
| bug,topic:gui | low | Critical |
2,647,866,616 | langchain | DOC: DSPy integration with LangChain | ### URL
https://python.langchain.com/docs/integrations/providers/dspy/
### Checklist
- [X] I added a very descriptive title to this issue.
- [X] I included a link to the documentation page I am referring to (if applicable).
### Issue with current documentation:
Hi,
I noticed from DSPy side, this integration is no longer supported.
[DSPy reference](https://github.com/stanfordnlp/dspy/blob/main/examples/tweets/compiling_langchain.ipynb)
As mentioned in the notebook:
> This integration with LangChain is no longer supported.
May I clarify if it is same from LangChain side? Does this integration still supported? Thanks in advance.
### Idea or request for content:
_No response_ | 🤖:docs | low | Minor |
2,647,944,860 | pytorch | Cannot export FeaStConv-based network to onnx via torch.jit.script | ### 🐛 Describe the bug
Hello!
I planned to export a network based on graph neural network as an onnx model, but it always failed. Then I found that exporting a single-layer network `torch_geometric.nn.FeaStConv` to onnx would also fail. I don’t know if any of you have encountered similar problems. I would like to know why and how to solve them.
Thanks!
what I run:
```python
import torch.onnx
from torch_geometric.nn import FeaStConv
from torch_geometric.data import Data
class GNNModule1(torch.nn.Module):
def __init__(self, in_channel=3):
super(GNNModule1, self).__init__()
self.l_conv1 = FeaStConv(in_channel, 32, 9).jittable()
def forward(self, x, edge_index, edge_weight):
x = self.l_conv1(x, edge_index)
return x
if __name__ =="__main__":
num_nodes = 4
in_channels = 3
x = torch.randn((num_nodes, in_channels))
edge_index = torch.tensor([
[0, 1, 2, 3], # 边的起始节点
[1, 2, 3, 0] # 边的终止节点
], dtype=torch.long)
edge_weight = torch.tensor([1.0, 0.5, 1.2, 0.8], dtype=torch.float)
data = Data(x=x, edge_index=edge_index, edge_weight=edge_weight)
model = GNNModule1(in_channel=in_channels)
model.eval()
# output = model(data.x, data.edge_index, data.edge_weight)
# print("Output:", output)
model = torch.jit.script(model) # comment this line will be success
onnx_path = "gnn_model.onnx"
try:
torch.onnx.export(
model, # 脚本化后的模型
(data.x, data.edge_index, data.edge_weight), # 模型输入
onnx_path, # 输出文件路径
input_names=['x', 'edge_index', 'edge_weight'], # 输入的名称
output_names=['out'], # 输出的名称
verbose=True,
opset_version=15 # ONNX opset 版本
)
print(f"ONNX model saved to {onnx_path}")
except Exception as e:
print("Error during ONNX export:", e)
```
what I got:
```shell
[W C:\actions-runner\_work\pytorch\pytorch\builder\windows\pytorch\torch\csrc\jit\passes\lower_tuples.cpp:254] Warning: tuple appears in the op outputs, but this op does not forward tuples, unsupported kind: prim::unchecked_cast (function flattenOutputs)
Torch IR graph at exception: graph(%x.1 : Float(4, 3, strides=[3, 1], requires_grad=0, device=cpu),
%edge_index.1 : Long(2, 4, strides=[4, 1], requires_grad=0, device=cpu),
%edge_weight : Float(4, strides=[1], requires_grad=0, device=cpu),
%l_conv1.u.weight : Float(9, 3, strides=[3, 1], requires_grad=0, device=cpu),
%l_conv1.c : Float(9, strides=[1], requires_grad=0, device=cpu),
%l_conv1.lin.weight : Float(288, 3, strides=[3, 1], requires_grad=0, device=cpu),
%l_conv1.bias : Float(32, strides=[1], requires_grad=0, device=cpu)):
......
%548 : int = aten::dim(%out.9), scope: GNNModule1::/torch_geometric.nn.aggr.basic.MeanAggregation::aggr_module # D:\ProgramData\anaconda3\envs\Mesh_Net\lib\site-packages\torch_geometric\utils\scatter.py:19:21
%size.17 : int[] = aten::mul(%10, %548), scope: GNNModule1::/torch_geometric.nn.aggr.basic.MeanAggregation::aggr_module # D:\ProgramData\anaconda3\envs\Mesh_Net\lib\site-packages\torch_geometric\utils\scatter.py:19:15
%550 : int[] = aten::_set_item(%size.17, %514, %19), scope: GNNModule1::/torch_geometric.nn.aggr.basic.MeanAggregation::aggr_module # D:\ProgramData\anaconda3\envs\Mesh_Net\lib\site-packages\torch_geometric\utils\scatter.py:20:8
%551 : Tensor = aten::view(%count.13, %size.17), scope: GNNModule1::/torch_geometric.nn.aggr.basic.MeanAggregation::aggr_module # D:\ProgramData\anaconda3\envs\Mesh_Net\lib\site-packages\torch_geometric\utils\scatter.py:21:15
%552 : Tensor = aten::expand_as(%551, %out.9), scope: GNNModule1::/torch_geometric.nn.aggr.basic.MeanAggregation::aggr_module # D:\ProgramData\anaconda3\envs\Mesh_Net\lib\site-packages\torch_geometric\utils\scatter.py:21:15
%553 : Tensor = aten::div(%out.9, %552), scope: GNNModule1::/torch_geometric.nn.aggr.basic.MeanAggregation::aggr_module # D:\ProgramData\anaconda3\envs\Mesh_Net\lib\site-packages\torch_geometric\utils\scatter.py:84:19
-> (%553)
%bias.3 : Tensor = prim::GetAttr[name="bias"](%l_conv1), scope: GNNModule1::
%x.5 : Tensor = aten::add(%out.7, %bias.3, %31), scope: GNNModule1:: # C:\Users\WooChi\AppData\Local\Temp\WooChi_pyg\tmpc6s_oinc.py:247:18
return (%x.5)
Error during ONNX export: tuple use not matched to tuple construct. Instead found: prim::Return
```
It seems to be an error caused by outputting a tuple (the log is very long and complicated, forgive me for not understanding it), but the output of the network is actually a tensor, and when `torch.jit.script` is not used, it can be exported normally as onnx model.
(Actually, I searched issues and found #41548 which is similar but seems different.)
### Versions
My environment:
```shell
python collect_env.py
Collecting environment information...
PyTorch version: 1.13.0+cu116
Is debug build: False
CUDA used to build PyTorch: 11.6
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 11 专业版 (10.0.22631 64 位)
GCC version: Could not collect
Clang version: Could not collect
CMake version: version 3.30.2
Libc version: N/A
Python version: 3.8.16 (default, Jun 12 2023, 21:00:42) [MSC v.1916 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.22631-SP0
Is CUDA available: True
CUDA runtime version: 11.6.55
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA RTX 2000 Ada Generation Laptop GPU
Nvidia driver version: 556.12
cuDNN version: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6\bin\cudnn_ops_train64_8.dll
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Name: 13th Gen Intel(R) Core(TM) i7-13850HX
Manufacturer: GenuineIntel
Family: 198
Architecture: 9
ProcessorType: 3
DeviceID: CPU0
CurrentClockSpeed: 2100
MaxClockSpeed: 2100
L2CacheSize: 28672
L2CacheSpeed: None
Revision: None
Versions of relevant libraries:
[pip3] numpy==1.24.4
[pip3] onnx==1.16.1
[pip3] torch==1.13.0+cu116
[pip3] torch-cluster==1.6.1+pt113cu116
[pip3] torch-geometric==2.3.1
[pip3] torch-scatter==2.1.1+pt113cu116
[pip3] torch-sparse==0.6.17+pt113cu116
[pip3] torchaudio==0.13.0+cu116
[pip3] torchvision==0.14.0+cu116
[conda] numpy 1.24.4 pypi_0 pypi
[conda] torch 1.13.0+cu116 pypi_0 pypi
[conda] torch-cluster 1.6.1+pt113cu116 pypi_0 pypi
[conda] torch-geometric 2.3.1 pypi_0 pypi
[conda] torch-scatter 2.1.1+pt113cu116 pypi_0 pypi
[conda] torch-sparse 0.6.17+pt113cu116 pypi_0 pypi
[conda] torchaudio 0.13.0+cu116 pypi_0 pypi
[conda] torchvision 0.14.0+cu116 pypi_0 pypi
``` | module: onnx,triaged | low | Critical |
2,647,953,648 | terminal | How to avoid changing cursorShape when focus terminal window if I set `unfocusedAppearance` | ### Windows Terminal version
1.21.2911.0
### Windows build number
10.0.22631.0
### Other Software
NVIM v0.10.2
### Steps to reproduce
1. snippet in settings.json
```json
"opacity": 50,
"cursorShape": "underscore",
"unfocusedAppearance": {
"opacity": 90
},
```
2. open `nvim`, current cursor shape is `block`
3. press `alt`+`tab` to focus another window, then press it again to focus back to terminal
4. cursor shape changes to `underscore`. (If I delete the `unfocusedAppearance` in settings.json, the cursor shape does not change after focus back)
### Expected Behavior
When I set `unfocusedAppearance`, do not change my cursor shape after focus back to terminal window.
### Actual Behavior
Cursor shape changes to `underscore` that I set in `cursorShape`. | Help Wanted,Issue-Bug,Area-TerminalControl,Product-Terminal | low | Minor |
2,647,954,349 | ollama | vram usage does not go back down after model unloads | ### What is the issue?
when i set keep alive to 0 the memory usage does not go all the way back down. also it uses system ram when vram still avalible
gpu 7800xt
platform windows
rocm version 6.1
### OS
Windows
### GPU
AMD
### CPU
AMD
### Ollama version
0.4.1 | bug,amd,needs more info | medium | Major |
2,647,963,155 | rust | Code size regression (opt-level=z) after 1.82.0 | Code:
```rust
#![no_main]
#[no_mangle]
pub struct Point {
x: i32,
y: i32,
}
#[no_mangle]
pub fn f(a: Point) -> i32 {
if a.x > 0 && a.y < 0 || a.x < 0 && a.y > 0 {
a.x * a.y
} else {
a.x + a.y
}
}
```
Link: https://godbolt.org/z/ocs7aGMxK | A-LLVM,P-low,T-compiler,C-bug,regression-untriaged | low | Minor |
2,647,997,951 | PowerToys | In v0.86.0 unbounded mouse cannot drag-and-drop sort re-layout | ### Microsoft PowerToys version
v0.86.0
### Installation method
GitHub
### Running as admin
None
### Area(s) with issue?
Mouse Without Borders
### Steps to reproduce
In v0.86.0 unbounded mouse cannot drag and drop to sort and re-layout, as shown below:

This is my actual computer location layout, which is normal on the machine:

However, the display on the main controller is the opposite, and it cannot be dragged. Obviously, the controller is on the right, but I can only move the mouse from the left to another controller, which is very awkward. In fact, the other computer is on my right. Shouldn't it slide from the left to the right?

### ✔️ Expected Behavior
_No response_
### ❌ Actual Behavior
_No response_
### Other Software
_No response_ | Issue-Bug,Needs-Triage | low | Minor |
2,648,092,107 | PowerToys | Keyboard and Mouse Freeze | ### Description of the new feature / enhancement
Quick shortcut to disable all input from keyboard and mouse except the hotkey combination.
### Scenario when this would be used?
Useful when cleaning keyboard or watching movie to prevent distraction.
### Supporting information
[BlueLife KeyFreeze](https://www.sordum.org/7921/bluelife-keyfreeze-v1-4-block-keyboard-and-mouse/) | Needs-Triage | low | Minor |
2,648,102,429 | rust | Code size regression iterating an array (opt-level=z) after 1.82.0 | Code:
```rust
#![no_main]
#[no_mangle]
pub fn f(a: i32) -> i32 { a + a }
#[no_mangle]
pub fn g(a: [i32; 5]) -> i32 {
let mut sum = 0;
let arr = [1, 2, 3, 4, 5];
for i in a.iter().chain(arr.iter()) {
sum += i;
}
sum
}
```
Link: https://godbolt.org/z/14KM5KnPx | A-LLVM,P-low,T-compiler,C-bug,I-heavy,regression-untriaged | low | Minor |
2,648,172,788 | langchain | unable to set max_token_limit in then create_retrieval_chain function when using ContextualCompressionRetriever to compress documents. | ### Checked other resources
- [X] I added a very descriptive title to this issue.
- [X] I searched the LangChain documentation with the integrated search.
- [X] I used the GitHub search to find a similar question and didn't find it.
- [X] I am sure that this is a bug in LangChain rather than my code.
- [X] The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).
### Example Code
```python
from langchain_core.retrievers import BaseRetriever
from langchain.retrievers import ContextualCompressionRetriever
from langchain_community.document_compressors import LLMLinguaCompressor
from GPUtil import getAvailable
from langchain_core.documents import Document
from langchain_core.embeddings.embeddings import Embeddings
from typing import Any, List
from pydantic import BaseModel
from langchain_qdrant import FastEmbedSparse, QdrantVectorStore, RetrievalMode
from qdrant_client import QdrantClient, models
from qdrant_client.http.models import Distance
from langchain_google_genai import (
ChatGoogleGenerativeAI,
HarmBlockThreshold,
HarmCategory
)
sparse_embedding = FastEmbedSparse(model_name="Qdrant/bm25")
sparse_vector_params = {
"index": models.SparseIndexParams(on_disk=False)
}
quantization_config = models.ScalarQuantization(
scalar = models.ScalarQuantizationConfig(
type=models.ScalarType.INT8,
quantile=0.99,
always_ram=True,
)
)
def create_compression_retriever(retriever: BaseRetriever):
device_map = "cuda" if getAvailable() else "cpu"
model_name="openai-community/gpt2"
compressor = LLMLinguaCompressor(model_name=model_name, device_map=device_map)
return ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever)
google_api_key = ''
model = 'gemini-1.5-flash-002'
embedding = GoogleGenerativeAIEmbeddings(
model = model,
google_api_key = google_api_key
)
collection_name='test_collection'
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=10,
)
documents = []
for pdf_file in pdf_files:
loader = PyPDFLoader(file_path=pdf_file) # Create a loader for each PDF
docs = loader.load_and_split(text_splitter=text_splitter) # Load documents from the PDF
documents.extend(docs)
client = QdrantClient(url='http://localhost:6333')
store = QdrantVectorStore.construct_instance(
client_options={ "url": "http://localhost:6333" },
embedding=embedding,
sparse_embedding=sparse_embedding,
collection_name=collection_name,
retrieval_mode=RetrievalMode.HYBRID,
force_recreate=True,
sparse_vector_params=sparse_vector_params,
distance=Distance.COSINE
)
client.update_collection(collection_name=collection_name, quantization_config=quantization_config)
store.add_documents(documents)
retriever = create_compression_retriever(retriever=store.as_retriever())
system_prompt_with_context = (
"You are an expert in ESG reporting."
"\n\n"
"{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", system_prompt_with_context),
MessagesPlaceholder("chat_history"),
("human", "{input}")
]
)
llm = ChatGoogleGenerativeAI(
model='gemini-1.5-flash-002',
api_key=<api key>,
max_tokens=2048,
temperature=0.2,
top_k=3,
top_p=0.75,
verbose=True,
safety_settings={
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}
)
question_answer_chain = create_stuff_documents_chain(llm=llm, prompt=qa_prompt)
chain = create_retrieval_chain(retriever=retriever, combine_docs_chain=question_answer_chain)
answer = chain.invoke({ input: "What is a ISSB reporting?")
print (answer)
```
### Error Message and Stack Trace (if applicable)
Token indices sequence length is longer than the specified maximum sequence length for this model (1059 > 1024). Running this sequence through the model will result in indexing errors"
### Description
I want to use lang chain's ContextualCompressionRetriever to compress the documents and use the compressed data as the LLM's context information. However, the LLM throws "Token indices sequence length is longer than the specified maximum sequence length for this model (1059 > 1024). Running this sequence through the model will result in indexing errors", and I am unable to set the max_token_limit in create_retrieval_chain.
### System Info
System Information
------------------
> OS: Linux
> OS Version: #48~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Mon Oct 7 11:24:13 UTC 2
> Python Version: 3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]
Package Information
-------------------
> langchain_core: 0.3.10
> langchain: 0.3.3
> langchain_community: 0.3.2
> langsmith: 0.1.131
> langchain_chroma: 0.1.4
> langchain_google_genai: 2.0.1
> langchain_ollama: 0.2.0
> langchain_openai: 0.2.2
> langchain_qdrant: 0.1.4
> langchain_text_splitters: 0.3.0
> langgraph: 0.2.37
Optional packages not installed
-------------------------------
> langserve
Other Dependencies
------------------
> aiohttp: 3.10.9
> async-timeout: 4.0.3
> chromadb: 0.5.11
> dataclasses-json: 0.6.7
> fastapi: 0.115.0
> fastembed: 0.4.1
> google-generativeai: 0.8.3
> httpx: 0.27.2
> jsonpatch: 1.33
> langgraph-checkpoint: 2.0.1
> langgraph-sdk: 0.1.33
> numpy: 1.26.4
> ollama: 0.3.3
> openai: 1.51.0
> orjson: 3.10.7
> packaging: 24.1
> pillow: 10.4.0
> pydantic: 2.9.2
> pydantic-settings: 2.5.2
> PyYAML: 6.0.2
> qdrant-client: 1.12.1
> requests: 2.32.3
> requests-toolbelt: 1.0.0
> SQLAlchemy: 2.0.35
> tenacity: 8.5.0
> tiktoken: 0.8.0
> typing-extensions: 4.12.2
| Ɑ: core | low | Critical |
2,648,253,319 | stable-diffusion-webui | [Bug]: REinstalling mmcv on every launch | ### Checklist
- [ ] The issue exists after disabling all extensions
- [ ] The issue exists on a clean installation of webui
- [ ] The issue is caused by an extension, but I believe it is caused by a bug in the webui
- [ ] The issue exists in the current version of the webui
- [ ] The issue has not been reported before recently
- [X] The issue has been reported before but has not been fixed yet
### What happened?
Whenever I launch ./webui.sh on Ubuntu 22.04 the script outputs: "Installing dependencies" and then uninstalls mmcv, mmdet and openmim.
It then proceeds to reinstall the 3 packages but installs mmcv 2.2 and immediately complains with an AssertionError that mmcv 2.0.0rc4 should be installed.
If I uninstall 2.2 and manually reinstall 2.0.0rc4 in the venv, on the next launch it uninstalls the package and groundhog day...
When passing --disable-all-extensions then the script does not install dependencies.
### Steps to reproduce the problem
pip uninstall mmcv
pip install mmvc==2.0.0rc4
./webui.sh
### What should have happened?
If manual installation of mmcv does not meet the requirement, then why is the script upgrading to 2.2 every time?
### What browsers do you use to access the UI ?
Other
### Sysinfo
{
"Platform": "Linux-5.15.0-125-generic-x86_64-with-glibc2.35",
"Python": "3.10.12",
"Version": "v1.10.1",
"Commit": "82a973c04367123ae98bd9abdf80d9eda9b910e2",
"Git status": "On branch master\nYour branch is up to date with 'origin/master'.\n\nChanges not staged for commit:\n (use \"git add <file>...\" to update what will be committed)\n (use \"git restore <file>...\" to discard changes in working directory)\n\tmodified: requirements.txt\n\tmodified: webui-user.bat\n\tmodified: webui-user.sh\n\nUntracked files:\n (use \"git add <file>...\" to include in what will be committed)\n\t.webui-user.bat.un~\n\t.webui-user.sh.un~\n\t.webui.sh.un~\n\t=2.0.0\n\t=3.0.0\n\tapi_out/\n\thtml/img/\n\tnohup.out\n\tscripts/detect_extension.py\n\nno changes added to commit (use \"git add\" and/or \"git commit -a\")",
"Script path": "/data/stable-diffusion-webui",
"Data path": "/data/stable-diffusion-webui",
"Extensions dir": "/data/stable-diffusion-webui/extensions",
"Checksum": "2fba5f7e0e22457b806051af343991029b79abbfe12017f5cdbbd81626cc07aa",
"Commandline": [
"launch.py",
"--medvram",
"--xformers",
"--no-half-vae"
],
"Torch env info": {
"torch_version": "2.1.2+cu121",
"is_debug_build": "False",
"cuda_compiled_version": "12.1",
"gcc_version": "(Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0",
"clang_version": null,
"cmake_version": null,
"os": "Ubuntu 22.04.5 LTS (x86_64)",
"libc_version": "glibc-2.35",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0] (64-bit runtime)",
"python_platform": "Linux-5.15.0-125-generic-x86_64-with-glibc2.35",
"is_cuda_available": "True",
"cuda_runtime_version": null,
"cuda_module_loading": "LAZY",
"nvidia_driver_version": "535.216.01",
"nvidia_gpu_models": "GPU 0: NVIDIA GeForce RTX 3060 Ti",
"cudnn_version": null,
"pip_version": "pip3",
"pip_packages": [
"numpy==1.26.2",
"open-clip-torch==2.20.0",
"pytorch-lightning==1.9.4",
"torch==2.1.2+cu121",
"torchdiffeq==0.2.3",
"torchmetrics==1.5.2",
"torchsde==0.2.6",
"torchvision==0.16.2+cu121",
"triton==2.1.0"
],
"conda_packages": null,
"hip_compiled_version": "N/A",
"hip_runtime_version": "N/A",
"miopen_runtime_version": "N/A",
"caching_allocator_config": "garbage_collection_threshold:0.9,max_split_size_mb:512",
"is_xnnpack_available": "True",
"cpu_info": [
"Architecture: x86_64",
"CPU op-mode(s): 32-bit, 64-bit",
"Address sizes: 46 bits physical, 48 bits virtual",
"Byte Order: Little Endian",
"CPU(s): 48",
"On-line CPU(s) list: 0-47",
"Vendor ID: GenuineIntel",
"Model name: Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz",
"CPU family: 6",
"Model: 62",
"Thread(s) per core: 2",
"Core(s) per socket: 12",
"Socket(s): 2",
"Stepping: 4",
"CPU max MHz: 3500.0000",
"CPU min MHz: 1200.0000",
"BogoMIPS: 5400.22",
"Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm cpuid_fault epb pti intel_ppin ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms xsaveopt dtherm ida arat pln pts md_clear flush_l1d",
"Virtualization: VT-x",
"L1d cache: 768 KiB (24 instances)",
"L1i cache: 768 KiB (24 instances)",
"L2 cache: 6 MiB (24 instances)",
"L3 cache: 60 MiB (2 instances)",
"NUMA node(s): 2",
"NUMA node0 CPU(s): 0-11,24-35",
"NUMA node1 CPU(s): 12-23,36-47",
"Vulnerability Gather data sampling: Not affected",
"Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled",
"Vulnerability L1tf: Mitigation; PTE Inversion; VMX conditional cache flushes, SMT vulnerable",
"Vulnerability Mds: Mitigation; Clear CPU buffers; SMT vulnerable",
"Vulnerability Meltdown: Mitigation; PTI",
"Vulnerability Mmio stale data: Unknown: No mitigations",
"Vulnerability Reg file data sampling: Not affected",
"Vulnerability Retbleed: Not affected",
"Vulnerability Spec rstack overflow: Not affected",
"Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp",
"Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization",
"Vulnerability Spectre v2: Mitigation; Retpolines; IBPB conditional; IBRS_FW; STIBP conditional; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected",
"Vulnerability Srbds: Not affected",
"Vulnerability Tsx async abort: Not affected"
]
},
"Exceptions": [
{
"exception": "MMCV==2.2.0 is used but incompatible. Please install mmcv>=2.0.0rc4, <2.2.0.",
"traceback": [
[
"/data/stable-diffusion-webui/modules/scripts.py, line 515, load_scripts",
"script_module = script_loading.load_module(scriptfile.path)"
],
[
"/data/stable-diffusion-webui/modules/script_loading.py, line 13, load_module",
"module_spec.loader.exec_module(module)"
],
[
"<frozen importlib._bootstrap_external>, line 883, exec_module",
""
],
[
"<frozen importlib._bootstrap>, line 241, _call_with_frames_removed",
""
],
[
"/data/stable-diffusion-webui/extensions/dddetailer/scripts/dddetailer.py, line 975, <module>",
"from mmdet.apis import inference_detector, init_detector"
],
[
"/data/stable-diffusion-webui/venv/lib/python3.10/site-packages/mmdet/__init__.py, line 17, <module>",
"and mmcv_version < digit_version(mmcv_maximum_version)), \\"
]
]
}
],
"CPU": {
"model": "x86_64",
"count logical": 48,
"count physical": 24
},
"RAM": {
"total": "126GB",
"used": "7GB",
"free": "4GB",
"active": "21GB",
"inactive": "98GB",
"buffers": "696MB",
"cached": "114GB",
"shared": "192MB"
},
"Extensions": [
{
"name": "adetailer",
"path": "/data/stable-diffusion-webui/extensions/adetailer",
"commit": "8ddf919e1e5c234d6398e83f9cee6565acf550f3",
"branch": "main",
"remote": "https://github.com/Bing-su/adetailer.git"
},
{
"name": "canvas-zoom",
"path": "/data/stable-diffusion-webui/extensions/canvas-zoom",
"commit": "b9c3cff892d448f8825186500aeca710f243752a",
"branch": "main",
"remote": "https://github.com/richrobber2/canvas-zoom.git"
},
{
"name": "controlnet",
"path": "/data/stable-diffusion-webui/extensions/controlnet",
"commit": "",
"branch": null,
"remote": null
},
{
"name": "dddetailer",
"path": "/data/stable-diffusion-webui/extensions/dddetailer",
"commit": "f82fe8980ebc5c93f8b5c2c7a36133dc3422c098",
"branch": "master",
"remote": "https://github.com/Bing-su/dddetailer"
},
{
"name": "multidiffusion-upscaler-for-automatic1111",
"path": "/data/stable-diffusion-webui/extensions/multidiffusion-upscaler-for-automatic1111",
"commit": "22798f6822bc9c8a905b51da8954ee313b973331",
"branch": "main",
"remote": "https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111.git"
},
{
"name": "sd-webui-aspect-ratio-helper",
"path": "/data/stable-diffusion-webui/extensions/sd-webui-aspect-ratio-helper",
"commit": "99fcf9b0a4e3f8c8cac07b12d17b66f12297b828",
"branch": "main",
"remote": "https://github.com/thomasasfk/sd-webui-aspect-ratio-helper.git"
},
{
"name": "sd-webui-controlnet",
"path": "/data/stable-diffusion-webui/extensions/sd-webui-controlnet",
"commit": "56cec5b2958edf3b1807b7e7b2b1b5186dbd2f81",
"branch": "main",
"remote": "https://github.com/Mikubill/sd-webui-controlnet.git"
},
{
"name": "sd-webui-infinite-image-browsing",
"path": "/data/stable-diffusion-webui/extensions/sd-webui-infinite-image-browsing",
"commit": "7215a4cadfc14151a3ef8e036ecb0ba8e27d8a68",
"branch": "main",
"remote": "https://github.com/zanllp/sd-webui-infinite-image-browsing.git"
},
{
"name": "sd-webui-inpaint-anything",
"path": "/data/stable-diffusion-webui/extensions/sd-webui-inpaint-anything",
"commit": "91568a8c5f581c15fd5439dba5e25bdc49c563b1",
"branch": "main",
"remote": "https://github.com/Uminosachi/sd-webui-inpaint-anything.git"
},
{
"name": "ultimate-upscale-for-automatic1111",
"path": "/data/stable-diffusion-webui/extensions/ultimate-upscale-for-automatic1111",
"commit": "2322caa480535b1011a1f9c18126d85ea444f146",
"branch": "master",
"remote": "https://github.com/Coyote-A/ultimate-upscale-for-automatic1111.git"
}
],
"Inactive extensions": [],
"Environment": {
"COMMANDLINE_ARGS": "--medvram --xformers --no-half-vae",
"GIT": "git",
"GRADIO_ANALYTICS_ENABLED": "False"
},
"Config": {
"samples_save": false,
"samples_format": "png",
"samples_filename_pattern": "",
"save_images_add_number": true,
"grid_save": false,
"grid_format": "png",
"grid_extended_filename": true,
"grid_only_if_multiple": true,
"grid_prevent_empty_spots": false,
"grid_zip_filename_pattern": "",
"n_rows": -1,
"enable_pnginfo": true,
"save_txt": false,
"save_images_before_face_restoration": false,
"save_images_before_highres_fix": false,
"save_images_before_color_correction": false,
"save_mask": false,
"save_mask_composite": false,
"jpeg_quality": 80,
"webp_lossless": false,
"export_for_4chan": true,
"img_downscale_threshold": 4.0,
"target_side_length": 4000,
"img_max_size_mp": 200,
"use_original_name_batch": true,
"use_upscaler_name_as_suffix": true,
"save_selected_only": true,
"save_init_img": false,
"temp_dir": "",
"clean_temp_dir_at_start": false,
"outdir_samples": "",
"outdir_txt2img_samples": "outputs/txt2img-images",
"outdir_img2img_samples": "outputs/img2img-images",
"outdir_extras_samples": "outputs/extras-images",
"outdir_grids": "",
"outdir_txt2img_grids": "outputs/txt2img-grids",
"outdir_img2img_grids": "outputs/img2img-grids",
"outdir_save": "log/images",
"outdir_init_images": "outputs/init-images",
"save_to_dirs": true,
"grid_save_to_dirs": true,
"use_save_to_dirs_for_ui": false,
"directories_filename_pattern": "[date]",
"directories_max_prompt_words": 8,
"ESRGAN_tile": 192,
"ESRGAN_tile_overlap": 8,
"realesrgan_enabled_models": [
"R-ESRGAN 4x+",
"R-ESRGAN 4x+ Anime6B"
],
"upscaler_for_img2img": null,
"face_restoration_model": "CodeFormer",
"code_former_weight": 0.5,
"face_restoration_unload": false,
"show_warnings": true,
"memmon_poll_rate": 8,
"samples_log_stdout": false,
"multiple_tqdm": true,
"print_hypernet_extra": false,
"list_hidden_files": true,
"unload_models_when_training": false,
"pin_memory": false,
"save_optimizer_state": false,
"save_training_settings_to_txt": true,
"dataset_filename_word_regex": "",
"dataset_filename_join_string": " ",
"training_image_repeats_per_epoch": 1,
"training_write_csv_every": 500,
"training_xattention_optimizations": false,
"training_enable_tensorboard": false,
"training_tensorboard_save_images": false,
"training_tensorboard_flush_every": 120,
"sd_model_checkpoint": "v1-5-pruned-emaonly.safetensors [6ce0161689]",
"sd_checkpoint_cache": 0,
"sd_vae_checkpoint_cache": 0,
"sd_vae": "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors",
"sd_vae_as_default": true,
"sd_unet": "Automatic",
"inpainting_mask_weight": 1.0,
"initial_noise_multiplier": 1.0,
"img2img_color_correction": false,
"img2img_fix_steps": false,
"img2img_background_color": "#ffffff",
"enable_quantization": false,
"enable_emphasis": true,
"enable_batch_seeds": true,
"comma_padding_backtrack": 20,
"CLIP_stop_at_last_layers": 1,
"upcast_attn": false,
"randn_source": "GPU",
"cross_attention_optimization": "Automatic",
"s_min_uncond": 0.0,
"token_merging_ratio": 0.0,
"token_merging_ratio_img2img": 0.0,
"token_merging_ratio_hr": 0.0,
"pad_cond_uncond": false,
"experimental_persistent_cond_cache": false,
"use_old_emphasis_implementation": false,
"use_old_karras_scheduler_sigmas": false,
"no_dpmpp_sde_batch_determinism": false,
"use_old_hires_fix_width_height": false,
"dont_fix_second_order_samplers_schedule": false,
"hires_fix_use_firstpass_conds": false,
"interrogate_keep_models_in_memory": false,
"interrogate_return_ranks": false,
"interrogate_clip_num_beams": 1,
"interrogate_clip_min_length": 24,
"interrogate_clip_max_length": 48,
"interrogate_clip_dict_limit": 1500,
"interrogate_clip_skip_categories": [],
"interrogate_deepbooru_score_threshold": 0.5,
"deepbooru_sort_alpha": true,
"deepbooru_use_spaces": true,
"deepbooru_escape": true,
"deepbooru_filter_tags": "",
"extra_networks_show_hidden_directories": true,
"extra_networks_hidden_models": "Always",
"extra_networks_default_view": "cards",
"extra_networks_default_multiplier": 1.0,
"extra_networks_card_width": 300.0,
"extra_networks_card_height": 200.0,
"extra_networks_add_text_separator": " ",
"ui_extra_networks_tab_reorder": "",
"sd_hypernetwork": "None",
"localization": "None",
"gradio_theme": "Default",
"img2img_editor_height": 720,
"return_grid": true,
"return_mask": false,
"return_mask_composite": false,
"do_not_show_images": false,
"send_seed": true,
"send_size": true,
"font": "",
"js_modal_lightbox": true,
"js_modal_lightbox_initially_zoomed": true,
"js_modal_lightbox_gamepad": false,
"js_modal_lightbox_gamepad_repeat": 250,
"show_progress_in_title": true,
"samplers_in_dropdown": true,
"dimensions_and_batch_together": true,
"keyedit_precision_attention": 0.1,
"keyedit_precision_extra": 0.05,
"keyedit_delimiters": ".,\\/!?%^*;:{}=`~()",
"quicksettings_list": [
"sd_model_checkpoint",
"sd_vae",
"CLIP_stop_at_last_layers",
"face_restoration",
"save_images_before_face_restoration",
"face_restoration_model",
"img2img_color_correction",
"initial_noise_multiplier"
],
"ui_tab_order": [],
"hidden_tabs": [],
"ui_reorder_list": [],
"hires_fix_show_sampler": false,
"hires_fix_show_prompts": false,
"disable_token_counters": false,
"add_model_hash_to_info": true,
"add_model_name_to_info": true,
"add_version_to_infotext": true,
"disable_weights_auto_swap": true,
"infotext_styles": "Apply if any",
"show_progressbar": true,
"live_previews_enable": true,
"live_previews_image_format": "png",
"show_progress_grid": true,
"show_progress_every_n_steps": 10,
"show_progress_type": "Approx NN",
"live_preview_content": "Prompt",
"live_preview_refresh_period": 1000,
"hide_samplers": [],
"eta_ddim": 0.0,
"eta_ancestral": 1.0,
"ddim_discretize": "uniform",
"s_churn": 0.0,
"s_tmin": 0.0,
"s_noise": 1.0,
"k_sched_type": "Automatic",
"sigma_min": 0.0,
"sigma_max": 0.0,
"rho": 0.0,
"eta_noise_seed_delta": 0,
"always_discard_next_to_last_sigma": false,
"uni_pc_variant": "bh1",
"uni_pc_skip_type": "time_uniform",
"uni_pc_order": 3,
"uni_pc_lower_order_final": true,
"postprocessing_enable_in_main_ui": [],
"postprocessing_operation_order": [],
"upscaling_max_images_in_cache": 5,
"disabled_extensions": [],
"disable_all_extensions": "none",
"restore_config_state_file": "",
"sd_checkpoint_hash": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
"ldsr_steps": 100,
"ldsr_cached": false,
"SCUNET_tile": 256,
"SCUNET_tile_overlap": 8,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"lora_functional": false,
"sd_lora": "None",
"lora_preferred_name": "Alias from file",
"lora_add_hashes_to_infotext": true,
"extra_options": [],
"extra_options_accordion": false,
"canvas_hotkey_zoom": "Alt",
"canvas_hotkey_adjust": "Ctrl",
"canvas_hotkey_move": "F",
"canvas_hotkey_fullscreen": "S",
"canvas_hotkey_reset": "R",
"canvas_hotkey_overlap": "O",
"canvas_show_tooltip": true,
"canvas_disabled_functions": [
"Overlap"
],
"sd_vae_overrides_per_model_preferences": false,
"SWIN_torch_compile": false,
"hypertile_enable_unet": false,
"hypertile_enable_unet_secondpass": false,
"hypertile_max_depth_unet": 3,
"hypertile_max_tile_unet": 256,
"hypertile_swap_size_unet": 3,
"hypertile_enable_vae": false,
"hypertile_max_depth_vae": 3,
"hypertile_max_tile_vae": 128,
"hypertile_swap_size_vae": 3,
"save_images_replace_action": "Replace",
"grid_text_active_color": "#000000",
"grid_text_inactive_color": "#999999",
"grid_background_color": "#ffffff",
"save_incomplete_images": false,
"notification_audio": true,
"notification_volume": 100,
"auto_backcompat": true,
"use_old_scheduling": false,
"use_downcasted_alpha_bar": false,
"refiner_switch_by_sample_steps": false,
"extra_networks_dir_button_function": false,
"extra_networks_card_text_scale": 1,
"extra_networks_card_show_desc": true,
"extra_networks_card_description_is_html": false,
"extra_networks_card_order_field": "Path",
"extra_networks_card_order": "Ascending",
"extra_networks_tree_view_style": "Dirs",
"extra_networks_tree_view_default_enabled": true,
"extra_networks_tree_view_default_width": 180.0,
"textual_inversion_print_at_load": false,
"textual_inversion_add_hashes_to_infotext": true,
"lora_show_all": true,
"lora_hide_unknown_for_versions": [],
"lora_in_memory_limit": 0,
"lora_not_found_warning_console": true,
"lora_not_found_gradio_warning": true,
"pad_cond_uncond_v0": false,
"persistent_cond_cache": true,
"batch_cond_uncond": true,
"fp8_storage": "Disable",
"cache_fp16_weight": false,
"s_tmax": 0,
"sgm_noise_multiplier": false,
"sd_noise_schedule": "Default",
"sd_checkpoints_limit": 1,
"sd_checkpoints_keep_in_cpu": true,
"emphasis": "Original",
"tiling": false,
"hires_fix_refiner_pass": "second pass",
"enable_prompt_comments": true,
"sdxl_crop_top": 0.0,
"sdxl_crop_left": 0.0,
"sdxl_refiner_low_aesthetic_score": 2.5,
"sdxl_refiner_high_aesthetic_score": 6.0,
"auto_vae_precision_bfloat16": true,
"auto_vae_precision": false,
"sd_vae_encode_method": "Full",
"sd_vae_decode_method": "Full",
"img2img_extra_noise": 0,
"img2img_sketch_default_brush_color": "#ffffff",
"img2img_inpaint_mask_brush_color": "#ffffff",
"img2img_inpaint_sketch_default_brush_color": "#ffffff",
"img2img_batch_show_results_limit": 32,
"overlay_inpaint": true,
"sd_webui_modal_lightbox_icon_opacity": 1,
"sd_webui_modal_lightbox_toolbar_opacity": 0.9,
"gallery_height": "",
"open_dir_button_choice": "Subdirectory",
"add_vae_name_to_info": true,
"add_vae_hash_to_info": true,
"add_user_name_to_info": false,
"infotext_skip_pasting": [],
"live_preview_allow_lowvram_full": false,
"live_preview_fast_interrupt": false,
"js_live_preview_in_modal_lightbox": false,
"keyedit_delimiters_whitespace": [
"Tab",
"Carriage Return",
"Line Feed"
],
"keyedit_move": true,
"include_styles_into_token_counters": true,
"extra_options_txt2img": [],
"extra_options_img2img": [],
"extra_options_cols": 1,
"compact_prompt_box": false,
"sd_checkpoint_dropdown_use_short": false,
"txt2img_settings_accordion": false,
"img2img_settings_accordion": false,
"interrupt_after_current": true,
"gradio_themes_cache": true,
"enable_reloading_ui_scripts": false,
"api_enable_requests": true,
"api_forbid_local_requests": true,
"api_useragent": "",
"prioritized_callbacks_app_started": [],
"prioritized_callbacks_model_loaded": [],
"prioritized_callbacks_ui_settings": [],
"prioritized_callbacks_infotext_pasted": [],
"prioritized_callbacks_script_unloaded": [],
"prioritized_callbacks_before_ui": [],
"prioritized_callbacks_list_optimizers": [],
"prioritized_callbacks_before_token_counter": [],
"prioritized_callbacks_script_before_process": [],
"prioritized_callbacks_script_process": [],
"prioritized_callbacks_script_post_sample": [],
"prioritized_callbacks_script_on_mask_blend": [],
"prioritized_callbacks_script_postprocess_maskoverlay": [],
"auto_launch_browser": "Local",
"enable_console_prompts": false,
"show_gradio_deprecation_warnings": true,
"enable_upscale_progressbar": true,
"disable_mmap_load_safetensors": false,
"hide_ldm_prints": true,
"dump_stacks_on_signal": false,
"face_restoration": false,
"postprocessing_disable_in_extras": [],
"postprocessing_existing_caption_action": "Ignore",
"dat_enabled_models": [
"DAT x2",
"DAT x3",
"DAT x4"
],
"DAT_tile": 192,
"DAT_tile_overlap": 8,
"set_scale_by_when_changing_upscaler": false,
"canvas_hotkey_shrink_brush": "Q",
"canvas_hotkey_grow_brush": "W",
"canvas_auto_expand": true,
"canvas_blur_prompt": false,
"prioritized_callbacks_ui_tabs": [],
"arh_javascript_aspect_ratio_show": true,
"arh_javascript_aspect_ratio": "1:1, 3:2, 4:3, 5:4, 16:9",
"arh_ui_javascript_selection_method": "Aspect Ratios Dropdown",
"arh_hide_accordion_by_default": true,
"arh_expand_by_default": false,
"arh_ui_component_order_key": "MaxDimensionScaler, MinDimensionScaler, PredefinedAspectRatioButtons, PredefinedPercentageButtons",
"arh_show_max_width_or_height": false,
"arh_max_width_or_height": 1024.0,
"arh_show_min_width_or_height": false,
"arh_min_width_or_height": 1024.0,
"arh_show_predefined_aspect_ratios": false,
"arh_predefined_aspect_ratio_use_max_dim": false,
"arh_predefined_aspect_ratios": "1:1, 4:3, 16:9, 9:16, 21:9",
"arh_show_predefined_percentages": false,
"arh_predefined_percentages": "25, 50, 75, 125, 150, 175, 200",
"arh_predefined_percentages_display_key": "Incremental/decremental percentage (-50%, +50%)",
"dd_save_previews": false,
"outdir_ddetailer_previews": "extensions/dddetailer/outputs/masks-previews",
"dd_save_masks": false,
"outdir_ddetailer_masks": "extensions/dddetailer/outputs/masks",
"control_net_detectedmap_dir": "detected_maps",
"control_net_models_path": "",
"control_net_modules_path": "",
"control_net_unit_count": 3,
"control_net_model_cache_size": 2,
"control_net_inpaint_blur_sigma": 7,
"control_net_no_detectmap": false,
"control_net_detectmap_autosaving": false,
"control_net_allow_script_control": false,
"control_net_sync_field_args": true,
"controlnet_show_batch_images_in_ui": false,
"controlnet_increment_seed_during_batch": false,
"controlnet_disable_openpose_edit": false,
"controlnet_disable_photopea_edit": false,
"controlnet_photopea_warning": true,
"controlnet_ignore_noninpaint_mask": false,
"controlnet_clip_detector_on_cpu": false,
"controlnet_control_type_dropdown": false,
"ad_max_models": 4,
"ad_extra_models_dir": "",
"ad_save_previews": false,
"ad_save_images_before": false,
"ad_only_selected_scripts": true,
"ad_script_names": "dynamic_prompting,dynamic_thresholding,lora_block_weight,negpip,wildcard_recursive,wildcards",
"ad_bbox_sortby": "None",
"ad_same_seed_for_each_tab": false,
"prioritized_callbacks_after_component": [],
"prioritized_callbacks_on_reload": [],
"prioritized_callbacks_script_before_process_batch": [],
"prioritized_callbacks_script_postprocess": [],
"prioritized_callbacks_script_postprocess_batch": [],
"prioritized_callbacks_script_after_component": [],
"prioritized_callbacks_script_postprocess_image": [],
"canvas_zoom_undo_extra_key": "Ctrl",
"canvas_zoom_hotkey_undo": "Z",
"canvas_zoom_inc_brush_size": "]",
"canvas_zoom_dec_brush_size": "[",
"canvas_zoom_hotkey_open_colorpanel": "Q",
"canvas_zoom_hotkey_pin_colorpanel": "T",
"canvas_zoom_hotkey_dropper": "A",
"canvas_zoom_hotkey_fill": "X",
"canvas_zoom_hotkey_transparency": "C",
"canvas_zoom_hide_btn": true,
"canvas_zoom_mask_clear": true,
"canvas_zoom_enable_integration": true,
"canvas_zoom_brush_size": 200,
"canvas_zoom_brush_size_change": 5,
"canvas_zoom_transparency_level": 70,
"canvas_zoom_brush_opacity": false,
"canvas_zoom_inpaint_label": true,
"canvas_zoom_inpaint_warning": true,
"canvas_zoom_inpaint_change_btn_color": false,
"canvas_zoom_inpaint_btn_color": "#C33227",
"canvas_zoom_brush_outline": false,
"canvas_zoom_add_buttons": false,
"canvas_zoom_draw_staight_lines": false,
"canvas_zoom_inpaint_brushcolor": "#000000",
"canvas_zoom_disabled_functions": [
"Overlap"
],
"ad_save_images_dir": "",
"ad_dynamic_denoise_power": 0,
"ad_match_inpaint_bbox_size": "Off",
"inpaint_anything_save_folder": "inpaint-anything",
"inpaint_anything_sam_oncpu": false,
"inpaint_anything_offline_inpainting": false,
"inpaint_anything_padding_fill": 127,
"inpain_anything_sam_models_dir": ""
},
"Startup": {
"total": 71.07646226882935,
"records": {
"initial startup": 0.006743669509887695,
"prepare environment/checks": 0.00014019012451171875,
"prepare environment/git version info": 0.016297340393066406,
"prepare environment/torch GPU test": 2.610452651977539,
"prepare environment/clone repositores": 0.05684089660644531,
"prepare environment/install requirements": 4.551353693008423,
"prepare environment/run extensions installers/controlnet": 0.001188516616821289,
"prepare environment/run extensions installers/sd-webui-controlnet": 0.14631366729736328,
"prepare environment/run extensions installers/sd-webui-inpaint-anything": 5.030036687850952,
"prepare environment/run extensions installers/canvas-zoom": 2.4369723796844482,
"prepare environment/run extensions installers/dddetailer": 20.710227251052856,
"prepare environment/run extensions installers/sd-webui-aspect-ratio-helper": 0.0001430511474609375,
"prepare environment/run extensions installers/ultimate-upscale-for-automatic1111": 2.765655517578125e-05,
"prepare environment/run extensions installers/multidiffusion-upscaler-for-automatic1111": 2.3126602172851562e-05,
"prepare environment/run extensions installers/sd-webui-infinite-image-browsing": 0.2711906433105469,
"prepare environment/run extensions installers/adetailer": 5.347283363342285,
"prepare environment/run extensions installers": 33.94344687461853,
"prepare environment": 41.17867851257324,
"launcher": 0.0021791458129882812,
"import torch": 4.757909536361694,
"import gradio": 1.0641181468963623,
"setup paths": 1.707580804824829,
"import ldm": 0.0037512779235839844,
"import sgm": 5.4836273193359375e-06,
"initialize shared": 0.2651069164276123,
"other imports": 0.6259610652923584,
"opts onchange": 0.0006053447723388672,
"setup SD model": 9.799003601074219e-05,
"setup codeformer": 0.0008230209350585938,
"setup gfpgan": 0.007240772247314453,
"set samplers": 4.410743713378906e-05,
"list extensions": 0.002903461456298828,
"restore config state file": 1.52587890625e-05,
"list SD models": 0.026834487915039062,
"list localizations": 0.0003001689910888672,
"load scripts/custom_code.py": 0.004204988479614258,
"load scripts/detect_extension.py": 0.0006046295166015625,
"load scripts/img2imgalt.py": 0.0004169940948486328,
"load scripts/loopback.py": 0.00022220611572265625,
"load scripts/outpainting_mk_2.py": 0.0002722740173339844,
"load scripts/poor_mans_outpainting.py": 0.0002155303955078125,
"load scripts/postprocessing_codeformer.py": 0.0001819133758544922,
"load scripts/postprocessing_gfpgan.py": 0.00016450881958007812,
"load scripts/postprocessing_upscale.py": 0.0002722740173339844,
"load scripts/prompt_matrix.py": 0.000225067138671875,
"load scripts/prompts_from_file.py": 0.00023698806762695312,
"load scripts/sd_upscale.py": 0.000202178955078125,
"load scripts/xyz_grid.py": 0.002648591995239258,
"load scripts/ldsr_model.py": 0.420351505279541,
"load scripts/lora_script.py": 0.1832737922668457,
"load scripts/scunet_model.py": 0.027283668518066406,
"load scripts/swinir_model.py": 0.02671217918395996,
"load scripts/hotkey_config.py": 0.00019049644470214844,
"load scripts/extra_options_section.py": 0.00028014183044433594,
"load scripts/hypertile_script.py": 0.053967952728271484,
"load scripts/postprocessing_autosized_crop.py": 0.00022292137145996094,
"load scripts/postprocessing_caption.py": 0.0001900196075439453,
"load scripts/postprocessing_create_flipped_copies.py": 0.0001735687255859375,
"load scripts/postprocessing_focal_crop.py": 0.0009653568267822266,
"load scripts/postprocessing_split_oversized.py": 0.0001895427703857422,
"load scripts/soft_inpainting.py": 0.0004904270172119141,
"load scripts/!adetailer.py": 0.5497946739196777,
"load scripts/config.py": 0.00032830238342285156,
"load scripts/dddetailer.py": 16.50908327102661,
"load scripts/tilediffusion.py": 0.0053141117095947266,
"load scripts/tileglobal.py": 0.0011188983917236328,
"load scripts/tilevae.py": 0.0008006095886230469,
"load scripts/sd_webui_aspect_ratio_helper.py": 0.09862327575683594,
"load scripts/adapter.py": 0.00047659873962402344,
"load scripts/api.py": 0.3110339641571045,
"load scripts/batch_hijack.py": 0.0005223751068115234,
"load scripts/cldm.py": 0.001079559326171875,
"load scripts/controlnet.py": 0.5039541721343994,
"load scripts/controlnet_diffusers.py": 0.00034165382385253906,
"load scripts/controlnet_lllite.py": 0.00029468536376953125,
"load scripts/controlnet_lora.py": 0.0002913475036621094,
"load scripts/controlnet_model_guess.py": 0.0005085468292236328,
"load scripts/controlnet_sparsectrl.py": 0.00032448768615722656,
"load scripts/controlnet_version.py": 0.0001399517059326172,
"load scripts/enums.py": 0.002435445785522461,
"load scripts/external_code.py": 0.0001876354217529297,
"load scripts/global_state.py": 0.00030803680419921875,
"load scripts/hook.py": 0.0006806850433349609,
"load scripts/infotext.py": 0.00020742416381835938,
"load scripts/logging.py": 0.0004165172576904297,
"load scripts/lvminthin.py": 0.0006203651428222656,
"load scripts/movie2movie.py": 0.0002644062042236328,
"load scripts/supported_preprocessor.py": 0.0021371841430664062,
"load scripts/utils.py": 0.00033664703369140625,
"load scripts/xyz_grid_support.py": 0.0003867149353027344,
"load scripts/iib_setup.py": 0.10164618492126465,
"load scripts/inpaint_anything.py": 0.5150072574615479,
"load scripts/ultimate-upscale.py": 0.0007646083831787109,
"load scripts/comments.py": 0.0320131778717041,
"load scripts/refiner.py": 0.0002295970916748047,
"load scripts/sampler.py": 0.00019979476928710938,
"load scripts/seed.py": 0.000244140625,
"load scripts": 19.36632752418518,
"load upscalers": 0.0024099349975585938,
"refresh VAE": 0.001795053482055664,
"refresh textual inversion templates": 7.104873657226562e-05,
"scripts list_optimizers": 0.000644683837890625,
"scripts list_unets": 5.125999450683594e-05,
"reload hypernetworks": 0.0009043216705322266,
"initialize extra networks": 0.010030269622802734,
"scripts before_ui_callback": 0.00232696533203125,
"create ui": 1.5808982849121094,
"gradio launch": 0.33333706855773926,
"add APIs": 0.023990869522094727,
"app_started_callback/lora_script.py": 0.001112222671508789,
"app_started_callback/!adetailer.py": 0.0014429092407226562,
"app_started_callback/api.py": 0.014088869094848633,
"app_started_callback/iib_setup.py": 0.08627462387084961,
"app_started_callback": 0.10292911529541016
}
},
"Packages": [
"absl-py==2.1.0",
"accelerate==0.21.0",
"addict==2.4.0",
"aenum==3.1.15",
"aiofiles==23.2.1",
"aiohappyeyeballs==2.4.3",
"aiohttp==3.10.10",
"aiosignal==1.3.1",
"albumentations==1.4.3",
"aliyun-python-sdk-core==2.16.0",
"aliyun-python-sdk-kms==2.16.5",
"altair==5.4.1",
"antlr4-python3-runtime==4.9.3",
"anyio==3.7.1",
"async-timeout==4.0.3",
"attrs==24.2.0",
"av==13.1.0",
"basicsr==1.4.2",
"blendmodes==2022",
"certifi==2024.8.30",
"cffi==1.17.1",
"chardet==5.2.0",
"charset-normalizer==3.4.0",
"clean-fid==0.1.35",
"click==8.1.7",
"clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip#sha256=b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a",
"colorama==0.4.6",
"coloredlogs==15.0.1",
"colorlog==6.9.0",
"contourpy==1.3.0",
"controlnet-aux==0.0.9",
"crcmod==1.7",
"cryptography==43.0.3",
"cssselect2==0.7.0",
"cycler==0.12.1",
"Cython==3.0.11",
"deprecation==2.1.0",
"depth_anything @ https://github.com/huchenlei/Depth-Anything/releases/download/v1.0.0/depth_anything-2024.1.22.0-py2.py3-none-any.whl#sha256=26c1d38b8c3c306b4a2197d725a4b989ff65f7ebcf4fb5a96a1b6db7fbd56780",
"depth_anything_v2 @ https://github.com/MackinationsAi/UDAV2-ControlNet/releases/download/v1.0.0/depth_anything_v2-2024.7.1.0-py2.py3-none-any.whl#sha256=6848128867d1f7c7519d88df0f88bfab89100dc5225259c4d7cb90325c308c9f",
"diffusers==0.31.0",
"diskcache==5.6.3",
"dsine @ https://github.com/sdbds/DSINE/releases/download/1.0.2/dsine-2024.3.23-py3-none-any.whl#sha256=b9ea3bacce09f9b3f7fb4fa12471da7e465b2f9a60412711105a9238db280442",
"easydict==1.13",
"einops==0.4.1",
"embreex==2.17.7.post5",
"exceptiongroup==1.2.2",
"facexlib==0.3.0",
"fastapi==0.94.0",
"ffmpy==0.4.0",
"filelock==3.14.0",
"filterpy==1.4.5",
"flatbuffers==24.3.25",
"fonttools==4.54.1",
"frozenlist==1.5.0",
"fsspec==2024.10.0",
"ftfy==6.3.1",
"future==1.0.0",
"fvcore==0.1.5.post20221221",
"geffnet==1.0.2",
"gitdb==4.0.11",
"GitPython==3.1.32",
"glob2==0.5",
"gradio==3.41.2",
"gradio_client==0.5.0",
"grpcio==1.67.1",
"h11==0.12.0",
"handrefinerportable @ https://github.com/huchenlei/HandRefinerPortable/releases/download/v1.0.1/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl#sha256=1e6c702905919f4c49bcb2db7b20d334e8458a7555cd57630600584ec38ca6a9",
"httpcore==0.15.0",
"httpx==0.24.1",
"huggingface-hub==0.26.2",
"humanfriendly==10.0",
"hydra-core==1.3.2",
"idna==3.10",
"imageio==2.36.0",
"importlib_metadata==8.5.0",
"importlib_resources==6.4.5",
"inflection==0.5.1",
"insightface==0.7.3",
"iopath==0.1.9",
"jax==0.4.35",
"jaxlib==0.4.35",
"Jinja2==3.1.4",
"jmespath==0.10.0",
"joblib==1.4.2",
"jsonmerge==1.8.0",
"jsonschema==4.23.0",
"jsonschema-specifications==2024.10.1",
"kiwisolver==1.4.7",
"kornia==0.6.7",
"lark==1.1.2",
"lazy_loader==0.4",
"lightning-utilities==0.11.8",
"llvmlite==0.43.0",
"lmdb==1.5.1",
"loguru==0.7.2",
"lxml==5.3.0",
"manifold3d==2.5.1",
"mapbox_earcut==1.0.2",
"Markdown==3.7",
"markdown-it-py==3.0.0",
"MarkupSafe==2.1.5",
"matplotlib==3.9.2",
"mdurl==0.1.2",
"mediapipe==0.10.15",
"ml_dtypes==0.5.0",
"mmcv==2.2.0",
"mmdet==3.3.0",
"mmengine==0.10.5",
"model-index==0.1.11",
"mpmath==1.3.0",
"multidict==6.1.0",
"narwhals==1.13.3",
"networkx==3.4.2",
"numba==0.60.0",
"numpy==1.26.2",
"omegaconf==2.2.3",
"onnx==1.17.0",
"onnxruntime-gpu==1.20.0",
"open-clip-torch==2.20.0",
"opencv-contrib-python==4.10.0.84",
"opencv-python==4.10.0.84",
"opencv-python-headless==4.10.0.84",
"opendatalab==0.0.10",
"openmim==0.3.9",
"openxlab==0.1.2",
"opt_einsum==3.4.0",
"ordered-set==4.1.0",
"orjson==3.10.11",
"oss2==2.17.0",
"packaging==24.2",
"pandas==2.2.3",
"piexif==1.1.3",
"Pillow==9.5.0",
"pillow-avif-plugin==1.4.3",
"pip==24.3.1",
"platformdirs==4.3.6",
"portalocker==2.10.1",
"prettytable==3.12.0",
"propcache==0.2.0",
"protobuf==4.25.5",
"psutil==5.9.5",
"py-cpuinfo==9.0.0",
"pycocotools==2.0.8",
"pycollada==0.8",
"pycparser==2.22",
"pycryptodome==3.21.0",
"pydantic==1.10.17",
"pydub==0.25.1",
"Pygments==2.18.0",
"pyparsing==3.2.0",
"python-dateutil==2.9.0.post0",
"python-dotenv==1.0.1",
"python-multipart==0.0.17",
"pytorch-lightning==1.9.4",
"pytz==2023.4",
"PyWavelets==1.7.0",
"PyYAML==6.0.2",
"referencing==0.35.1",
"regex==2024.11.6",
"reportlab==4.2.5",
"requests==2.28.2",
"resize-right==0.0.2",
"rich==13.4.2",
"rpds-py==0.21.0",
"Rtree==1.3.0",
"safetensors==0.4.2",
"scikit-image==0.21.0",
"scikit-learn==1.5.2",
"scipy==1.14.1",
"seaborn==0.13.2",
"segment-anything==1.0",
"semantic-version==2.10.0",
"sentencepiece==0.2.0",
"setuptools==60.2.0",
"shapely==2.0.6",
"six==1.16.0",
"smmap==5.0.1",
"sniffio==1.3.1",
"sounddevice==0.5.1",
"spandrel==0.3.4",
"spandrel_extra_arches==0.1.1",
"starlette==0.26.1",
"svg.path==6.3",
"svglib==1.5.1",
"sympy==1.13.3",
"tabulate==0.9.0",
"tb-nightly==2.19.0a20241110",
"tensorboard-data-server==0.7.2",
"termcolor==2.5.0",
"terminaltables==3.1.10",
"threadpoolctl==3.5.0",
"tifffile==2024.9.20",
"timm==0.6.7",
"tinycss2==1.4.0",
"tokenizers==0.13.3",
"tomesd==0.1.3",
"tomli==2.0.2",
"torch==2.1.2+cu121",
"torchdiffeq==0.2.3",
"torchmetrics==1.5.2",
"torchsde==0.2.6",
"torchvision==0.16.2+cu121",
"tqdm==4.65.2",
"trampoline==0.1.2",
"transformers==4.30.2",
"trimesh==4.5.2",
"triton==2.1.0",
"typing_extensions==4.12.2",
"tzdata==2024.2",
"ultralytics==8.3.28",
"ultralytics-thop==2.0.11",
"urllib3==1.26.20",
"uvicorn==0.32.0",
"vhacdx==0.0.8.post1",
"wcwidth==0.2.13",
"webencodings==0.5.1",
"websockets==11.0.3",
"Werkzeug==3.1.3",
"xatlas==0.0.9",
"xformers==0.0.23.post1",
"xxhash==3.5.0",
"yacs==0.1.8",
"yapf==0.40.2",
"yarl==1.17.1",
"zipp==3.21.0"
]
}
### Console logs
```Shell
Installing dependencies
.
.
Successfully uninstalled mmcv
Installing mmcv
AssertionError: 2.0.0rc4 is required
```
### Additional information
none | bug-report | low | Critical |
2,648,292,005 | rust | A release store operation synchronizes with an acquire load operation instead of the reverse | ### Location
In the document [park](https://doc.rust-lang.org/std/thread/fn.park.html#memory-ordering)
> Calls to park synchronize-with calls to unpark, meaning that memory operations performed before a call to unpark are made visible to the thread that consumes the token and returns from park.
>
> In atomic ordering terms, unpark performs a Release operation and park performs the corresponding Acquire operation. Calls to unpark for the same thread form a release sequence.
Since the memory order in Rust is from C++, in C++ standard, [[atomics.order] p2](https://eel.is/c++draft/atomics.order#2) says:
> An atomic operation A that performs **a release operation** on an atomic object M **synchronizes with** an atomic operation B that performs an **acquire operation** on M and takes its value from any side effect in the release sequence headed by A.
Since unpark performs a Release operation and park performs the corresponding Acquire operation, by the definition of the wording *synchronizes with*, we should say
> calls to unpark synchronize-with calls to park.
### Summary
> calls to unpark synchronize-with calls to park.
because a release store operation synchronize with an acquire load operation | A-docs,T-libs | low | Minor |
2,648,310,326 | terminal | The weight of CJK characters in the title of the active tab is inappropriate | ### Windows Terminal version
Source
### Windows build number
22621/27744
### Other Software
_No response_
### Steps to reproduce
The default font in Simplified Chinese user environments is Microsoft Yahei UI. The current font weight causes the Chinese characters in the titles to be unclear.
### Expected Behavior

100% DPI scale, right is the active tab

200% DPI scale, right is the active tab
### Actual Behavior
Set FontWeight to Normal, Medium, or Bold (instead of Default or SemiLight). | Issue-Bug,Area-UserInterface,Product-Terminal | low | Minor |
2,648,383,854 | langchain | DOC: sql-ollama leads to removed page | ### URL
https://python.langchain.com/docs/templates/sql-ollama/
### Checklist
- [X] I added a very descriptive title to this issue.
- [X] I included a link to the documentation page I am referring to (if applicable).
### Issue with current documentation:
I noticed that the link to the SQL-Ollama documentation (https://python.langchain.com/v0.1/docs/templates/sql-ollama) suggests checking a newer version, but the link seems to lead to a removed page.
Can you let me know if there’s an updated version available, or if this is just a temporary removal?
### Idea or request for content:
_No response_ | 🤖:docs | low | Minor |
2,648,395,826 | flutter | [in_app_purchase] java.lang.NullPointerException on debug console | ### What package does this bug report belong to?
in_app_purchase
### What target platforms are you seeing this bug on?
Android
### Have you already upgraded your packages?
Yes
### Dependency versions
```
dependencies:
flutter:
sdk: flutter
in_app_purchase: ^3.2.0
```
### Steps to reproduce
1. The error only throwed on a purchase subscribed or close window without subscribed
### Expected results
Even I try it with using Subscription Stream, it showing me up same issues.
### Actual results
When I click on Subscribe button, it open a subscribe window like image. Now if close this window by clicking outside of the window or even when I subscribe it, then the of closing the window, the error is thrown on debug console.
I didn't write huge code, just install the package, initialize it and get products then handler. I do not do anything on my android folder.
The error is like the second image.
### Code sample
```
class _SubscriptionPageState extends State<SubscriptionPage> {
final InAppPurchase _inAppPurchase = InAppPurchase.instance;
//Product Listing
static const List<String> _productIds = ['com....subscription'];
Future<Subscription> getProducts() async {
final bool isAvailable = await _inAppPurchase.isAvailable();
if (!isAvailable) {
return Subscription(
isAvailable: isAvailable,
products: [],
notFoundIds: [],
);
}
final ProductDetailsResponse productDetailResponse =
await _inAppPurchase.queryProductDetails(_productIds.toSet());
if (productDetailResponse.error != null) {
throw Exception(productDetailResponse.error!.message);
}
return Subscription(
isAvailable: isAvailable,
products: productDetailResponse.productDetails,
notFoundIds: productDetailResponse.notFoundIDs,
);
}
@override
Widget build(BuildContext context) {
return Scaffold(
body: QueryBuilder(
query: Query(
key: ['subscription'],
queryFn: () => getProducts(),
config: QueryConfig(
storeQuery: false,
cacheDuration: const Duration(milliseconds: 500),
),
),
builder: (context, state) {
final product = state.data!.products[0];
return BasicAppButton(
title: 'Subscribe',
onPressed: () {
final PurchaseParam param = GooglePlayPurchaseParam(
productDetails: product,
changeSubscriptionParam: null,
);
_inAppPurchase.buyNonConsumable(
purchaseParam: param,
);
},
level: '3',
);
},
),
);
}
}
```
### Screenshots or Videos

### Logs

### Flutter Doctor output

| c: crash,platform-android,p: in_app_purchase,package,has reproducible steps,P2,team-android,triaged-android,found in release: 3.24 | low | Critical |
2,648,403,030 | go | crypto/x509: parser should error on Subject Information Access and Policy Constraints being makred as critical | ### Go version
go version go1.23.2 linux/amd64
### Output of `go env` in your module/workspace:
```shell
GO111MODULE=''
GOARCH='amd64'
GOBIN=''
GOCACHE='/home/liu/.cache/go-build'
GOENV='/home/liu/.config/go/env'
GOEXE=''
GOEXPERIMENT=''
GOFLAGS=''
GOHOSTARCH='amd64'
GOHOSTOS='linux'
GOINSECURE=''
GOMODCACHE='/home/liu/go/pkg/mod'
GONOPROXY=''
GONOSUMDB=''
GOOS='linux'
GOPATH='/home/liu/go'
GOPRIVATE=''
GOPROXY='https://proxy.golang.org,direct'
GOROOT='/snap/go/10730'
GOSUMDB='sum.golang.org'
GOTMPDIR=''
GOTOOLCHAIN='auto'
GOTOOLDIR='/snap/go/10730/pkg/tool/linux_amd64'
GOVCS=''
GOVERSION='go1.23.2'
GODEBUG=''
GOTELEMETRY='local'
GOTELEMETRYDIR='/home/liu/.config/go/telemetry'
GCCGO='gccgo'
GOAMD64='v1'
AR='ar'
CC='gcc'
CXX='g++'
CGO_ENABLED='1'
GOMOD='/dev/null'
GOWORK=''
CGO_CFLAGS='-O2 -g'
CGO_CPPFLAGS=''
CGO_CXXFLAGS='-O2 -g'
CGO_FFLAGS='-O2 -g'
CGO_LDFLAGS='-O2 -g'
PKG_CONFIG='pkg-config'
GOGCCFLAGS='-fPIC -m64 -pthread -Wl,--no-gc-sections -fmessage-length=0 -ffile-prefix-map=/tmp/go-build3084352486=/tmp/go-build -gno-record-gcc-switches'
```
### What did you do?
Use x509.ParseCertificate(derBytes) to parse the der certificate
### What did you see happen?
A critical examine of the aia extension:authority info access incorrectly marked critical
Authority Information Access is described in RFC5280: Conforming CAs MUST mark this extension as non-critical.
Other extensions with the same description in RFC5280 are not checked, such as:
Subject Information Access: Conforming CAs MUST mark this extension as non-critical.
Policy Constraints: Conforming CAs MUST mark this extension as non-critical.
### What did you expect to see?
For certificates marked as critical, such as Subject Information Access and Policy Constraints, parsing errors will also occur.
| NeedsInvestigation | low | Critical |
2,648,522,293 | ollama | Please add microsoft/OmniParser model | OmniParser is a general screen parsing tool, which interprets/converts UI screenshot to structured format, to improve existing LLM based UI agent. Training Datasets include: 1) an interactable icon detection dataset, which was curated from popular web pages and automatically annotated to highlight clickable and actionable regions, and 2) an icon description dataset, designed to associate each UI element with its corresponding function.
https://huggingface.co/microsoft/OmniParser
Thanks! | model request | low | Major |
2,648,550,060 | pytorch | inconsistency in ```torch.special.logit``` on CPU and GPU | ### 🐛 Describe the bug
Consistency check on ```torch.special.logit``` function between CPU and GPU using a bfloat16 tensor.
```python #
#include <iostream>
#include <torch/torch.h>
int main() {
std::cout<<"dtype: "<<get_dtype(4)<<std::endl;
torch::Tensor self = torch::tensor({{0.0835}}, torch::kBFloat16);
auto result_cpu = torch::special::logit(self);
torch::Tensor self_cuda = self.cuda();
auto result_gpu = torch::special::logit(self_cuda);
std::cout << "initialized tensor (CPU):\n" << self << std::endl;
std::cout << "CPU result: \n" << result_cpu << std::endl;
std::cout << "GPU result: \n" << result_gpu << std::endl;
bool inconsistent = !torch::allclose(result_cpu, result_gpu.cpu(), 1e-03, 1e-02);
std::cout << "inconsistency with atol=1e-02 and rtol=1e-03: " << std::boolalpha << inconsistent << std::endl;
return 0;
}
```
outputs:
```
initialized tensor (CPU):
0.01 *
8.3496
[ CPUBFloat16Type{1,1} ]
CPU result:
-2.4062
[ CPUBFloat16Type{1,1} ]
GPU result:
-2.3906
[ CUDABFloat16Type{1,1} ]
inconsistency with atol=1e-02 and rtol=1e-03: true
```
### Versions
OS: Ubuntu 22.04.4 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: 16.0.4 (https://github.com/llvm/llvm-project ae42196bc493ffe877a7e3dff8be32035dea4d07)
CMake version: version 3.22.1
Libc version: glibc-2.35
Python version: 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0] (64-bit runtime)
Python platform: Linux-5.15.0-105-generic-x86_64-with-glibc2.10
Is CUDA available: N/A
CUDA runtime version: 12.1.66
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration:
GPU 0: NVIDIA GeForce RTX 3090
GPU 1: NVIDIA GeForce RTX 3090
GPU 2: NVIDIA GeForce RTX 3090
GPU 3: NVIDIA GeForce RTX 3090
Nvidia driver version: 550.78
cuDNN version: Probably one of the following:
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn.so.8.9.2
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.9.2
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.9.2
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.9.2
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.9.2
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.9.2
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.9.2
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: N/A
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 96
On-line CPU(s) list: 0-95
Vendor ID: GenuineIntel
Model name: Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz
CPU family: 6
Model: 85
Thread(s) per core: 2
Core(s) per socket: 24
Socket(s): 2
Stepping: 7
CPU max MHz: 4000.0000
CPU min MHz: 1200.0000
BogoMIPS: 6000.00
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cdp_l3 invpcid_single intel_ppin ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm mpx rdt_a avx512f avx512dq rdseed adx smap clflushopt clwb intel_pt avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts pku ospke avx512_vnni md_clear flush_l1d arch_capabilities
Virtualization: VT-x
L1d cache: 1.5 MiB (48 instances)
L1i cache: 1.5 MiB (48 instances)
L2 cache: 48 MiB (48 instances)
L3 cache: 71.5 MiB (2 instances)
NUMA node(s): 2
NUMA node0 CPU(s): 0-23,48-71
NUMA node1 CPU(s): 24-47,72-95
Vulnerability Gather data sampling: Mitigation; Microcode
Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Retbleed: Mitigation; Enhanced IBRS
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Mitigation; TSX disabled
Versions of relevant libraries:
[pip3] flake8==3.8.4
[pip3] numpy==1.19.2
[pip3] numpydoc==1.1.0
[pip3] torch==2.2.0a0+git9fa3350
[conda] blas 1.0 mkl
[conda] mkl 2020.2 256
[conda] mkl-service 2.3.0 py38he904b0f_0
[conda] mkl_fft 1.2.0 py38h23d657b_0
[conda] mkl_random 1.1.1 py38h0573a6f_0
[conda] numpy 1.19.2 py38h54aff64_0
[conda] numpy-base 1.19.2 py38hfa32c7d_0
[conda] numpydoc 1.1.0 pyhd3eb1b0_1
[conda] torch 2.2.0a0+git9fa3350 dev_0
cc @ptrblck @msaroufim @mruberry @kshitij12345 | module: cuda,triaged,module: bfloat16,module: special | low | Critical |
2,648,595,146 | rust | Build failure with llvm 19 | <!--
Thank you for filing a bug report! 🐛 Please provide a short summary of the bug,
along with any information you feel relevant to replicating the bug.
-->
I tried this code:
```rust
Compiling rustc_driver v0.0.0 (/root/rust-nightly/BUILD/rustc-nightly-src/compiler/rustc_driver)
error: linking with `cc` failed: exit status: 1
|
= note: LC_ALL="C" PATH="/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-sysroot/lib/rustlib/aarch64-unknown-linux-gnu/bin:/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0/lib/rustlib/aarch64-unknown-linux-gnu/bin:/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0/lib/rustlib/aarch64-unknown-linux-gnu/bin:/root/.local/bin:/root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" VSLANG="1033" "cc" "/tmp/rustcexm4mg/symbols.o" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/deps/rustc_main-33be16a5521dac56.rustc_main.1c7d98ad3486e0b0-cgu.0.rcgu.o" "-Wl,--as-needed" "-Wl,-Bdynamic" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/deps/librustc_driver-a1631ec590227105.so" "-Wl,-Bstatic" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-sysroot/lib/rustlib/aarch64-unknown-linux-gnu/lib/libcompiler_builtins-db3f30e40f5d63a0.rlib" "-Wl,-Bdynamic" "-lPolly" "-lPollyISL" "-lrt" "-ldl" "-lm" "-lz" "-lzstd" "-lxml2" "-lstdc++" "-ldl" "-lgcc_s" "-lutil" "-lrt" "-lpthread" "-lm" "-ldl" "-lc" "-Wl,--eh-frame-hdr" "-Wl,-z,noexecstack" "-L" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/build/psm-ccb1c9596e3b84f8/out" "-L" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/build/blake3-ea5437fa3aa84c23/out" "-L" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/build/rustc_llvm-24183734d575e6fb/out" "-L" "/usr/lib64" "-L" "/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-sysroot/lib/rustlib/aarch64-unknown-linux-gnu/lib" "-o" 
"/root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/deps/rustc_main-33be16a5521dac56" "-Wl,--gc-sections" "-pie" "-Wl,-z,relro,-z,now" "-Wl,-O1" "-nodefaultlibs" "-Wl,-z,origin" "-Wl,-rpath,$ORIGIN/../lib"
= note: /usr/bin/aarch64-rosa-linux-gnu-ld: /root/rust-nightly/BUILD/rustc-nightly-src/build/aarch64-unknown-linux-gnu/stage0-rustc/aarch64-unknown-linux-gnu/release/deps/librustc_driver-a1631ec590227105.so: undefined reference to `llvm::DOTGraphTraits<llvm::RegionNode*>::getNodeLabel[abi:cxx11](llvm::RegionNode*, llvm::RegionNode*)'
collect2: error: ld returned 1 exit status
= note: some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
= note: use the `-l` flag to specify native libraries to link
= note: use the `cargo:rustc-link-lib` directive to specify the native libraries to link with Cargo (see https://doc.rust-lang.org/cargo/reference/build-scripts.html#rustc-link-lib)
```
| A-linkage,A-LLVM,T-bootstrap,C-bug,O-AArch64,requires-custom-config,E-needs-investigation | low | Critical |
2,648,595,740 | vscode | `editor.wordWrap: "bounded"` don't work when inside a settings block for multiple languages | <!-- ⚠️⚠️ Do Not Delete This! bug_report_template ⚠️⚠️ -->
<!-- Please read our Rules of Conduct: https://opensource.microsoft.com/codeofconduct/ -->
<!-- 🕮 Read our guide about submitting issues: https://github.com/microsoft/vscode/wiki/Submitting-Bugs-and-Suggestions -->
<!-- 🔎 Search existing issues to avoid creating duplicates. -->
<!-- 🧪 Test using the latest Insiders build to see if your issue has already been fixed: https://code.visualstudio.com/insiders/ -->
<!-- 💡 Instead of creating your report here, use 'Report Issue' from the 'Help' menu in VS Code to pre-fill useful information. -->
<!-- 🔧 Launch with `code --disable-extensions` to check. -->
Does this issue occur when all extensions are disabled?: Yes
<!-- 🪓 If you answered No above, use 'Help: Start Extension Bisect' from Command Palette to try to identify the cause. -->
<!-- 📣 Issues caused by an extension need to be reported directly to the extension publisher. The 'Help > Report Issue' dialog can assist with this. -->
- VS Code Version: 1.95.2
- OS Version: Windows 11 (10.0.22631)
Steps to Reproduce:
1. Use the following settings:
```json
{
"[markdown][plaintext]": {
"editor.wordWrap": "bounded",
},
}
```
2. Open a markdown or plaintext file and type some text. The text won't wrap at 80th column, but wrap at the viewport edge.
Note that if you launch VSCode to open a file, the file will be correctly wrapped. However, it stops working when you switch to another tab and switch back. | bug,editor-wrapping | low | Critical |
2,648,633,967 | go | proposal: log/slog: export Source method in Record to support custom handler implementations | ### Go version
go version go1.23.1 linux/amd64
### Output of `go env` in your module/workspace:
```shell
Not relevant
```
### What did you do?
Implement a custom handler for the log/slog package. My custom handler requires access to the source location (file and line) of log entries for detailed logging.
### What did you see happen?
The source method in slog.Record is not exported, meaning it’s inaccessible to custom handlers. As a result, I had to re-implement logic to retrieve source location information, which led to redundant code and reduced consistency with Go's built-in logging behavior.
### What did you expect to see?
Expected an accessible Source method in slog.Record that allows custom handlers to retrieve source location information without re-implementing the existing internal logic. This would simplify the creation of custom handlers and ensure consistent behavior when accessing log entry sources. | Proposal | low | Major |
2,648,662,471 | stable-diffusion-webui | [Bug]: WebUI not startting with --listen | ### Checklist
- [X] The issue exists after disabling all extensions
- [X] The issue exists on a clean installation of webui
- [ ] The issue is caused by an extension, but I believe it is caused by a bug in the webui
- [X] The issue exists in the current version of the webui
- [X] The issue has not been reported before recently
- [ ] The issue has been reported before but has not been fixed yet
### What happened?
Webui crash when trying to start with --listen flag
### Steps to reproduce the problem
Go to webui-user.sh
Add --listen flag to COMMANDLINE_ARGS
### What should have happened?
Webui should start
### What browsers do you use to access the UI ?
Mozilla Firefox
### Sysinfo
[sysinfo-2024-11-11-09-02.json](https://github.com/user-attachments/files/17699866/sysinfo-2024-11-11-09-02.json)
### Console logs
```Shell
https://pastebin.com/zZQJqvQj
```
### Additional information
_No response_ | bug-report | low | Critical |
2,648,667,046 | stable-diffusion-webui | [HELP]: replacement for supermerger | ### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What would your feature do ?
a merger that merge FLUX models and loras
option cosine and so one like supermerger
https://github.com/hako-mikan/sd-webui-supermerger/issues/415#issuecomment-2467196960
### Proposed workflow
123
### Additional information
_No response_ | enhancement | low | Minor |
2,648,683,239 | vscode | Improve contrast of +/- type zoom buttons | The contrast on these buttons is very low:

Compare this to that in context menus for example which use light grey/white instead of blue for active:

| polish,editor-hover | low | Minor |
2,648,686,040 | vscode | Increase hit target of +/- type zoom buttons | The hit target is very small:

This is actually much smaller than the breakpoint hit target:

| polish,editor-hover | low | Minor |
2,648,690,560 | vscode | Zooming into types moves the +/- buttons off screen | Awkward UX:

| feature-request,ux,polish,editor-hover | low | Minor |
2,648,694,655 | vscode | Zoomed types truncates too quickly | The content gets truncated too quickly imo, considering this is an explicit user action to get more information about the type:

| bug,editor-hover | low | Minor |
2,648,700,491 | vscode | Zooming types syntax highlighting doesn't look good | I think this is because we use the tmGrammar and hope for the best:

Some ideas:
- Perhaps we could we leverage tree sitter here instead?
- Experiment with changing the language
- Should we color the RHS of the : differently if it's not perfect TS?
- Create a custom grammar which worked well? | bug,editor-hover | low | Minor |
2,648,714,163 | deno | Env support for Jupyter kernel | I'd love for a cell like this to work:
```ts
const a = Deno.env.get("PATH")
a
```
The use-case would be to provide secrets (i.e., API keys) to the notebook.
| bug,deno jupyter | low | Minor |
2,648,741,032 | deno | Jupyter kernel - relative import | It'd be nice to write some helper functions in my local project (for example that fetches/downloads data, in `./data.ts`) and then use that somehow in a cell like this:
```ts
const { getData } from './data.ts' // or 'from ./data'
await getData()
```
But that seems to crash the deno kernel. | bug,needs investigation,deno jupyter | low | Critical |
2,648,747,711 | godot | Inconsistent/incorrect result for 'is_valid_hex_number()' when using signed '0x' prefix | ### Tested versions
4.4
### System information
Godot v4.4.dev (0f5f3bc95) - Windows 10.0.18363 - Multi-window, 4 monitors - Vulkan (Forward+) - dedicated NVIDIA GeForce GTX 970 (NVIDIA; 32.0.15.6590) - Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz (8 threads)
### Issue description
The [is_valid_hex_number](https://docs.godotengine.org/en/stable/classes/class_string.html#class-string-method-is-valid-hex-number)-method have inconsistent behavior when provided with strings like `"0x"`, `"-0x"`, and `"+0x"`. The cases with plus/minus signs currently return `true`, even though they do not contain any hexadecimal digits after the `"0x"` prefix.
Example tests for the method, it has inconsistent returns for the following inputs:
```gdscript
print("-0x".is_valid_hex_number(true)) # Returns true (expected: false)
print("0x".is_valid_hex_number(true)) # Returns false (expected: false)
print("+0x".is_valid_hex_number(true)) # Returns true (expected: false)
```
Example (MRP screenshot):

### Steps to reproduce
N/A
### Minimal reproduction project (MRP)
[hex_issue.zip](https://github.com/user-attachments/files/17700015/hex_issue.zip)
| bug,topic:core | low | Minor |
2,648,767,638 | next.js | [Turbopack] : GraphQL with turbopack | ### Link to the code that reproduces this issue
https://github.com/MersadHabibi/template-fullstack-nextjs-graphql
### To Reproduce
1. run ``` npm run dev ```
2. open ``` /api/graphql ``` route
3. see error
### Current vs. Expected behavior
I was creating a template and ran into trouble with turbopack.
Apollo Studio loads correctly when I don't use turbopack, but when use turbopack i get this error
```
{
"errors": [
{
"message": "module.require is not a function",
"extensions": {
"code": "INTERNAL_SERVER_ERROR",
"stacktrace": [
"TypeError: module.require is not a function",
" at createHash (C:\\Users\\kimya\\Desktop\\templates\\test\\.next\\server\\chunks\\node_modules_9a9996._.js:4182:23)",
" at Object.html (C:\\Users\\kimya\\Desktop\\templates\\test\\.next\\server\\chunks\\node_modules_@apollo_server_dist_4d2543._.js:2381:247)",
" at ApolloServer.executeHTTPGraphQLRequest (C:\\Users\\kimya\\Desktop\\templates\\test\\.next\\server\\chunks\\node_modules_@apollo_server_dist_4d2543._.js:1948:77)"
]
}
}
]
}
```

```
Landing page `html` function threw: TypeError: module.require is not a function
```

this is my repo link
[link](https://github.com/MersadHabibi/template-fullstack-nextjs-graphql)
### Provide environment information
```bash
Node.js v20.15.0
Operating System:
Platform: win32
Arch: x64
Version: Windows 11 Pro
Available memory (MB): 12072
Available CPU cores: 8
Binaries:
Node: 20.15.0
npm: 10.7.0
Yarn: N/A
pnpm: N/A
Relevant Packages:
next: 15.0.3 // Latest available version is detected (15.0.3).
eslint-config-next: 15.0.3
react: 19.0.0-rc-66855b96-20241106
react-dom: 19.0.0-rc-66855b96-20241106
typescript: 5.6.3
Next.js Config:
output: N/A
```
### Which area(s) are affected? (Select all that apply)
Turbopack
### Which stage(s) are affected? (Select all that apply)
next dev (local)
### Additional context
its only happen on development mode with turbopack
route.ts
```typescript
import { ApolloServer } from "@apollo/server";
import {
ApolloServerPluginLandingPageLocalDefault,
ApolloServerPluginLandingPageProductionDefault,
} from "@apollo/server/plugin/landingPage/default";
import { startServerAndCreateNextHandler } from "@as-integrations/next";
import { PrismaClient } from "@prisma/client";
import fs from "fs";
import {
DateTimeTypeDefinition,
PositiveFloatTypeDefinition,
PositiveIntTypeDefinition,
URLTypeDefinition,
} from "graphql-scalars";
import { NextRequest } from "next/server";
import path from "path";
import resolvers from "./resolvers";
let typeDefs = "";
let plugins = [];
if (process.env.NODE_ENV === "production") {
typeDefs = fs.readFileSync(
path.join(path.resolve(), "./src/app/api/graphql/schema.graphql"),
"utf-8",
);
plugins = [
ApolloServerPluginLandingPageProductionDefault({
embed: true,
graphRef: "mersad.up@gmail.com",
}),
];
} else {
typeDefs = fs.readFileSync("./src/app/api/graphql/schema.graphql", "utf8");
plugins = [ApolloServerPluginLandingPageLocalDefault({ embed: true })];
}
const server = new ApolloServer({
typeDefs: [
typeDefs,
DateTimeTypeDefinition,
PositiveIntTypeDefinition,
PositiveFloatTypeDefinition,
URLTypeDefinition,
],
resolvers: {
...resolvers,
// DateTimeResolver,
// PositiveFloatResolver,
// PositiveIntResolver,
// URLResolver,
},
plugins,
});
const handler = startServerAndCreateNextHandler<NextRequest>(server, {
context: async (req) => {
const prisma = new PrismaClient();
prisma.$connect();
return {
req,
prisma,
};
},
});
export async function GET(request: NextRequest) {
return handler(request);
}
export async function POST(request: NextRequest) {
return handler(request);
}
``` | bug,Turbopack | low | Critical |
2,648,769,755 | deno | Unexpected `code: ERR_MODULE_NOT_FOUND` on module linking errors | Version: Deno 2.0.5
**Input code**
```js
// entrypoint.js
import("./mod.js").catch(console.log);
```
```js
// mod.js
import { x } from "./mod.js"
```
**Actual behavior**
The logged error has a `code: ERR_MODULE_NOT_FOUND` property.
```
deno run entrypoint.js
[SyntaxError: The requested module './mod.js' does not provide an export named 'x' at .../mod.js:1:10] {
code: "ERR_MODULE_NOT_FOUND"
}
```
**Expected behavior**
This error is an error defined by the ES spec, and not an error that hosts can define. It's defined at step 7.c.ii of https://tc39.es/ecma262/#sec-source-text-module-record-initialize-environment and it has no extra properties.
**Other info**
On the other hand, it's perfectly fine to add a `code` property to errors that are thrown when resolving an import specifier and fetching/reading the file fails, since that's host defined behavior and https://tc39.es/ecma262/#sec-HostLoadImportedModule defines no requirements on which error is thrown. | bug | low | Critical |
2,648,844,839 | rust | ReferencePropagation introduces UB into code that is accepted by Stacked Borrows | This program is accepted by Stacked Borrows:
```rust
fn main() {
struct Foo(u64);
impl Foo {
fn add(&mut self, n: u64) -> u64 {
self.0 + n
}
}
let mut f = Foo(0);
let alias = &mut f.0 as *mut u64;
let res = f.add(unsafe {
*alias = 42;
0
});
assert_eq!(res, 42);
}
```
That is a Stacked Borrows limitation; it is caused by the fact that 2-phase borrows cannot be modeled properly with just a Stack.
However, it also shows that defining an aliasing model that rejects this code is non-trivial, and we should be very careful with optimizations on such code until we have a clear plan for how to model this.
And yet, it turns out that running this code with `mir-opt-level=2` introduces UB:
```
error: Undefined Behavior: trying to retag from <1484> for Unique permission at alloc702[0x0], but that tag does not exist in the borrow stack for this location
--> 2phase.rs:4:16
|
4 | fn add(&mut self, n: u64) -> u64 {
| ^^^^^^^^^
| |
| trying to retag from <1484> for Unique permission at alloc702[0x0], but that tag does not exist in the borrow stack for this location
| this error occurs as part of function-entry retag at alloc702[0x0..0x8]
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
help: <1484> was created by a SharedReadWrite retag at offsets [0x0..0x8]
--> 2phase.rs:11:15
|
11 | let res = f.add(unsafe {
| ^
help: <1484> was later invalidated at offsets [0x0..0x8] by a write access
--> 2phase.rs:14:9
|
14 | *alias = 42;
| ^^^^^^^^^^^
= note: BACKTRACE (of the first span):
= note: inside `main::Foo::add` at 2phase.rs:4:16: 4:25
note: inside `main`
--> 2phase.rs:11:15
|
11 | let res = f.add(unsafe {
| _______________^
12 | | // This is the access at fault, but it's not immediately apparent because
13 | | // the reference that got invalidated is not under a Protector.
14 | | *alias = 42;
15 | | 0
16 | | });
| |______^
```
This is quite surprising, I thought we were very conservative in terms of doing optimizations that rely on the aliasing model. I have not yet figured out where exactly this comes from.
Cc @rust-lang/opsem @rust-lang/wg-mir-opt @cjgillot | T-compiler,C-bug,A-mir-opt,I-miscompile,E-needs-investigation,WG-mir-opt | low | Critical |
2,648,969,218 | ui | [bug]: Unable to add/install toast | ### Describe the bug
`npx shadcn@latest add toast`
is not working, I am getting the following error
`Something went wrong. Please check the error below for more details.
If the problem persists, please open an issue on GitHub.
Command failed with exit code 1: npm install @radix-ui/react-toast
npm ERR! Cannot read properties of null (reading 'matches')`
### Affected component/components
toast
### How to reproduce
`npx shadcn@latest add toast`
### Codesandbox/StackBlitz link
_No response_
### Logs
_No response_
### System Info
```bash
Mac M1, "react": "^18.2.0", shadcn": "^2.1.6",
```
### Before submitting
- [X] I've made research efforts and searched the documentation
- [X] I've searched for existing issues | bug | low | Critical |
2,649,000,793 | vscode | When searching rtl letters in Arabic using `Ctrl+F` the word gets reversed and disconnected | <!-- ⚠️⚠️ Do Not Delete This! bug_report_template ⚠️⚠️ -->
<!-- Please read our Rules of Conduct: https://opensource.microsoft.com/codeofconduct/ -->
<!-- 🕮 Read our guide about submitting issues: https://github.com/microsoft/vscode/wiki/Submitting-Bugs-and-Suggestions -->
<!-- 🔎 Search existing issues to avoid creating duplicates. -->
<!-- 🧪 Test using the latest Insiders build to see if your issue has already been fixed: https://code.visualstudio.com/insiders/ -->
<!-- 💡 Instead of creating your report here, use 'Report Issue' from the 'Help' menu in VS Code to pre-fill useful information. -->
<!-- 🔧 Launch with `code --disable-extensions` to check. -->
Does this issue occur when all extensions are disabled?: Yes/No
<!-- 🪓 If you answered No above, use 'Help: Start Extension Bisect' from Command Palette to try to identify the cause. -->
<!-- 📣 Issues caused by an extension need to be reported directly to the extension publisher. The 'Help > Report Issue' dialog can assist with this. -->
- VS Code Version: 1.95.2
- OS Version: Linux x64 6.11.6-arch1-1
Steps to Reproduce:
1. Open A file with Arabic text
2. click `Ctrl+F`
3. search for a part of the word
4. observe the word being reversed and incorrectly aligned
Wrong behavior below:

How it looks when highlighted using side search bar (correct)

| bug,editor-RTL | low | Critical |
2,649,005,577 | yt-dlp | [ArteTV] arte.tv audio `format_note` lists wrong language | ### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm asking a question and **not** reporting a bug or requesting a feature
- [X] I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
### Please make sure the question is worded well enough to be understood
noticed a curious bug. No idea how long has it been there. Did not find a "bug report category for particular extractor" so will report as question.
anyway, so here's a test link: https://www.arte.tv/de/videos/117216-007-A/tracks-east/
I run ./yt-dlp -F "https://www.arte.tv/de/videos/117216-007-A/tracks-east/" or ./yt-dlp -J "https://www.arte.tv/de/videos/117216-007-A/tracks-east/" and noticed that for some reason Language parameter is always correct and matched the audio stream. But format_note and format will say "Deutsch [DE]" for everything (nothing German and not).
format = VOA-STA-audio_0-Franz\U00f6sisch - audio only (Deutsch [DE])
format_note = Deutsch [DE]
language = fr
Since the audio track is French, it would be logical to say the same in format_note and format.
not a big deal for downloads, but just confusing in some cases. Apologies for being such a nerd :-)
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
./yt-dlp -vU -F "https://www.arte.tv/de/videos/117217-024-A/tracks-east/"
[debug] Command-line config: ['-vU', '-F', 'https://www.arte.tv/de/videos/117217-024-A/tracks-east/']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version stable@2024.11.04 from yt-dlp/yt-dlp [197d0b03b] (zip)
[debug] Python 3.12.2 (CPython arm64 64bit) - macOS-14.6.1-arm64-arm-64bit (OpenSSL 3.0.13 30 Jan 2024)
[debug] exe versions: phantomjs 2.1.1, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.20.0, brotli-1.1.0, certifi-2024.07.04, curl_cffi-0.7.1, mutagen-1.47.0, requests-2.32.3, sqlite3-3.45.1, urllib3-2.2.2, websockets-12.0
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, curl_cffi
[debug] Extractor Plugins: SamplePluginIE
[debug] Post-Processor Plugins: SamplePluginPP
[debug] Loaded 1963 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: stable@2024.11.04 from yt-dlp/yt-dlp
yt-dlp is up to date (stable@2024.11.04 from yt-dlp/yt-dlp)
[ArteTV] Extracting URL: https://www.arte.tv/de/videos/117217-024-A/tracks-east/
[ArteTV] 117217-024-A: Downloading JSON metadata
[ArteTV] 117217-024-A: Downloading m3u8 information
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec, channels, acodec, size, br, asr, proto, vext, aext, hasaud, source, id
[info] Available formats for 117217-024-A:
ID EXT RESOLUTION FPS │ FILESIZE TBR PROTO │ VCODEC VBR ACODEC MORE INFO
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
VA-STA-audio_0-Deutsch mp4 audio only │ m3u8 │ audio only unknown [de] Deutsch [DE]
VA-STA-audio_0-Deutsch__Klare_Sprache_ mp4 audio only │ m3u8 │ audio only unknown [de] Deutsch [DE]
VA-STA-audio_0-Französisch mp4 audio only │ m3u8 │ audio only unknown [fr] Deutsch [DE]
VA-STA-audio_0-Französisch__Klare_Sprache_ mp4 audio only │ m3u8 │ audio only unknown [fr] Deutsch [DE]
VA-STA-audio_0-Mehrsprachig mp4 audio only │ m3u8 │ audio only unknown [mul] Deutsch [DE]
VA-STA-427 mp4 384x216 25 │ ~ 92.58MiB 428k m3u8 │ avc1.42e00d 428k video only Deutsch [DE]
VA-STA-827 mp4 640x360 25 │ ~178.98MiB 827k m3u8 │ avc1.4d401e 827k video only Deutsch [DE]
VA-STA-1503 mp4 768x432 25 │ ~325.21MiB 1503k m3u8 │ avc1.4d401e 1503k video only Deutsch [DE]
VA-STA-2294 mp4 1280x720 25 │ ~496.53MiB 2295k m3u8 │ avc1.4d401f 2295k video only Deutsch [DE]
VA-STA-2688 mp4 1920x1080 25 │ ~581.79MiB 2689k m3u8 │ avc1.4d0028 2689k video only Deutsch [DE]
```
| site-bug | low | Critical |
2,649,018,245 | godot | Editor remote debugger should allow to listen on all interfaces | ### Tested versions
- Reproducible in 4.4.dev4 / current master
### System information
Tested on Windows, doubt its relevant though
### Issue description
As of right now (before https://github.com/godotengine/godot/pull/98282/), the docs for the editor setting `network/debug/remote_host` state: `This can be set to 0.0.0.0 to allow external clients to connect to the remote debugger (instead of restricting the remote debugger to connections from localhost).`
However, in the editor this is exposed as a dropdown selection of available local network interface adressses. "All" is not an option, but I'd indeed expect it to work, as the case described above makes sense (and is probably what I'd use more often rather than restricting this to, say, a specific IPv4 interface).
As this setting is a string, it may work when set from code, in which case this is more of a Editor UI issue, where we need to inject an additional wildcard/any setting for the dropdown.
### Steps to reproduce
- Open editor, browse editor (not project) settings, search for "remote host", notice the dropdown doesn't allow selectin all interfaces
### Minimal reproduction project (MRP)
N/A | enhancement,topic:editor,topic:network | low | Critical |
2,649,039,774 | rust | ErrorCompile | <!--
Thank you for finding an Internal Compiler Error! 🧊 If possible, try to provide
a minimal verifiable example. You can read "Rust Bug Minimization Patterns" for
how to create smaller examples.
http://blog.pnkfx.org/blog/2019/11/18/rust-bug-minimization-patterns/
-->
### Code
```
#![warn(clippy::all, clippy::pedantic)]
fn main() {
let arr = [1, 2, 3, 4, 5];
for num in arr.iter() {
println!("{} is unique", num);
}
}
```
### Meta
`rustc --version --verbose`:
1.82.0
```
<version>
```
### Error output
error: the compiler unexpectedly panicked. this is a bug.
note: we would appreciate a bug report: https://github.com/rust-lang/rust/issues/new?labels=C-bug%2C+I-ICE%2C+T-compiler&template=ice.md
note: rustc 1.82.0 (f6e511eec 2024-10-15) running on x86_64-pc-windows-gnu
note: compiler flags: --crate-type bin -C embed-bitcode=no -C debuginfo=2 -C incremental=[REDACTED]
note: some of the compiler flags provided by cargo are hidden
query stack during panic:
end of query stack
<!--
Include a backtrace in the code block by setting `RUST_BACKTRACE=1` in your
environment. E.g. `RUST_BACKTRACE=1 cargo build`.
-->
<details><summary><strong>Backtrace</strong></summary>
<p>
```
<backtrace>
```
</p>
</details>
| I-ICE,T-compiler,A-incr-comp,C-bug,S-needs-repro | low | Critical |
2,649,083,229 | angular | Support for ElementInternals in Angular Elements | ### Which @angular/* package(s) are relevant/related to the feature request?
elements
### Description
To participate in HTML form submission, a custom element must have its element internals defined. Example from [MDN](https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement/attachInternals):
```
class CustomCheckbox extends HTMLElement {
static formAssociated = true;
constructor() {
super();
this.internals_ = this.attachInternals();
}
// …
}
```
Are there any plans to incorporate this part of the Custom Elements API into Angular Elements?
### Proposed solution
Add a new property to `NgElementConfig` called `"isFormElement"`. Then, modify the `createCustomElement` function to set `this.internals_ = this.attachInternals();` in the constructor and add the `static formAssociated = true;` property if the flag is set.
```typescript
export interface NgElementConfig {
injector: Injector;
isFormElement: boolean;
strategyFactory?: NgElementStrategyFactory;
}
```
### Alternatives considered
Use decorator-based approach for example: `@FormAssociated` | area: elements | medium | Minor |
2,649,121,917 | deno | Deno fails to find alpha package versions unless explicitly specified | Version: Deno 2.0.6
Doing `deno run -A jsr:@fresh/init` fails unless you specify the exact version number. | jsr | low | Minor |
2,649,134,332 | deno | `Deno.connect()` can't be canceled if unreachable hostname and port specified | If unreachable hostname and port pair is specified in `Deno.connect`, it can't be canceled until it times out with OS Error.
```js
const conn = await Deno.connect({ hostname: "104.20.22.46", port: 50000 });
// This times out with os error 60 in about 75 sec on mac os.
```
This situation is inconvenient for some of the node.js compat unit test cases (for example, a case in `tests/node_compat/test/parallel/test-net-autoselectfamily.js` connects to the above server and the above hang happens, which wastes about 1 min before the connection is closed from the server (See #26661)) | bug,ext/net | low | Critical |
2,649,235,615 | material-ui | Customizing the theme in nextjs: responsiveFontSizes, fontFamily and allVariants don't work with custom Typography variants | ### Steps to reproduce
[Link to live example: (required)](https://github.com/Susannah-Parsons/mui-theme-bug)
Steps:
1. Run the program from the mui-theme-bug respository
2. Check the styles for the body3 typography
3. Note that the styles don't include the fontFamily nor the styles from allVariants
4. Note that none of the typography variants have responsive fonts
### Current behavior
Custom typography variants don't include the fontFamily nor the styles in allVariants.
None of the typography variants have responsiveFontStyles
### Expected behavior
All the typography variants should use the fontFamily and allVariants properties. All the typography variants should have responsive font sizes.
### Context
Custom typography variants that work properly with the fontFamily, allVariants and responsiveFontStyles features of custom theming in MUI
### Your environment
<details>
<summary><code>npx @mui/envinfo</code></summary>
```
System:
OS: Linux 5.15 Ubuntu 22.04.4 LTS 22.04.4 LTS (Jammy Jellyfish)
Binaries:
Node: 20.17.0 - /usr/bin/node
npm: 10.8.2 - /usr/bin/npm
pnpm: Not Found
Browsers:
Chrome: Not Found
npmPackages:
@emotion/react: ^11.13.3 => 11.13.3
@emotion/styled: ^11.13.0 => 11.13.0
@mui/core-downloads-tracker: 6.1.6
@mui/material: ^6.1.6 => 6.1.6
@mui/private-theming: 6.1.6
@mui/styled-engine: 6.1.6
@mui/system: 6.1.6
@mui/types: 7.2.19
@mui/utils: 6.1.6
@types/react: ^18 => 18.3.12
react: 19.0.0-rc-66855b96-20241106 => 19.0.0-rc-66855b96-20241106
react-dom: 19.0.0-rc-66855b96-20241106 => 19.0.0-rc-66855b96-20241106
typescript: ^5 => 5.6.3
I used Microsoft Edge
Version 130.0.2849.68 (Official build) (64-bit)
to view the app
```
</details>
**Search keywords**: responsiveFontSizes fontFamily, allVariants, custom, typography, theme | docs,support: question,package: material-ui,customization: theme | low | Critical |
2,649,255,440 | tailwindcss | `src` descriptor in `@font-face` rule is transformed/formatted incorrectly | <!-- Please provide all of the information requested below. We're a small team and without all of this information it's not possible for us to help and your bug report will be closed. -->
**What version of Tailwind CSS are you using?**
For example: both 3.4.13 and 4.0.0-alpha.30
**What build tool (or framework if it abstracts the build tool) are you using?**
This happens in the online playground as well. Originally Next.js with Turbopack
**What version of Node.js are you using?**
v20
**What browser are you using?**
Chrome
**What operating system are you using?**
macOS
**Reproduction URL**
https://play.tailwindcss.com/NObbqx3vf9?file=css
**Describe your issue**
Tailwind turns
```css
@font-face {
font-family: 'Inconsolata';
font-style: normal;
font-weight: 200 900;
font-stretch: 100%;
font-display: swap;
src: url(some/url/with/query?{%22url%22:%22https://fonts.gstatic.com/s/inconsolata/v32/QlddNThLqRwH-OJ1UHjlKENVzkWGVkL3GZQmAwLyxq15IDhunJ_o.woff2%22,%22preload%22:false,%22has_size_adjust%22:true}) format('woff2');
unicode-range: U+0102-0103, U+0110-0111, U+0128-0129, U+0168-0169, U+01A0-01A1, U+01AF-01B0, U+0300-0301, U+0303-0304, U+0308-0309, U+0323, U+0329, U+1EA0-1EF9, U+20AB;
}
```
into
```css
@font-face {
font-family: 'Inconsolata';
font-style: normal;
font-weight: 200 900;
font-stretch: 100%;
font-display: swap;
src: url(some/url/with/query? {
%22url%22: %22https://fonts.gstatic.com/s/inconsolata/v32/QlddNThLqRwH-OJ1UHjlKENVzkWGVkL3GZQmAwLyxq15IDhunJ_o.woff2%22,%22preload%22:false,%22has_size_adjust%22:true;
}
) format('woff2': ) format('woff2');
unicode-range: U+0102-0103, U+0110-0111, U+0128-0129, U+0168-0169, U+01A0-01A1, U+01AF-01B0, U+0300-0301, U+0303-0304, U+0308-0309, U+0323, U+0329, U+1EA0-1EF9, U+20AB;
}
```
Notice that `src:` value has some newlines, and that `format()` is duplicated with invalid syntax.
| v3 | low | Critical |
2,649,269,060 | node | Incorrect version for `PerformanceObserverEntryList` | ### Affected URL(s)
https://nodejs.org/docs/latest/api/perf_hooks.html
### Description of the problem
this interface has shipped in v8.5.0, but the interface itself cann't be accessed before v16.7.0
see https://github.com/mdn/browser-compat-data/pull/25001 for full test | doc | low | Major |
2,649,274,070 | godot | Random crashes occur during runtime, Program crashed with signal 4 | ### Tested versions
v4.3.stable.official [77dcf97d8],v4.4.dev4.mono.official [36e6207bb]
### System information
Windows 11 - Intel(R) Core i5-12400F - RTX4060 - Godot v4.3.stable.official [77dcf97d8] (Forward+)
### Issue description
Windows 11, Intel(R) Core i5-12400F, RTX4060
The project is quite complex, with numerous files. There are no errors in the debugger, but it causes the game to crash randomly.I am not very experienced, so all I can do is provide the error message. I don't know how to pinpoint the error. Below is the error message:
```
================================================================
USER ERROR: FATAL: Index p_index = 1 is out of bounds (size() = 0).
at: get (./core/templates/cowdata.h:205)
================================================================
CrashHandlerException: Program crashed with signal 4
Engine version: Godot Engine v4.3.stable.official (77dcf97d82cbfe4e4615475fa52ca03da645dbd8)
Dumping the backtrace. Please include this when reporting the bug to the project developer.
[1] error(-1): no debug info in PE/COFF executable
[2] error(-1): no debug info in PE/COFF executable
[3] error(-1): no debug info in PE/COFF executable
[4] error(-1): no debug info in PE/COFF executable
[5] error(-1): no debug info in PE/COFF executable
[6] error(-1): no debug info in PE/COFF executable
[7] error(-1): no debug info in PE/COFF executable
[8] error(-1): no debug info in PE/COFF executable
[9] error(-1): no debug info in PE/COFF executable
[10] error(-1): no debug info in PE/COFF executable
[11] error(-1): no debug info in PE/COFF executable
[12] error(-1): no debug info in PE/COFF executable
[13] error(-1): no debug info in PE/COFF executable
[14] error(-1): no debug info in PE/COFF executable
[15] error(-1): no debug info in PE/COFF executable
[16] error(-1): no debug info in PE/COFF executable
[17] error(-1): no debug info in PE/COFF executable
[18] error(-1): no debug info in PE/COFF executable
[19] error(-1): no debug info in PE/COFF executable
[20] error(-1): no debug info in PE/COFF executable
[21] error(-1): no debug info in PE/COFF executable
[22] error(-1): no debug info in PE/COFF executable
[23] error(-1): no debug info in PE/COFF executable
[24] error(-1): no debug info in PE/COFF executable
[25] error(-1): no debug info in PE/COFF executable
[26] error(-1): no debug info in PE/COFF executable
[27] error(-1): no debug info in PE/COFF executable
[28] error(-1): no debug info in PE/COFF executable
[29] error(-1): no debug info in PE/COFF executable
[30] error(-1): no debug info in PE/COFF executable
[31] error(-1): no debug info in PE/COFF executable
[32] error(-1): no debug info in PE/COFF executable
[33] error(-1): no debug info in PE/COFF executable
[34] error(-1): no debug info in PE/COFF executable
[35] error(-1): no debug info in PE/COFF executable
[36] error(-1): no debug info in PE/COFF executable
-- END OF BACKTRACE --
================================================================
```
### Steps to reproduce
N/A
### Minimal reproduction project (MRP)
N/A | bug,topic:core,needs testing,crash | low | Critical |
2,649,324,240 | PowerToys | Unexpected behavior in Advanced Paste > Enable Paste with AI | ### Description of the new feature / enhancement
I had some repetitive work to do:
- copy values in my German Excel file with comma as decimal separator
- paste those values in Visual Studio as C# 2D array
So, I used "Advanced Paste" with "Enable Paste with AI". On the first time, I prompted "paste as C# 2D Array. Replace the comma decimal separator with the new decimal separator point".
I copied this prompt to a text file.
On the second time, I copied Excel values, opened Advanced Paste, copied the prompt and did not get my values as C# array but just the prompt.
I can fully understand, why this happens, but it is not what I expected. And I do not see a workaround for this kind of repetitive work.
What do you think about a "Prompt history"? This would make repetitive work just
Shift + Windows + V --> click
### Scenario when this would be used?
repetitive work
### Supporting information
_No response_ | Needs-Triage,Needs-Team-Response | low | Minor |
2,649,326,197 | kubernetes | The pod is in Terminating state and cannot be deleted. | ### What happened?
After the pod is created, kubelet is attaching volumes. One volume of the csi type fails to be attached. When the pod is deleted, kubelet displays the DELETE log. However, the volume still fails to be attached. However, the volume that fails to be attached is not detached in the volume detaching process. In this case, the pod is always in the Terminating state.
Here are some of the logs:
kubelet.log_20241110-011239.gz:I1109 15:20:58.829354 991024 reconciler_common.go:231] "operationExecutor.MountVolume started for volume \"log\" (UniqueName: \"kubernetes.io/csi/611e4a7f-e11c-4a85-a9de-c12786553c1b-log\") pod \"apicatalogmgrservice-76cb4ddf47-gcqdc\" (UID: \"611e4a7f-e11c-4a85-a9de-c12786553c1b\") " pod="sop/apicatalogmgrservice-76cb4ddf47-gcqdc"
kubelet.log_20241110-011239.gz:I1109 15:21:07.311383 991024 kubelet.go:2446] "SyncLoop DELETE" source="api" pods=["sop/apicatalogmgrservice-76cb4ddf47-gcqdc"]
kubelet.log_20241110-011239.gz:E1109 15:22:58.832641 991024 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/611e4a7f-e11c-4a85-a9de-c12786553c1b-log podName:611e4a7f-e11c-4a85-a9de-c12786553c1b nodeName:}" failed. No retries permitted until 2024-11-09 15:23:02.832622173 +0000 UTC m=+217774.838649689 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "log" (UniqueName: "kubernetes.io/csi/611e4a7f-e11c-4a85-a9de-c12786553c1b-log") pod "apicatalogmgrservice-76cb4ddf47-gcqdc" (UID: "611e4a7f-e11c-4a85-a9de-c12786553c1b") : rpc error: code = Unknown desc = malformed header: missing HTTP content-type
However, the operationExecutor.UnmountVolume started log is not displayed.
Only the following information is seen
kubelet.log_20241110-011239.gz:I1109 15:22:58.950400 991024 reconciler_common.go:300] "Volume detached for volume \"log\" (UniqueName: \"kubernetes.io/csi/611e4a7f-e11c-4a85-a9de-c12786553c1b-log\") on node \"master2\" DevicePath \"\""
### What did you expect to happen?
The pod should be correctly deleted or the volume that fails to be attached should trigger the volume detaching process.
### How can we reproduce it (as minimally and precisely as possible)?
Create a pod and ensure that a volume fails to be attached to a csi volume. Delete the pod. After the pod is deleted, the volume fails to be attached to the volume again. Then, the pod remains in the Terminating state.
### Anything else we need to know?
_No response_
### Kubernetes version
<details>
```console
$ kubectl version
# paste output here
```
1.31
</details>
### Cloud provider
<details>
</details>
### OS version
<details>
```console
# On Linux:
$ cat /etc/os-release
# paste output here
$ uname -a
# paste output here
# On Windows:
C:\> wmic os get Caption, Version, BuildNumber, OSArchitecture
# paste output here
```
</details>
### Install tools
<details>
</details>
### Container runtime (CRI) and version (if applicable)
<details>
</details>
### Related plugins (CNI, CSI, ...) and versions (if applicable)
<details>
</details>
| kind/bug,sig/storage,needs-triage | low | Critical |
2,649,332,569 | godot | Setting `TabContainer.current_tab` doesn't immediately change visibility | ### Tested versions
Reproducible in:
- v4.3.stable.official [77dcf97d8]
### System information
Godot v4.3.stable - Debian GNU/Linux trixie/sid trixie - Wayland - Vulkan (Mobile) - dedicated AMD Radeon RX 7600 (RADV NAVI33) - AMD Ryzen 5 7600 6-Core Processor (12 Threads)
### Issue description
When setting `current_tab` on a `TabContainer` instance, the visibility of the tabs does not get updated immediately.
In my particular case, this was causing an issue because I wanted to set the UI focus after switching the tab, but `%my_tab.find_next_valid_focus()` was returning an element outside the tab.
To set the focus one needs to either:
a) `await get_tree().process_frame` after setting `current_tab`; or
b) Just `%my_tab.visible = true` instead of setting `current_tab`.
### Steps to reproduce
1. Set `current_tab` on a `TabContainer`
2. Check the visibility of the now-current tab (hidden)
3. Wait a frame
4. Check visibility again (visible)
The MRP has an `if true:` in the script, which can be `false`d to compare behaviour between setting `current_tab` and setting `visible` directly.
### Minimal reproduction project (MRP)
Self-contained MRP tscn:
[current_tab_test.zip](https://github.com/user-attachments/files/17702886/current_tab_test.zip)
| bug,topic:gui | low | Minor |
2,649,350,169 | next.js | revalidatePath or revalidateTag does not execute internal revalidation logic if called inside a setTimeout nor does it throw an exception | ### Link to the code that reproduces this issue
https://github.com/trieb-work/nextjs-bug-revalidatetag-inside-settimeout
### To Reproduce
1. start the app with nodejs in production mode: `NODE_ENV=production VERCEL_ENV=production pnpm build && NODE_ENV=production VERCEL_ENV=production pnpm start`
2. open the index page http://localhost:3000 --> a random number will appear which will get cached
3. reload the index page: the same number is still present because it is cached
4. open http://localhost:3000/normal --> revalidatePath will get called for the index page
5. open the index page --> a new number will be displayed because of the working revalidatePath
6. open http://localhost:3000/timeout --> revalidatePath will get called inside a setTimeout
7. open the index page --> the same old number is still present because revalidateTag/revalidatePath does not work inside of a setTimeout
### Current vs. Expected behavior
if revalidateTag/revalidatePath is used inside a setTimeout it does not have any effect (tested in nodejs), but still the remaining code inside the setTimeout is executed. I implemented my own cache handler (like here: https://nextjs.org/docs/app/api-reference/next-config-js/incrementalCacheHandlerPath) and could see that the revalidateTag function from the cache handler is never called if the nextjs revalidateTag function is called inside a setTimeout.
### Provide environment information
```bash
Operating System:
Platform: darwin
Arch: arm64
Version: Darwin Kernel Version 22.6.0: Wed Jul 5 22:22:05 PDT 2023; root:xnu-8796.141.3~6/RELEASE_ARM64_T6000
Available memory (MB): 32768
Available CPU cores: 10
Binaries:
Node: 20.18.0
npm: 10.8.2
Yarn: 1.22.21
pnpm: 8.15.8
Relevant Packages:
next: 15.0.4-canary.5 // Latest available version is detected (15.0.4-canary.5).
eslint-config-next: N/A
react: 19.0.0-rc-5c56b873-20241107
react-dom: 19.0.0-rc-5c56b873-20241107
typescript: 5.3.3
Next.js Config:
output: N/A
```
### Which area(s) are affected? (Select all that apply)
Performance, Runtime
### Which stage(s) are affected? (Select all that apply)
Other (Deployed)
### Additional context
I have not tested it in deployed vercel env as we are self hosting using docker and nodejs.
I have also tested to wrap the setTimeout in an `unstable_after` function but this has not helped as well.
As a workaround the code can be rewritten to use a sleep/delay function (`const sleep = (ms: number) => new Promise((res) => setTimeout(res, ms));`) instead of a setTimout directly. Calling revalidateTag after the sleep function works as expected (`await sleep(100); revalidatePath("/")`) | bug,Performance,Runtime | low | Critical |
2,649,360,575 | neovim | loader: .cache/nvim/luac/ contains 250k files when using nvim.appimage | ### Problem
Nvim started freezing almost every session for me. Eventually I found that my `~/.cache/nvim/luac` directory has more than 250,000 files in it.
These appear to be cache files generated and named according to the path of the original lua file.
The problem is the `nvim.appimage` release mounts nvim at a different temporary path each time it is invoked. The naming scheme of these cache files changes, then on every invocation, making the cache pointless and creating N additional files on every launch of nvim. For me, that led to a directory that currently has 246,768 files in it.
Here is an example of 2 identical files in the the `./luac/` directory that are named differently only because the temporary mounting paths generated by appimage are different:
`%2ftmp%2f.mount_nvim.a65Rja0%2fusr%2fshare%2fnvim%2fruntime%2flua%2fvim%2ftreesitter.luac`
`%2ftmp%2f.mount_nvim.aNpxXgo%2fusr%2fshare%2fnvim%2fruntime%2flua%2fvim%2ftreesitter.luac`
I originally encountered this with the 0.9.5 appimage on RHEL7, but have confirmed the v0.11.0-dev-1129+gadbffff5d0 pre-release still behaves the same way.
### Steps to reproduce
To reproduce:
1) install `nvim.appimage`
2) `mv ~/.cache/nvim/luac ~/.cache/nvim/luac.backup`
3) `nvim`
4) Observe contents of `~/.cache/nvim/luac/`
5) Close nvim and run `nvim` again
6) Observe contents of `~/.cache/nvim/luac/` and see that new identical files have been added with a different mount prefix
Every time `nvim` is launched, more files are added.
### Expected behavior
Expected behavior is not to allow unbounded growth of the number of files in `~/.cache/nvim/luac/`.
When using `nvim.appimage`, the cache is effectively rendered moot between launches of nvim. A simple workaround to address the problem I'm experiencing would be to either not cache or remove cached luac files on close when appimage is used.
A more ideal solution would allow the cached files to be used and probably involves some sort of translation of appimage mount paths to a common filename in the cache directory that can be shared among multiple instances of nvim that are compatible (same version?)
### Nvim version (nvim -v)
v0.11.0-dev-1129+gadbffff5d0 all the way back to v0.9.5
### Vim (not Nvim) behaves the same?
unlikely since these are lua cache files? Not sure how to even check this.
### Operating system/version
RHEL9.3, RHEL7, and RHEL8
### Terminal name/version
zsh 5.8
### $TERM environment variable
screen
### Installation
appimage | bug,performance,lua,has:plan,startup | low | Minor |
2,649,413,924 | go | x/sync/errgroup: Group: document that Go may not be concurrent with Wait unless semaphore > 0 | (Edit: skip down to https://github.com/golang/go/issues/70284#issuecomment-2470418828; this is now a doc change request. --@adonovan)
### Go version
go version go1.23.1 darwin/amd64
### Output of `go env` in your module/workspace:
```shell
GO111MODULE=''
GOARCH='amd64'
GOBIN=''
GOCACHE='/Users/haaawk/Library/Caches/go-build'
GOENV='/Users/haaawk/Library/Application Support/go/env'
GOEXE=''
GOEXPERIMENT=''
GOFLAGS=''
GOHOSTARCH='amd64'
GOHOSTOS='darwin'
GOINSECURE=''
GOMODCACHE='/Users/haaawk/go/pkg/mod'
GONOPROXY=''
GONOSUMDB=''
GOOS='darwin'
GOPATH='/Users/haaawk/go'
GOPRIVATE=''
GOPROXY='https://proxy.golang.org,direct'
GOROOT='/usr/local/go'
GOSUMDB='sum.golang.org'
GOTMPDIR=''
GOTOOLCHAIN='auto'
GOTOOLDIR='/usr/local/go/pkg/tool/darwin_amd64'
GOVCS=''
GOVERSION='go1.23.1'
GODEBUG=''
GOTELEMETRY='local'
GOTELEMETRYDIR='/Users/haaawk/Library/Application Support/go/telemetry'
GCCGO='gccgo'
GOAMD64='v1'
AR='ar'
CC='clang'
CXX='clang++'
CGO_ENABLED='1'
GOMOD='/dev/null'
GOWORK=''
CGO_CFLAGS='-O2 -g'
CGO_CPPFLAGS=''
CGO_CXXFLAGS='-O2 -g'
CGO_FFLAGS='-O2 -g'
CGO_LDFLAGS='-O2 -g'
PKG_CONFIG='pkg-config'
GOGCCFLAGS='-fPIC -arch x86_64 -m64 -pthread -fno-caret-diagnostics -Qunused-arguments -fmessage-length=0 -ffile-prefix-map=/var/folders/_y/nj5hcsh93l12tmsszxgx7ntr0000gn/T/go-build2396141927=/tmp/go-build -gno-record-gcc-switches -fno-common'
```
### What did you do?
When following code is run:
```
package main
import (
"runtime"
"golang.org/x/sync/errgroup"
)
func main() {
runtime.GOMAXPROCS(1)
g := &errgroup.Group{}
g.SetLimit(1)
ch := make(chan struct{})
wait := make(chan struct{}, 2)
g.Go(func() error {
<-ch
wait <- struct{}{}
return nil
})
go g.Go(func() error {
println("I'm not blocked")
wait <- struct{}{}
return nil
})
println("Ok let's play")
close(ch)
g.Wait()
println("It's over already?")
<-wait
<-wait
}
```
https://go.dev/play/p/xTIsT1iouTd
### What did you see happen?
The program printed:
```
Ok let's play
It's over already?
I'm not blocked
```
### What did you expect to see?
The program printing:
```
Ok let's play
I'm not blocked
It's over already?
``` | Documentation,help wanted,NeedsFix | medium | Critical |
2,649,435,979 | nvm | NVM installer throws "/usr/bin/env: ‘node’: No such file or directory" | <!-- Thank you for being interested in nvm! Please help us by filling out the following form if you‘re having trouble. If you have a feature request, or some other question, please feel free to clear out the form. Thanks! -->
#### Operating system and version:
```ini
NAME="Pop!_OS"
VERSION="22.04 LTS"
ID=pop
ID_LIKE="ubuntu debian"
PRETTY_NAME="Pop!_OS 22.04 LTS"
VERSION_ID="22.04"
HOME_URL="https://pop.system76.com"
SUPPORT_URL="https://support.system76.com"
BUG_REPORT_URL="https://github.com/pop-os/pop/issues"
PRIVACY_POLICY_URL="https://system76.com/privacy"
VERSION_CODENAME=jammy
UBUNTU_CODENAME=jammy
LOGO=distributor-logo-pop-os
```
#### `nvm debug` output:
N/A
#### `nvm ls` output:
N/A
#### How did you install `nvm`?
<!-- (e.g. install script in readme, Homebrew) -->
`curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash`
#### What steps did you perform?
I did only the install
#### What happened?
<details>
There was an error in stdout.
```
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 16563 100 16563 0 0 182k 0 --:--:-- --:--:-- --:--:-- 183k
=> nvm is already installed in /home/knackstedt/.nvm, trying to update using git
=> => Compressing and cleaning up git repository
=> nvm source string already in /home/knackstedt/.bashrc
=> bash_completion source string already in /home/knackstedt/.bashrc
/usr/bin/env: ‘node’: No such file or directory
=> Close and reopen your terminal to start using nvm or run the following to use it now:
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
```
</details>
#### What did you expect to happen?
No errors and the install to proceed.
#### Is there anything in any of your profile files that modifies the `PATH`?
<!-- (e.g. `.bashrc`, `.bash_profile`, `.zshrc`, etc) -->
<!-- Please remove the following section if it does not apply to you -->
#### If you are having installation issues, or getting "N/A", what does `curl -I --compressed -v https://nodejs.org/dist/` print out?
<details>
<!-- do not delete the following blank line -->
```sh
* Trying 104.20.23.46:443...
* Connected to nodejs.org (104.20.23.46) port 443 (#0)
* ALPN, offering h2
* ALPN, offering http/1.1
* CAfile: /etc/ssl/certs/ca-certificates.crt
* CApath: /etc/ssl/certs
* TLSv1.0 (OUT), TLS header, Certificate Status (22):
* TLSv1.3 (OUT), TLS handshake, Client hello (1):
* TLSv1.2 (IN), TLS header, Certificate Status (22):
* TLSv1.3 (IN), TLS handshake, Server hello (2):
* TLSv1.2 (IN), TLS header, Finished (20):
* TLSv1.2 (IN), TLS header, Supplemental data (23):
* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
* TLSv1.3 (IN), TLS handshake, Certificate (11):
* TLSv1.3 (IN), TLS handshake, CERT verify (15):
* TLSv1.3 (IN), TLS handshake, Finished (20):
* TLSv1.2 (OUT), TLS header, Finished (20):
* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
* TLSv1.2 (OUT), TLS header, Supplemental data (23):
* TLSv1.3 (OUT), TLS handshake, Finished (20):
* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
* ALPN, server accepted to use h2
* Server certificate:
* subject: CN=*.nodejs.org
* start date: Feb 28 00:00:00 2024 GMT
* expire date: Mar 30 23:59:59 2025 GMT
* subjectAltName: host "nodejs.org" matched cert's "nodejs.org"
* issuer: C=GB; ST=Greater Manchester; L=Salford; O=Sectigo Limited; CN=Sectigo RSA Domain Validation Secure Server CA
* SSL certificate verify ok.
* Using HTTP2, server supports multiplexing
* Connection state changed (HTTP/2 confirmed)
* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0
* TLSv1.2 (OUT), TLS header, Supplemental data (23):
* TLSv1.2 (OUT), TLS header, Supplemental data (23):
* TLSv1.2 (OUT), TLS header, Supplemental data (23):
* Using Stream ID: 1 (easy handle 0x5adf80e1deb0)
* TLSv1.2 (OUT), TLS header, Supplemental data (23):
> HEAD /dist/ HTTP/2
> Host: nodejs.org
> user-agent: curl/7.81.0
> accept: */*
> accept-encoding: deflate, gzip, br, zstd
>
* TLSv1.2 (IN), TLS header, Supplemental data (23):
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
* old SSL session ID is stale, removing
* TLSv1.2 (IN), TLS header, Supplemental data (23):
* TLSv1.2 (OUT), TLS header, Supplemental data (23):
* TLSv1.2 (IN), TLS header, Supplemental data (23):
< HTTP/2 200
HTTP/2 200
< date: Mon, 11 Nov 2024 13:50:16 GMT
date: Mon, 11 Nov 2024 13:50:16 GMT
< content-type: text/html
content-type: text/html
< cache-control: public, max-age=3600, s-maxage=14400
cache-control: public, max-age=3600, s-maxage=14400
< last-modified: Mon, 11 Nov 2024 12:35:04 GMT
last-modified: Mon, 11 Nov 2024 12:35:04 GMT
< cf-cache-status: HIT
cf-cache-status: HIT
< age: 4489
age: 4489
< vary: Accept-Encoding
vary: Accept-Encoding
< strict-transport-security: max-age=31536000; includeSubDomains; preload
strict-transport-security: max-age=31536000; includeSubDomains; preload
< x-content-type-options: nosniff
x-content-type-options: nosniff
< server: cloudflare
server: cloudflare
< cf-ray: 8e0ec0162c281141-ORD
cf-ray: 8e0ec0162c281141-ORD
< content-encoding: br
content-encoding: br
<
* Connection #0 to host nodejs.org left intact
```
</details>
| needs followup,installing nvm | low | Critical |
2,649,436,442 | go | x/tools/gopls: specialize file watching with a setting rather than user agent | Thinking more, I am more convinced that gopls shouldn't use the `ClientName` in adjusting its functionality.
For example, we should've used an explicit setting instead of using the ClientName in https://github.com/golang/tools/blob/f1f7c26696be8f94198beafd05b36843515ac2ba/gopls/internal/cache/snapshot.go#L950
_Originally posted by @hyangah in https://github.com/golang/go/issues/70205#issuecomment-2463393543_
Extracting this to an issue: for some reason (expedience, most likely) I chose to specialize file watching patterns based on the VS Code user agent (because VS Code performs very poorly with certain watch patterns). A more explicit (and therefore better) solution would have been to make the VS Code Go client inject a setting that selects the most appropriate watch patterns.
| gopls,Tools | low | Minor |
2,649,437,171 | godot | Resizing editor window causes editor to render offset | ### Tested versions
v4.0.stable.official [92bee43ad] through v4.4.dev4.official [36e6207bb]
### System information
Godot v4.4.dev4 - Windows 10.0.19045 - Multi-window, 1 monitor - Vulkan (Forward+) - dedicated NVIDIA GeForce GTX 980 Ti (NVIDIA; 32.0.15.6603) - Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz (8 threads)
### Issue description
When resizing the Godot editor window, the editor stops rendering in the correct location of the window:
https://github.com/user-attachments/assets/b93d2c34-2841-48d5-88b4-48110e728b00
This does not happen with the Project Manager, but happens with any project.
Since this happens as early as 4.0, I expect this has already been reported, but I can't find the existing issue...
Restarting the computer resolved the issue and I can no longer reproduce it, even though it was happening consistently until the restart.
### Steps to reproduce
1. Open Godot editor
2. Resize the window
### Minimal reproduction project (MRP)
N/A | bug,topic:rendering,topic:editor | low | Minor |
2,649,447,718 | react-native | Codegen not respecting ignored linked dependencies in react-native.config.js | ### Description
When a third party react-native dependency exists in an app's package.json but has been disabled from auto-linking in the `react-native.config.js` file, react-native-codegen still generates native Objective-C code for the unlinked dependency, leading to build failures.
### Steps to reproduce
Create a new RN project and add a third party library that contains native code (e.g. `react-native-screens`)
```
npx @react-native-community/cli@latest init VerifyCodegen --skip-install --version 0.76.1
cd VerifyCodegen
yarn install
yarn add react-native-screens
```
Create a `react-native.config.js` file and disable `react-native-screens` from auto-linking on iOS...
```
module.exports = {
dependencies: {
'react-native-screens': {
platforms: {
ios: null,
android: null,
},
},
},
};
```
Run pod install and try and build/run the iOS app
```
npx pod-install
yarn ios
```
Build will fail because the generated `RCTThirdPartyFabricComponentsProvider` files reference things that aren't linked in the project (e.g. `_RNSScreenCls`).
### React Native Version
0.76.1
### Affected Platforms
Runtime - iOS
### Areas
Codegen
### Output of `npx react-native info`
```text
info Fetching system and libraries information...
System:
OS: macOS 14.6.1
CPU: (10) arm64 Apple M1 Max
Memory: 190.97 MB / 32.00 GB
Shell:
version: "5.9"
path: /bin/zsh
Binaries:
Node:
version: 18.18.0
path: ~/.nvm/versions/node/v18.18.0/bin/node
Yarn:
version: 1.22.19
path: ~/.nvm/versions/node/v18.15.0/bin/yarn
npm:
version: 9.8.1
path: ~/.nvm/versions/node/v18.18.0/bin/npm
Watchman:
version: 2024.04.01.00
path: /opt/homebrew/bin/watchman
Managers:
CocoaPods:
version: 1.15.2
path: /Users/brent.kelly/.rvm/gems/ruby-2.7.5/bin/pod
SDKs:
iOS SDK:
Platforms:
- DriverKit 23.5
- iOS 17.5
- macOS 14.5
- tvOS 17.5
- visionOS 1.2
- watchOS 10.5
Android SDK: Not Found
IDEs:
Android Studio: 2024.2 AI-242.23339.11.2421.12550806
Xcode:
version: 15.4/15F31d
path: /usr/bin/xcodebuild
Languages:
Java:
version: 17.0.13
path: /usr/bin/javac
Ruby:
version: 2.7.5
path: /Users/brent.kelly/.rvm/rubies/ruby-2.7.5/bin/ruby
npmPackages:
"@react-native-community/cli":
installed: 15.0.0
wanted: 15.0.0
react:
installed: 18.3.1
wanted: 18.3.1
react-native:
installed: 0.76.1
wanted: 0.76.1
react-native-macos: Not Found
npmGlobalPackages:
"*react-native*": Not Found
Android:
hermesEnabled: true
newArchEnabled: true
iOS:
hermesEnabled: true
newArchEnabled: true
```
```
### Stacktrace or Logs
```text
Undefined symbols for architecture arm64:
"_RNSFullWindowOverlayCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSModalScreenCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenContainerCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenContentWrapperCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenFooterCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenNavigationContainerCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenStackCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenStackHeaderConfigCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSScreenStackHeaderSubviewCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
"_RNSSearchBarCls", referenced from:
_RCTThirdPartyFabricComponentsProvider in libReact-RCTFabric.a[41](RCTThirdPartyFabricComponentsProvider.o)
ld: symbol(s) not found for architecture arm64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
```
### Reproducer
https://github.com/mrbrentkelly/rn-codegen-linking-bug
### Screenshots and Videos
_No response_ | Resolution: PR Submitted,Type: New Architecture | low | Critical |
2,649,470,844 | TypeScript | Regression: Mapped type with recursive key remapping crashes tsc with "RangeError: Maximum call stack size exceeded" in TS@5.4+ | ### 🔎 Search Terms
"Mapped type", "Recursive mapped type", "Recursive type", "RangeError: maximum call stack size exceeded"
### 🕗 Version & Regression Information
- This changed between versions 5.3.3 and 5.4.5
### ⏯ Playground Link
https://www.typescriptlang.org/play/?ts=5.4.5#code/KYDwDg9gTgLgBDAnmYcBiAbAhjGwB2AKssADwDKEArlAMaqh74AmAznBAEYBWwtMAGjiEsUAObAYAPjgBeOAG8AUHDgBtANLBEcAJb44Aa20QAZnEo16cLOy07GBNnFYwo+sStVwA-Bep0wJraALpwjizsXLz8Xt6+cAAGACQK9gC+AHSpxohm6Ni4BMQoFAH0wYghQiLikjIAZC5uHumJcd4AXHD2Hd34wABuwFAhALQ+3bUSMADcSunzSkgo6LrAGMzkG3zw8sqqMFicGMDdru74YvOqpuub5y1X8+lKyyRwAILMzFDArOx9l5IK4AMIQZhnZqXa5eSGsWjuMAwXQQfCPGE3Gw-P4AjEeF5vFaoACqrBGckUXl0zH6VAAtpwRlj8Fh6VCLgSvFgcf9WN1vr8+YT3qtMDgmMBmGSKfJxUUiCRSDKoEI0Pctjt+FJ5gB6XXxQ0APR8QA
### 💻 Code
```ts
export type FlattenType<Source extends object, Target> = {
[Key in keyof Source as Key extends string
? Source[Key] extends object
? `${Key}.${keyof FlattenType<Source[Key], Target> & string}`
: Key
: never]-?: Target;
};
type FieldSelect = {
table: string;
field: string;
}
type Address = {
postCode: string;
description: string;
address: string;
}
type User = {
id: number;
name: string;
address: Address;
}
type FlattenedUser = FlattenType<User, FieldSelect>;
// ^?
```
### 🙁 Actual behavior
Compilation crashes with "RangeError: maximum call stack size exceeded"
### 🙂 Expected behavior
No crash with the type working correctly or explicit error on the recursive type like "Type instantiation is excessively deep and possibly infinite. ts(2589)"
### Additional information about the issue
Same playground in version 5.3.3 where the type resolves with no crash
https://www.typescriptlang.org/play/?ts=5.3.3#code/KYDwDg9gTgLgBDAnmYcBiAbAhjGwB2AKssADwDKEArlAMaqh74AmAznBAEYBWwtMAGjiEsUAObAYAPjgBeOAG8AUHDgBtANLBEcAJb44Aa20QAZnEo16cLOy07GBNnFYwo+sStVwA-Bep0wJraALpwjizsXLz8Xt6+cAAGACQK9gC+AHSpxohm6Ni4BMQoFAH0wYghQiLikjIAZC5uHumJcd4AXHD2Hd34wABuwFAhALQ+3bUSMADcSunzSkgo6LrAGMzkG3zw8sqqMFicGMDdru74YvOqpuub5y1X8+lKyyRwAILMzFDArOx9l5IK4AMIQZhnZqXa5eSGsWjuMAwXQQfCPGE3Gw-P4AjEeF5vFaoACqrBGckUXl0zH6VAAtpwRlj8Fh6VCLgSvFgcf9WN1vr8+YT3qtMDgmMBmGSKfJxUUiCRSDKoEI0Pctjt+FJ5gB6XXxQ0APR8QA
We ran into this crash when upgrading the ts version in our project.
There's a workaround based on the approach described in https://blog.swmansion.com/deep-flatten-typescript-type-c0d123028d82 which doesn't cause a crash on these versions.
The original type that caused the crash has mostly the same motivation as the blog post above. The provided snippet was simplified to a minimal reproducible example. | Bug,Help Wanted | low | Critical |
2,649,496,516 | bitcoin | RFC: Adopt C++ Safe Buffers? | C++ is unsafe, meaning that any code written in it may cause undefined behavior at runtime. One possible source of undefined behavior is out-of-range memory access.
While some limited compiler warnings exist to detect some obvious cases, tracking down out-of-range memory access is usually done at runtime with debugging tools such as Valgrind or Asan. However, such tools can normally not be used in production, because they are not hardening tools, see https://stackoverflow.com/a/70004411/2084795. Some C++ standard libraries provide options to enable a hardened build, which can also be used in production, see https://libcxx.llvm.org/Hardening.html.
However, this requires using the standard library containers or primitives to represent buffers. For example, instead of using a raw C-array, `std::array` should be preferred. Also, instead of using a raw C-pointer, `std::span` should be preferred.
My understanding is that only libc++ offers such a hardened build right now, so the benefit would be limited. Also, the required patch is large-ish. However, I think it would be good to keep this hardening feature in mind and use `std::array` and `std::span` for new code. Possibly in the future, those can be enforced. `std::array` via https://clang.llvm.org/extra/clang-tidy/checks/modernize/avoid-c-arrays.html and `std::span` (really all buffer representations) via `-Wunsafe-buffer-usage` https://clang.llvm.org/docs/SafeBuffers.html.
| Brainstorming | low | Critical |
2,649,560,920 | PowerToys | Option to disable quick access | ### Description of the new feature / enhancement
Having a switch (enabled by default) to allows the user to enable/disable the powertoys systemtray quick access menu.
There is nothing in that menu so being able to disable it would be nice.
Even having the option to change it behavior would also be welcome like single click opens powertoys settings.
### Scenario when this would be used?
Gives people that do not use the feature more choice.
### Supporting information
I do not use it, and when I click it I just want it to open PowerToys settings and not the quick access menu, since there is nothing in that menu anyway.
Having the option to disable or change the behavior gives the user more choice on how they access PowerToys and/or its features. | Needs-Triage | low | Minor |
2,649,603,341 | godot | `Align Transform with View` don't change position when in orthogonal mode | ### Tested versions
4.4.dev
### System information
Windows 11 - Vulkan (Forward+)
### Issue description
`Align Transform with View` only changes position when the view is in perspective mode. In orthogonal mode, it just changes rotation having the same effect as `Align Rotation with View`.
It should work in orthogonal mode too. For reference, Blender's align-to-view feature works in both perspective and orthogonal modes.
### Steps to reproduce
- Set the 3D view to orthogonal mode
- Create a camera (or any `Node3D`)
- Select the camera and go `3D view menu > Align Transform with View`. It just rotates.
### Minimal reproduction project (MRP)
N/A | bug,discussion,topic:editor,topic:3d | low | Minor |
2,649,707,796 | yt-dlp | [Elonet] `Unable to extract json` | ### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
_No response_
### Provide a description that is worded well enough to be understood
Example URL: https://elonet.finna.fi/Record/kavi.elonet_elokuva_107867
yt-dlp can't download from [elonet.finna.fi](https://elonet.finna.fi) anymore.
I'm still able to get [a downloadable m3u8 URL](https://d2ygzyc0kuitls.cloudfront.net/suitevodedge/_definst_/smil/256248057.smil/playlist.m3u8) from the [frame](https://players.icareus.com/elonet/embed/vod/256248057) source, so I'm not sure where the issue is exactly.
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['-vU', 'test:elonet']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8 (No VT), error utf-8 (No VT), screen utf-8 (No VT)
[debug] yt-dlp version nightly@2024.11.10.232816 from yt-dlp/yt-dlp-nightly-builds [b83ca24eb] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-8.1-6.3.9600-SP0 (OpenSSL 1.1.1t 7 Feb 2023)
[debug] exe versions: ffmpeg 4.3, ffprobe 2022-07-24-git-39a538f430-full_build-www.gyan.dev
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi
[debug] Loaded 1839 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@2024.11.10.232816 from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@2024.11.10.232816 from yt-dlp/yt-dlp-nightly-builds)
[TestURL] Extracting URL: test:elonet
[TestURL] Test URL: https://elonet.finna.fi/Record/kavi.elonet_elokuva_107867
[Elonet] Extracting URL: https://elonet.finna.fi/Record/kavi.elonet_elokuva_107867
[Elonet] 107867: Downloading webpage
ERROR: [Elonet] 107867: Unable to extract json; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "yt_dlp\extractor\common.py", line 742, in extract
File "yt_dlp\extractor\elonet.py", line 45, in _real_extract
File "yt_dlp\extractor\common.py", line 1382, in _html_search_regex
File "yt_dlp\extractor\common.py", line 1346, in _search_regex
# Using the m3u8 URL
[debug] Command-line config: ['-v', 'https://d2ygzyc0kuitls.cloudfront.net/suitevodedge/_definst_/smil/256248057.smil/playlist.m3u8']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8 (No VT), error utf-8 (No VT), screen utf-8 (No VT)
[debug] yt-dlp version nightly@2024.11.10.232816 from yt-dlp/yt-dlp-nightly-builds [b83ca24eb] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-8.1-6.3.9600-SP0 (OpenSSL 1.1.1t 7 Feb 2023)
[debug] exe versions: ffmpeg 4.3, ffprobe 2022-07-24-git-39a538f430-full_build-www.gyan.dev
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi
[debug] Loaded 1839 extractors
[generic] Extracting URL: https://d2ygzyc0kuitls.cloudfront.net/suitevodedge/_definst_/smil/256248057.smil/playlist.m3u8
[generic] playlist: Downloading webpage
[debug] Identified a direct video link
[generic] playlist: Downloading m3u8 information
[generic] playlist: Checking m3u8 live status
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec, channels, acodec, size, br, asr, proto, vext, aext, hasaud, source, id
[debug] Default format spec: bestvideo*+bestaudio/best
[info] playlist: Downloading 1 format(s): 4128
[debug] Invoking hlsnative downloader on "https://d2ygzyc0kuitls.cloudfront.net/suitevodedge/_definst_/smil/256248057.smil/chunklist_b4128000_cfc21pbC8yNTYyNDgwNTcudHRtbA==.m3u8"
[hlsnative] Downloading m3u8 manifest
[hlsnative] Total fragments: 671
[download] Destination: playlist [playlist].mp4
[debug] File locking is not supported. Proceeding without locking
[download] 100% of 2.48GiB in 00:13:11 at 3.21MiB/s
[debug] ffprobe command line: ffprobe -hide_banner -show_format -show_streams -print_format json "file:playlist [playlist].mp4"
[debug] ffmpeg command line: ffprobe -show_streams "file:playlist [playlist].mp4"
[FixupM3u8] Fixing MPEG-TS in MP4 container of "playlist [playlist].mp4"
[debug] ffmpeg command line: ffmpeg -y -loglevel repeat+info -i "file:playlist [playlist].mp4" -map 0 -dn -ignore_unknown -c copy -f mp4 -bsf:a aac_adtstoasc -movflags +faststart "file:playlist [playlist].temp.mp4"
```
| site-bug,triage | low | Critical |
2,649,731,739 | pytorch | There is probably a destructor ordering accident going on with registerPythonModule | Thanks to @ezyang for suggesting this.
Here's what I think it looks like:
- m.set_python_module initializes the pythonModulesSingleton
- during program exit, the pythonModulesSingleton gets destroyed first (statically initialized objects are destroyed in reverse order)
- m (the torch::Library)'s destructor runs and needs to access pythonModulesSingleton
- undefined behavior ensues.
The implication is that somehow we need to initialize the pythonModulesSingleton before the torch::Library object. The thing I'm not sure about is: how does torch::Library and the dispatcherSingleton interact? They have this same problem, unless we always initialize a dispatcherSingleton before the torch::Library object or this theory is wrong.
This needs a little more investigation
cc @jbschlosser @albanD | module: crash,module: cpp,triaged,module: python frontend | low | Minor |
2,649,761,010 | ui | [bug]: Calender UI is messed up after updating to react-day-picker | ### Describe the bug
Calender component has issue i updated react-day-picker then issue appeared. UI of calender is messed up terible UI.
Look on following image:

**I am using NextJs14 and problem version of react-day-picker is ^9.3.0**
Another thing I noticed is that the developer of react-day-picker has changed something in it; currently I am getting an error on:

Thanks @shadcn
### Affected component/components
Calender Component
### How to reproduce
Just update react-day-picker to latest version that is 9.3.0
### Codesandbox/StackBlitz link
_No response_
### Logs
_No response_
### System Info
```bash
Windows 10
NextJs14
```
### Before submitting
- [X] I've made research efforts and searched the documentation
- [X] I've searched for existing issues | bug | medium | Critical |
2,649,805,443 | rust | Bad suggestion for macro_rules macros generated from a proc-macro | In a proc-macro that generates a macro_rules macro that triggers lints like `unsafe_op_in_unsafe_fn`, or `unsafe_attr_outside_unsafe`, the suggestion ends up suggesting that the `unsafe` should go around the attribute, which is invalid syntax.
```rust
// proc-macro `pm`
use proc_macro::TokenStream;
#[proc_macro_attribute]
pub fn pm(_attr: TokenStream, _item: TokenStream) -> TokenStream {
"macro_rules! foo {
() => {pub unsafe fn foo() { let _ = std::mem::zeroed::<i32>(); } };
}"
.parse()
.unwrap()
}
```
```rust
#![warn(unsafe_op_in_unsafe_fn)]
#[pm::pm]
struct S;
foo! {}
```
Gives a suggestion that ends up being:
```rust
{ unsafe #[pm::pm]}
```
which is invalid syntax.
Seen with the [`vtable`](https://crates.io/crates/vtable) crate.
This is a bit of a curious case, as I would have expected the tokens of the macro_rules definition to be 2021 edition, and that the tokens it in turn generates also be 2021. Offhand I don't know how macro_rules determines which edition the tokens it emits should be (I'm guessing it is using the edition of the local crate, instead of the edition of the generated macro_rules definition).
### Meta
`rustc --version --verbose`:
```
rustc 1.84.0-nightly (143ce0920 2024-11-10)
binary: rustc
commit-hash: 143ce0920a2307b19831160a01f06f107610f1b2
commit-date: 2024-11-10
host: aarch64-unknown-linux-gnu
release: 1.84.0-nightly
LLVM version: 19.1.3
```
| A-lints,A-diagnostics,T-compiler,C-bug,A-suggestion-diagnostics,D-invalid-suggestion,D-edition,A-edition-2024,L-unsafe_op_in_unsafe_fn,I-edition-triaged | low | Major |
2,649,811,387 | electron | Native Wayland is completely BROKEN in Electron | ### Preflight Checklist
- [x] I have read the [Contributing Guidelines](https://github.com/electron/electron/blob/main/CONTRIBUTING.md) for this project.
- [x] I agree to follow the [Code of Conduct](https://github.com/electron/electron/blob/main/CODE_OF_CONDUCT.md) that this project adheres to.
- [x] I have searched the [issue tracker](https://www.github.com/electron/electron/issues) for a bug report that matches the one I want to file, without success.
### Electron Version
32.2.1
### What operating system(s) are you using?
Ubuntu
### Operating System Version
Ubuntu 24.04.1
### What arch are you using?
x64
### Last Known Working Electron version
_No response_
### Expected Behavior
The flags ``--enable-features=UseOzonePlatform --ozone-platform=wayland`` or ``--ozone-platform-hint=wayland --enable-features=WaylandWindowDecorations`` should open the Electron application in a native wayland window without any issues.
### Actual Behavior
The application does not open **at all**; it will only open in XWayland. Forcing Native Wayland with any flags such as ``--enable-features=UseOzonePlatform --ozone-platform=wayland`` or ``--ozone-platform-hint=wayland --enable-features=WaylandWindowDecorations`` will prevent the application from opening.
### Testcase Gist URL
_No response_
### Additional Information
Native Wayland is simply not supported in Electron. This has been tested on GNOME Shell 46; if the results are different on other Wayland compositor shells or operating systems, please list them below.
This is also why other Electron applications such as VS Code have these issues:
https://github.com/microsoft/vscode/issues/207033
https://github.com/microsoft/vscode/issues/231955 | platform/linux,bug :beetle:,wayland,32-x-y | medium | Critical |
2,649,822,476 | kubernetes | not increment pInfo.Attempts when the scheduling fails with Error status | The current `pInfo.Attempts` is simple; when the scheduler `Pop()` the pod, it increments `pInfo.Attempts`.
And, also this `pInfo.Attempts` is used to calculate the backoff time.
There are basically two failures that the scheduling cycle could make; Unschedulable or Error.
- Unschedulable: the Pod is unschedulable; e.g., no node has enough resource, nodeaffinity, etc.
- Error: the Pod cannot be scheduled because of unexpected errors; e.g., kube-apiserver is unstable, network issue, etc.
Given that [the backoff is a penalty for wasting the scheduling cycle](https://github.com/kubernetes/kubernetes/blob/8fe10dc378b7cc3b077b83aef86622e1019302d5/pkg/scheduler/framework/interface.go#L205-L207), we shouldn't increment `Attempts` when the Pod comes back to the queue with Error status,
so that the backoff time doesn't get longer because of those errors.
As the actual example, what could happen now is:
1. The scheduler fails to schedule all the pods because the cluster is under the networking issue.
2. `pInfo.Attempts` in all the Pods gets bigger and bigger meanwhile.
3. The network issue is resolved and some Pods go scheduled.
4. But then, if some Pods fail, they have a large `pInfo.Attempts`, and hence get a super long backoff time, which is unfair because most of their `Attempts` failed not for scheduling reasons, but because of the network issue.
/kind feature
/assign
/sig scheduling | sig/scheduling,kind/feature,needs-triage | low | Critical |
2,649,937,324 | pytorch | TorchScript model doesn't work with autocast | ### 🐛 Describe the bug
Hi I want to use autocast with a script model and had the following error.
```python
import torch
import torch.nn as nn
from torch.amp import autocast
class SimpleCNN(nn.Module):
def __init__(self):
super(SimpleCNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5)
def forward(self, x):
with autocast(device_type="cuda", enabled=True):
return self.conv1(x)
device = torch.device('cuda')
# Create an instance of the network
net = SimpleCNN()
net.to(device)
# Create a sample input tensor
input_tensor = torch.randn(1, 3, 28, 28)
input_tensor = input_tensor.to(device)
# Pass the input through the network
output = net(input_tensor)
print(output.shape)
# Pass the input through the script network
script_net = torch.jit.script(net)
output2 = script_net(input_tensor)
print(output2.shape)
```
```
torch.Size([1, 6, 24, 24])
Traceback (most recent call last):
File "D:\workspace_tf\Einstein_reg_8\src\neosoft\misc\SimpleCNN.py", line 39, in <module>
output2 = script_net(input_tensor)
File "C:\Python39\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Python39\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
RuntimeError: The following operation failed in the TorchScript interpreter.
Traceback of TorchScript (most recent call last):
File "D:\workspace_tf\Einstein_reg_8\src\neosoft\misc\SimpleCNN.py", line 21, in forward
# x = torch.relu(self.fc1(x))
# return x
return self.conv1(x)
~~~~~~~~~~ <--- HERE
File "C:\Python39\lib\site-packages\torch\nn\modules\conv.py", line 554, in forward
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
~~~~~~~~~~~~~~~~~~ <--- HERE
File "C:\Python39\lib\site-packages\torch\nn\modules\conv.py", line 549, in _conv_forward
self.groups,
)
return F.conv2d(
~~~~~~~~ <--- HERE
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
RuntimeError: Input type (struct c10::Half) and bias type (float) should be the same
```
### Versions
Collecting environment information...
PyTorch version: 2.5.1+cu118
Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 11 Pro (10.0.22631 64-bit)
GCC version: Could not collect
Clang version: Could not collect
CMake version: Could not collect
Libc version: N/A
Python version: 3.9.7 (tags/v3.9.7:1016ef3, Aug 30 2021, 20:19:38) [MSC v.1929 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.22631-SP0
Is CUDA available: True
CUDA runtime version: 11.8.89
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA RTX A6000
GPU 1: NVIDIA RTX A6000
Nvidia driver version: 528.02
cuDNN version: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\bin\cudnn_ops_train64_8.dll
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Name: Intel(R) Core(TM) i9-10900F CPU @ 2.80GHz
Manufacturer: GenuineIntel
Family: 207
Architecture: 9
ProcessorType: 3
DeviceID: CPU0
CurrentClockSpeed: 2808
MaxClockSpeed: 2808
L2CacheSize: 2560
L2CacheSpeed: None
Revision: None
Versions of relevant libraries:
[pip3] numpy==1.22.4
[pip3] onnx==1.14.0
[pip3] onnxruntime-gpu==1.16.3
[pip3] pytorch-lightning==2.2.4
[pip3] tf2onnx==1.15.1
[pip3] torch==2.5.1+cu118
[pip3] torch-dct==0.1.6
[pip3] torch-tb-profiler==0.4.3
[pip3] torchaudio==2.5.1+cu118
[pip3] torchdiffeq==0.2.3
[pip3] torchmetrics==1.4.0
[pip3] torchvision==0.20.1+cu118
[conda] Could not collect
cc @EikanWang @jgong5 @wenzhe-nrv @sanchitintel | oncall: jit | low | Critical |
2,649,969,152 | terminal | Ctrl+clicking URLs partially out of viewport not working as expected | ### Windows Terminal version
1.22.2912.0
### Windows build number
10.0.22635.4445
### Other Software
_No response_
### Steps to reproduce
1. Open Windows Terminal
2. Resize Terminal window to a small size
3. Press 'Enter' key until the active command prompt (with blinking cursor) reaches the last visible row in the window:

4. Run a command with a long URL, like: `echo 'https://github.com/microsoft/terminal/blob/main/src/cascadia/TerminalCore/lib/terminalcore-lib.vcxproj'`
5. Ensure the URL wraps to span 2 or more lines. If not, resize the window to reduce the width.

6. Scroll up one line so that only the first line of the wrapped URL is visible: 
7. Ctrl+click the visible part of the URL
### Expected Behavior
It should navigate to the full URL (https://github.com/microsoft/terminal/blob/main/src/cascadia/TerminalCore/lib/terminalcore-lib.vcxproj)
### Actual Behavior
It navigates to the part of the URL that's visible (https://github.com/microsoft/terminal/bl), which is often invalid | Area-Interaction,Issue-Bug,Product-Terminal | low | Minor |
2,649,982,759 | bitcoin | Tracepoint Interface Tracking Issue | With https://github.com/bitcoin/bitcoin/pull/26593 merged, a few **ideas** for the Bitcoin Core tracepoint interface that _could_ be next steps. I posted these as a comment in https://github.com/bitcoin/bitcoin/pull/26593#issuecomment-2468000391 but surfacing them here for more visibility.
These ideas are all up for discussion and aren't TODOs. The numbers don't indicate order or priority but make it easier reference them.
## Dependencies
1. - [ ] We _could_ internalize the relevant macro parts of systemtap's `sys/sdt.h` for the Linux tracepoints. This would allow us to drop the external dependency on systemtap, as we don't use 99% of it. Some prior commentary on this can be found here: https://github.com/hebasto/bitcoin/pull/162#issuecomment-2074645621
## MacOS & FreeBSD support
2. - [ ] In the past I've managed to use a simple macro build a bitcoind with tracepoints on macOS. While our ebpf based demo scripts aren't compatible, @kouloumos DTrace scripts from https://github.com/bitcoin/bitcoin/pull/25541 are. This could look similar to https://github.com/0xB10C/bitcoin/blob/13b0ce221600fc7040502c834c51433ca96f91c3/src/util/trace.h#L35-L63. However, I currently don't have access to a macOS system to further work on this - I'm looking to rent one.
3. - [ ] The same could possible on FreeBSD with e.g. these macros https://github.com/0xB10C/bitcoin/blob/13b0ce221600fc7040502c834c51433ca96f91c3/src/util/trace.h#L104-L119. I haven't tested this on FreeBSD yet. In https://github.com/bitcoin/bitcoin/pull/27458#pullrequestreview-1387810492, vasild mentiones he'd interested in FreeBSD tracepoints. My understanding is that the same macOS DTrace scripts from 25541 would work there too.
4. - [ ] At least for macOS, we'd need an per-tracepoint interface definition similar to https://github.com/0xB10C/bitcoin/blob/13b0ce221600fc7040502c834c51433ca96f91c3/src/util/trace.h#L121-L236. With some more commentary, these _could_ replace the list of tracepoints in https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#tracepoint-documentation. This would solve something similar to https://github.com/bitcoin/bitcoin/pull/29877#issuecomment-2061028339.
## Interface stabillity improvements
5. - [ ] Even if we don't do 4. (because we e.g. don't want to do 2.), casting the tracepoint arguments to the type we expect to pass would be worthwhile to avoid problems like https://github.com/bitcoin/bitcoin/pull/29877. For some of our traceponts, we already do this: e.g. https://github.com/bitcoin/bitcoin/blob/900b17239fb25750fd30b4af6e6c38096374b71f/src/validation.cpp#L2902-L2907
## Example scripts
6. - [ ] We *could* drop the example scripts from `/contrib/tracing/*` and maintain them, along with proper tests in a CI, Bitcoin Core version compatibility information, possibly libbpf-based C or Rust tools (https://github.com/bitcoin/bitcoin/issues/30298), ... in an external repository like, for example, `0xb10c/tracing-scripts`, `bitcoin-core/tracing-scripts`, or `bitcoin-monitoring/tracing-scripts` (what ever would works best).
## Maintain tracepoint interface, drop tracepoint implemenation
7. - [ ] If we at some point decide that maintaining the tracepoints in Bitcoin Core adds too much maintenance burden compared to the actual usage they're getting, we could drop the tracepoints but keep the tracepoint interface. We now have a unit test that includes a few nop tracepoints to check that the interface will still compile (https://github.com/0xB10C/bitcoin/blob/0de3e96e333090548a43e5e870c4cb8941d6baf1/src/test/util_trace_tests.cpp). This would allow us to drop the bcc python dependency in the CI and to remove the `interface_usdt_*` functional tests (which need to run in VM and can't run in a container). Tracepoint users could maintain a patch on Bitcoin Core adding the tracepoints (and tests) they want back in. We'd however loose the tracepoints in release (or actually all) builds which currently allow e.g. everyone experiencing problems with their node to hook into them and extract data without needing to restart it. | Resource usage,interfaces | low | Major |
2,649,994,656 | rust | Compiler fails to see an existing trait implementation for an indirectly referred type | I get compilation errors for a `typenum`-based code which can be minimized to this snippet ([playground](https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=2bf0adba26b2b4ef8f5a9412684edfbf)):
```rust
use core::ops::Add;
use typenum::{Sum, Unsigned, U8};
pub type TagSize = U8;
pub type TagSizePlus1 = typenum::Add1<TagSize>; // does not work
// pub type TagSizePlus1 = typenum::U9; // works
pub fn foo<N>()
where
N: Add<TagSizePlus1>,
Sum<N, TagSizePlus1>: Unsigned,
{
// code
}
```
`Add1<A>` is an [alias](https://docs.rs/typenum/latest/typenum/operator_aliases/type.Add1.html) for `<A as Add<B1>>::Output;` and `Sum<A, B>` is an [alias](https://docs.rs/typenum/latest/typenum/operator_aliases/type.Sum.html) for `<A as Add<B>>::Output`;
The snippet results in the following compilation error:
```text
error[E0277]: cannot add `UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B1>` to `N`
--> src/lib.rs:10:1
|
10 | / pub fn foo<N>()
11 | | where
12 | | N: Add<TagSizePlus1>,
13 | | Sum<N, TagSizePlus1>: Unsigned,
| |___________________________________^ no implementation for `N + UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B1>`
```
But replacing `Sub1<SomeSize>` with `U9` (an [alias](https://docs.rs/typenum/latest/typenum/consts/type.U9.html) for `UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B1>`) works just fine!
It looks like the compiler successfully resolves `Add1<SomeSize>` to `UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B1>`, but then for some reason gets tripped while resolving the bounds and is unable to see that the bound on `N` which allows the addition.
Potentially relevant issue: https://github.com/rust-lang/rust/issues/79629 | A-trait-system,A-associated-items,C-bug,T-types,fixed-by-next-solver | low | Critical |
2,650,022,661 | pytorch | torch.norm produces wrong results | ### 🐛 Describe the bug
```python
import torch
import numpy as np
TV = torch.load("/home/dsq/NIPS/opt/siren/experiment_scripts/TV_e.pth").cpu()
print(TV.shape)
print(torch.linalg.norm(TV).item())
print(torch.linalg.norm(torch.linalg.norm(TV, dim=-1, keepdim=True)).item())
print(np.linalg.norm(TV.cpu().detach().numpy()).item())
print(torch.linalg.norm(torch.sqrt(TV[:, 0] ** 2 + TV[:, 1] ** 2)).item())
print(torch.sqrt((torch.sqrt(TV[:, 0] ** 2 + TV[:, 1] ** 2) ** 2).sum()).item())
```
2.5.1+cu12.4, numpy 1.26.4 results:
torch.Size([38000, 2])
141.7647705078125
141.76475524902344
141.7647705078125
141.76475524902344
141.76475524902344
2.4.1+cu12.4, numpy 1.24.4 results:
torch.Size([38000, 2])
141.7647705078125
141.76475524902344
141.76475524902344
141.76475524902344
141.76475524902344
Expected results:
141.76475524902344
### Versions
2.5.1+cu12.4, 2.4.1+cu12.4, and 1.13.1+cu11.7
cc @jianyuh @nikitaved @pearu @mruberry @walterddr @xwang233 @Lezcano | needs reproduction,triaged,module: linear algebra | low | Critical |
2,650,040,776 | vscode | Terminal: Run Recent Command shows up when there are no terminals | Repro:
1. Close all terminals
2. Run the command, 🐛 nothing happens

We should either hide it or implement it when there isn't a terminal. | bug,help wanted,terminal-shell-integration | low | Minor |
2,650,045,072 | godot | float `rotate_toward()` behaves incorrectly with negative delta | ### Tested versions
4.3
### System information
Any
### Issue description
`rotate_toward()` is implemented by https://github.com/godotengine/godot/pull/80225, but set the incorrect target when delta is negative.
`move_toward()` when delta is negative, does not reach the original target and goes infinitely negative. It is correct.
`rotate_toward()` can still reach the original target even if delta is negative, but it will erroneously reach the inversed target.
It makes complete no sense to reach the inverted target. If you wanted to do that, you could simply invert the to instead of delta when passing the argument in the first place. Also, if using negative delta result is reaching the inverted target, it means that you cannot use negative delta for rotating turrets with fixed rotation direction and the like.
```gdscript
extends Node2D
func _ready() -> void:
var scl_pos = 0
var scl_neg = 0
var scr_to = 10
var rad_pos = 0
var rad_neg = 0
var rad_to = PI * 0.5
print("scl src: " + str(0))
print("scl target: " + str(scr_to))
print("rad src: " + str(0))
print("rad target: " + str(rad_to))
rad_pos = rotate_toward(rad_pos, rad_to, PI * 0.1)
rad_neg = rotate_toward(rad_neg, rad_to, PI * -0.1)
scl_pos = move_toward(scl_pos, scr_to, 1)
scl_neg = move_toward(scl_neg, scr_to, -1)
print(("scl pos by %s x1: " % str(1)) + str(scl_pos))
print(("scl neg by %s x1: " % str(-1)) + str(scl_neg))
print(("rad pos by %s x1: " % str(PI * 0.1)) + str(rad_pos))
print(("rad neg by %s x1: " % str(PI * -0.1)) + str(rad_neg))
for i in range(99):
rad_pos = rotate_toward(rad_pos, rad_to, PI * 0.1)
rad_neg = rotate_toward(rad_neg, rad_to, PI * -0.1)
scl_pos = move_toward(scl_pos, scr_to, 1)
scl_neg = move_toward(scl_neg, scr_to, -1)
print(("scl pos by %s x100: " % str(1)) + str(scl_pos))
print(("scl neg by %s x100: " % str(-1)) + str(scl_neg))
print(("rad pos by %s x100: " % str(PI * 0.1)) + str(rad_pos))
print(("rad neg by %s x100: " % str(PI * -0.1)) + str(rad_neg))
```
Output:
```
scl src: 0
scl target: 10
rad src: 0
rad target: 1.5707963267949
scl pos by 1 x1: 1
scl neg by -1 x1: -1
rad pos by 0.31415926535898 x1: 0.31415926535898
rad neg by -0.31415926535898 x1: -0.31415926535898
scl pos by 1 x100: 10
scl neg by -1 x100: -100
rad pos by 0.31415926535898 x100: 1.5707963267949
rad neg by -0.31415926535898 x100: -1.5707963267949
```
`rad neg by -0.31415926535898 x1` is `-0.31415926535898`, not `0.31415926535898` it is correct.
But `rad neg by -0.31415926535898 x100` must be `1.5707963267949`, not `-1.5707963267949`. cc @ettiSurreal
### Steps to reproduce
Use `rotate_toward()` with negative delta to reach the original target.
### Minimal reproduction project (MRP)
[wrong_rotate_toward_calc.zip](https://github.com/user-attachments/files/17707012/wrong_rotate_toward_calc.zip)
| bug,topic:core | low | Major |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.