organization string | repo_name string | base_commit string | iss_html_url string | iss_label string | title string | body string | code null | pr_html_url string | commit_html_url string | file_loc string | own_code_loc list | ass_file_loc list | other_rep_loc list | analysis dict | loctype dict | iss_has_pr int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
huggingface | transformers | 88ac60f7b5f6d4b62245dc21653ea3d5db7d4935 | https://github.com/huggingface/transformers/issues/11357 | possible mistake in documentation | Looking at the description of the parameter "decoder_input_ids" in the "forward" method of BartForConditionalGeneration/T5ForConditionalGeneration, I see the following:
BartForConditionalGeneration:
decoder_input_ids - ... For translation and summarization training, decoder_input_ids should be provided. If no decoder_input_ids is provided, the model will create this tensor by shifting the !!INPUT_IDS!! to the right for denoising pretraining following the paper.
T5ForConditionalGeneration:
decoder_input_ids - ... To know more on how to prepare decoder_input_ids for pretraining take a look at T5 Training. If decoder_input_ids and decoder_inputs_embeds are both unset, decoder_input_ids takes the value of !!INPUT_IDS!!.
Looks like there should be LABELS instead of INPUT_IDS.
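For reference, a toy sketch of the convention the docstring should describe (hedged illustration; `shift_tokens_right` is BART's helper, and the exact call site may differ):
```python
# Illustrative only: when just `labels` are provided, seq2seq models derive
# decoder_input_ids by shifting the *labels* one position to the right.
import torch
from transformers.models.bart.modeling_bart import shift_tokens_right

labels = torch.tensor([[42, 43, 2]])  # toy label ids ending in EOS
decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
```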
Thanks,
@patrickvonplaten, @patil-suraj
| null | https://github.com/huggingface/transformers/pull/11466 | null | {'base_commit': '88ac60f7b5f6d4b62245dc21653ea3d5db7d4935', 'files': [{'path': 'src/transformers/models/bart/modeling_bart.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [585]}}}, {'path': 'src/transformers/models/bart/modeling_tf_bart.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [562]}}}, {'path': 'src/transformers/models/blenderbot/modeling_blenderbot.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [549]}}}, {'path': 'src/transformers/models/blenderbot/modeling_tf_blenderbot.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [563]}}}, {'path': 'src/transformers/models/blenderbot_small/modeling_blenderbot_small.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [550]}}}, {'path': 'src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [568]}}}, {'path': 'src/transformers/models/fsmt/modeling_fsmt.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [243]}}}, {'path': 'src/transformers/models/m2m_100/modeling_m2m_100.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [598]}}}, {'path': 'src/transformers/models/marian/modeling_marian.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [562]}}}, {'path': 'src/transformers/models/marian/modeling_tf_marian.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [597]}}}, {'path': 'src/transformers/models/mbart/modeling_mbart.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [585]}}}, {'path': 'src/transformers/models/mbart/modeling_tf_mbart.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [536]}}}, {'path': 'src/transformers/models/pegasus/modeling_pegasus.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [561]}}}, {'path': 'src/transformers/models/pegasus/modeling_tf_pegasus.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [597]}}}, {'path': 'src/transformers/models/prophetnet/modeling_prophetnet.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [98]}}}, {'path': 'src/transformers/models/speech_to_text/modeling_speech_to_text.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [619]}}}, {'path': 'src/transformers/models/t5/modeling_t5.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1066]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/models/blenderbot/modeling_tf_blenderbot.py",
"src/transformers/models/marian/modeling_tf_marian.py",
"src/transformers/models/blenderbot/modeling_blenderbot.py",
"src/transformers/models/blenderbot_small/modeling_blenderbot_small.py",
"src/transformers/models/marian/modeling_marian.py",
"src/transformers/models/pegasus/modeling_tf_pegasus.py",
"src/transformers/models/bart/modeling_bart.py",
"src/transformers/models/mbart/modeling_tf_mbart.py",
"src/transformers/models/speech_to_text/modeling_speech_to_text.py",
"src/transformers/models/m2m_100/modeling_m2m_100.py",
"src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py",
"src/transformers/models/t5/modeling_t5.py",
"src/transformers/models/prophetnet/modeling_prophetnet.py",
"src/transformers/models/mbart/modeling_mbart.py",
"src/transformers/models/bart/modeling_tf_bart.py",
"src/transformers/models/pegasus/modeling_pegasus.py",
"src/transformers/models/fsmt/modeling_fsmt.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
huggingface | transformers | 8bbb53e20b7873ba7f63be70d4d798e0c3568bfa | https://github.com/huggingface/transformers/issues/13826 | Tokenizer - Raises wrong "UserWarning: `max_length` is ignored when `padding`=`True`" | In the newest version of transformers (4.11.2 & 4.12.0.dev0) I get the following warning:
```
C:\Anaconda3\envs\sbert\lib\site-packages\transformers\tokenization_utils_base.py:2227: UserWarning: `max_length` is ignored when `padding`=`True`.
warnings.warn("`max_length` is ignored when `padding`=`True`.")
```
Code to reproduce:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
texts = ["Short sentence", "A really really really really really long sentence to test max length"]
output = tokenizer(texts, padding=True, truncation=True, max_length=5, return_tensors='pt')
print(output['input_ids'].shape)
output = tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
print(output['input_ids'].shape)
```
Output:
```
C:\Anaconda3\envs\sbert\lib\site-packages\transformers\tokenization_utils_base.py:2227: UserWarning: `max_length` is ignored when `padding`=`True`.
warnings.warn("`max_length` is ignored when `padding`=`True`.")
torch.Size([2, 5])
torch.Size([2, 14])
```
As we can see, max_length is not ignored when padding=True: the text is truncated to a max_length of 5 as expected.
I would say that the warning is incorrect and should not be raised.
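If it helps, here is a minimal sketch of the narrower condition I would expect (names assumed from `_get_padding_truncation_strategies` in the warning's traceback; an illustration, not the actual patch):
```python
import warnings

def _maybe_warn(padding, truncation, max_length):
    # Hypothetical sketch: only warn when max_length truly has no effect,
    # i.e. padding=True and no truncation strategy was requested.
    if padding is True and max_length is not None and truncation is False:
        warnings.warn("`max_length` is ignored when `padding`=`True` and there is no truncation strategy.")
```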
Should I fix it?
Or is it really intended that max_length is ignored when padding=True? This would be horrible, I want to truncate my text to a certain max_length. | null | https://github.com/huggingface/transformers/pull/13829 | null | {'base_commit': '8bbb53e20b7873ba7f63be70d4d798e0c3568bfa', 'files': [{'path': 'src/transformers/tokenization_utils_base.py', 'status': 'modified', 'Loc': {"('PreTrainedTokenizerBase', '_get_padding_truncation_strategies', 2183)": {'mod': [2226, 2227]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/tokenization_utils_base.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
huggingface | transformers | 010e0460b22ddd7f74e31163f69ab3da2e9741ba | https://github.com/huggingface/transformers/issues/3227 | Core: Pipeline, Version mismatch | An error report about pipeline | # 🐛 Bug
## Information
This may be an easy question, but it has been bothering me all day.
When I run the code:
```python
nlp = pipeline("question-answering")
```
It always tells me:
```
Couldn't reach server at 'https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-modelcard.json' to download model card file.
Creating an empty model card.
```
If I ignore it and continue to run the rest of the code:
```python
nlp({
    'question': 'What is the name of the repository ?',
    'context': 'Pipeline have been included in the huggingface/transformers repository'
})
```
The error will appear:
KeyError: 'token_type_ids' | null | https://github.com/huggingface/transformers/pull/3439 | null | {'base_commit': '010e0460b22ddd7f74e31163f69ab3da2e9741ba', 'files': [{'path': 'examples/utils_multiple_choice.py', 'status': 'modified', 'Loc': {"(None, 'convert_examples_to_features', 294)": {'mod': [323]}}}, {'path': 'src/transformers/data/processors/squad.py', 'status': 'modified', 'Loc': {"(None, 'squad_convert_example_to_features', 86)": {'add': [141]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/data/processors/squad.py",
"examples/utils_multiple_choice.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
huggingface | transformers | ba1b3db70907b975b5ca52b9957c5ed7a186a0fa | https://github.com/huggingface/transformers/issues/12762 | t5 fast tokenizer save_vocabulary fails without sentencepiece file | ## Environment info
- `transformers` version: 4.9.0.dev0
- Platform: Linux-5.4.0-1043-gcp-x86_64-with-glibc2.29
- Python version: 3.8.10
- PyTorch version (GPU?): 1.9.0+cu102 (False)
- Tensorflow version (GPU?): 2.5.0 (False)
- Flax version (CPU?/GPU?/TPU?): 0.3.4 (tpu)
- Jax version: 0.2.16
- JaxLib version: 0.1.68
- Using GPU in script?: no (tpu)
- Using distributed or parallel set-up in script?: I guess data parallel
### Who can help
Models:
- t5: @patrickvonplaten
Library:
- tokenizers: @LysandreJik
## Information
Model I am using (Bert, XLNet ...):
The problem arises when using:
* [x] the official example scripts: (give details below)
* [ ] my own modified scripts: (give details below)
The tasks I am working on is:
* [x] an official GLUE/SQUaD task: (give the name)
* [] my own task or dataset: (give details below)
Task is summarization
## To reproduce
Steps to reproduce the behavior:
1. Use the [summarization example code](https://github.com/huggingface/transformers/blob/3cd15c1dd62c5c9a9202fae9f00b8eba3eb2b95d/examples/pytorch/summarization/run_summarization.py) and fine-tune a pre-trained t5 tokenizer and model created according to the flax mlm example scripts and [t5 tokenizer](https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/t5_tokenizer_model.py) -- for instance [t5-base-norwegian](https://huggingface.co/patrickvonplaten/t5-base-norwegian/tree/main)
When the finetuning-summary-trainer saves the model, it will also attempt to save the vocabulary. This will fail with the following stack trace, because the tokenizer's `self.vocab_file` is None, where it is expected to point at a sentencepiece file:
```
Traceback (most recent call last):
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/run_summarization.py", line 620, in <module>
main()
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/run_summarization.py", line 545, in main
trainer.save_model() # Saves the tokenizer too for easy upload
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/transformers/src/transformers/trainer.py", line 1883, in save_model
self._save(output_dir)
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/transformers/src/transformers/trainer.py", line 1933, in _save
self.tokenizer.save_pretrained(output_dir)
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/transformers/src/transformers/tokenization_utils_base.py", line 1958, in save_pretrained
save_files = self._save_pretrained(
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/transformers/src/transformers/tokenization_utils_fast.py", line 567, in _save_pretrained
vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
File "/home/yeb/Developer/yhavinga/t5-base-dutch-summarization/transformers/src/transformers/models/t5/tokenization_t5_fast.py", line 150, in save_vocabulary
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
File "/usr/lib/python3.8/posixpath.py", line 374, in abspath
path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not NoneType
Process finished with exit code 1
```
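For what it's worth, the failure seems reproducible without the full training run, assuming a fast tokenizer built from a `tokenizer.json` alone so that `vocab_file` stays `None` (the file name here is an assumption):
```python
# Hedged minimal reproduction: a fast tokenizer with no sentencepiece file behind it.
from transformers import T5TokenizerFast

tok = T5TokenizerFast(tokenizer_file="tokenizer.json")  # self.vocab_file is None here
tok.save_pretrained("out")  # raises the TypeError above, before the workaround below
```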
The following hack works around the problem:
```diff
diff --git a/src/transformers/models/t5/tokenization_t5_fast.py b/src/transformers/models/t5/tokenization_t5_fast.py
index 3f972b006..cc238a119 100644
--- a/src/transformers/models/t5/tokenization_t5_fast.py
+++ b/src/transformers/models/t5/tokenization_t5_fast.py
@@ -147,9 +147,10 @@ class T5TokenizerFast(PreTrainedTokenizerFast):
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
- if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
- copyfile(self.vocab_file, out_vocab_file)
- logger.info(f"Copy vocab file to {out_vocab_file}")
+ if self.vocab_file:
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
```
## Expected behavior
No error.
| null | https://github.com/huggingface/transformers/pull/12806 | null | {'base_commit': 'ba1b3db70907b975b5ca52b9957c5ed7a186a0fa', 'files': [{'path': 'src/transformers/models/albert/tokenization_albert_fast.py', 'status': 'modified', 'Loc': {"('AlbertTokenizerFast', '__init__', 122)": {'add': [160]}, "('AlbertTokenizerFast', None, 73)": {'add': [218]}}}, {'path': 'src/transformers/models/barthez/tokenization_barthez_fast.py', 'status': 'modified', 'Loc': {"('BarthezTokenizerFast', '__init__', 110)": {'add': [139]}, "('BarthezTokenizerFast', None, 59)": {'add': [189]}}}, {'path': 'src/transformers/models/big_bird/tokenization_big_bird_fast.py', 'status': 'modified', 'Loc': {"('BigBirdTokenizerFast', '__init__', 104)": {'add': [140]}, "('BigBirdTokenizerFast', None, 59)": {'add': [229]}}}, {'path': 'src/transformers/models/camembert/tokenization_camembert_fast.py', 'status': 'modified', 'Loc': {"('CamembertTokenizerFast', '__init__', 106)": {'add': [137]}, "('CamembertTokenizerFast', None, 54)": {'add': [188]}}}, {'path': 'src/transformers/models/herbert/tokenization_herbert_fast.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [25, 26, 27, 28]}}}, {'path': 'src/transformers/models/mbart50/tokenization_mbart50_fast.py', 'status': 'modified', 'Loc': {"('MBart50TokenizerFast', '__init__', 111)": {'add': [147]}, "('MBart50TokenizerFast', None, 57)": {'add': [260]}}}, {'path': 'src/transformers/models/pegasus/tokenization_pegasus_fast.py', 'status': 'modified', 'Loc': {"('PegasusTokenizerFast', '__init__', 99)": {'add': [150]}, "('PegasusTokenizerFast', None, 52)": {'add': [194]}}}, {'path': 'src/transformers/models/reformer/tokenization_reformer_fast.py', 'status': 'modified', 'Loc': {"('ReformerTokenizerFast', '__init__', 88)": {'add': [106]}, "('ReformerTokenizerFast', None, 54)": {'add': [108]}}}, {'path': 'src/transformers/models/t5/tokenization_t5_fast.py', 'status': 'modified', 'Loc': {"('T5TokenizerFast', '__init__', 105)": {'add': [139]}, "('T5TokenizerFast', None, 63)": {'add': [142]}}}, {'path': 'src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py', 'status': 'modified', 'Loc': {"('XLMRobertaTokenizerFast', '__init__', 118)": {'add': [147]}, "('XLMRobertaTokenizerFast', None, 67)": {'add': [200]}}}, {'path': 'src/transformers/models/xlnet/tokenization_xlnet_fast.py', 'status': 'modified', 'Loc': {"('XLNetTokenizerFast', '__init__', 125)": {'add': [166]}, "('XLNetTokenizerFast', None, 64)": {'add': [224]}}}, {'path': 'src/transformers/tokenization_utils_fast.py', 'status': 'modified', 'Loc': {"('PreTrainedTokenizerFast', None, 76)": {'add': [89]}, "('PreTrainedTokenizerFast', '_save_pretrained', 535)": {'mod': [554]}}}, {'path': 'tests/test_tokenization_common.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [40, 58, 3391]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/models/barthez/tokenization_barthez_fast.py",
"src/transformers/models/mbart50/tokenization_mbart50_fast.py",
"src/transformers/models/pegasus/tokenization_pegasus_fast.py",
"src/transformers/models/big_bird/tokenization_big_bird_fast.py",
"src/transformers/models/camembert/tokenization_camembert_fast.py",
"src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py",
"src/transformers/models/reformer/tokenization_reformer_fast.py",
"src/transformers/tokenization_utils_fast.py",
"src/transformers/models/herbert/tokenization_herbert_fast.py",
"src/transformers/models/xlnet/tokenization_xlnet_fast.py",
"src/transformers/models/albert/tokenization_albert_fast.py",
"src/transformers/models/t5/tokenization_t5_fast.py"
],
"doc": [],
"test": [
"tests/test_tokenization_common.py"
],
"config": [],
"asset": []
} | 1 | |
huggingface | transformers | edb314ae2ba4ac0e89d6a31d48037b8943978bff | https://github.com/huggingface/transformers/issues/28286 | `contrastive-image-text/run_clip.py` example problems | ### System Info
- `transformers` version: 4.37.0.dev0
- Platform: Linux-5.15.0-88-generic-x86_64-with-glibc2.31
- Python version: 3.11.5
- Huggingface_hub version: 0.20.1
- Safetensors version: 0.4.1
- Accelerate version: 0.25.0
- Accelerate config: not found
- PyTorch version (GPU?): 2.1.2+cu121 (True)
- Tensorflow version (GPU?): not installed (NA)
- Flax version (CPU?/GPU?/TPU?): not installed (NA)
- Jax version: not installed
- JaxLib version: not installed
- Using GPU in script?: Yes
- Using distributed or parallel set-up in script?: No
### Who can help?
@amyeroberts
### Information
- [X] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)
- [X] My own task or dataset (give details below)
### Reproduction
The following example script has some issues: https://github.com/huggingface/transformers/blob/main/examples/pytorch/contrastive-image-text/run_clip.py
#### Minor issue:
When using `--train_file dataset.csv`, the tokenizer fails if the caption is "None", "null" or "NA"
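Presumably this comes from the default NA handling of the underlying CSV reader; a hedged illustration with pandas (which, as far as I know, backs the `datasets` CSV loader):
```python
# "None", "null" and "NA" are in pandas' default NA list, so such captions arrive
# as missing values; keep_default_na=False (a standard read_csv flag) keeps them
# as literal strings.
import pandas as pd

df = pd.read_csv("train.csv", keep_default_na=False)
```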
#### Curiosity:
- There seems to be no parameter to specify the hub repository to push to.
- Also, there seems to be no place to track the experiment (like wandb)
#### Actual issue
With the following parameters
```bash
--model_name_or_path "openai/clip-vit-base-patch32" \
--freeze_text_model \
--train_file "train.csv" \
--image_column "image_path" \
--caption_column "caption" \
--remove_unused_columns=False \
--do_train \
--per_device_train_batch_size="64" \
--per_device_eval_batch_size="64" \
--learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \
--overwrite_output_dir \
--push_to_hub
```
I get the following error:
```bash
[INFO|trainer.py:1712] 2023-12-30 18:16:36,697 >> ***** Running training *****
[INFO|trainer.py:1713] 2023-12-30 18:16:36,697 >> Num examples = 348,784
[INFO|trainer.py:1714] 2023-12-30 18:16:36,697 >> Num Epochs = 3
[INFO|trainer.py:1715] 2023-12-30 18:16:36,698 >> Instantaneous batch size per device = 64
[INFO|trainer.py:1718] 2023-12-30 18:16:36,698 >> Total train batch size (w. parallel, distributed & accumulation) = 64
[INFO|trainer.py:1719] 2023-12-30 18:16:36,698 >> Gradient Accumulation steps = 1
[INFO|trainer.py:1720] 2023-12-30 18:16:36,698 >> Total optimization steps = 16,350
[INFO|trainer.py:1721] 2023-12-30 18:16:36,698 >> Number of trainable parameters = 88,111,361
0%| | 0/16350 [00:00<?, ?it/s]Traceback (most recent call last):
File "/home/amoryo/sign-language/signwriting-clip/signwriting_clip/transformers/examples/pytorch/contrastive-image-text/run_clip.py", line 590, in <module>
main()
File "/home/amoryo/sign-language/signwriting-clip/signwriting_clip/transformers/examples/pytorch/contrastive-image-text/run_clip.py", line 559, in main
train_result = trainer.train(resume_from_checkpoint=checkpoint)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/trainer.py", line 1534, in train
return inner_training_loop(
^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/trainer.py", line 1860, in _inner_training_loop
tr_loss_step = self.training_step(model, inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/trainer.py", line 2737, in training_step
loss = self.compute_loss(model, inputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/trainer.py", line 2760, in compute_loss
outputs = model(**inputs)
^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/models/clip/modeling_clip.py", line 1108, in forward
text_outputs = self.text_model(
^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/models/clip/modeling_clip.py", line 691, in forward
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/data/amoryo/conda/envs/clip/lib/python3.11/site-packages/transformers/models/clip/modeling_clip.py", line 219, in forward
embeddings = inputs_embeds + position_embeddings
~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~
RuntimeError: The size of tensor a (128) must match the size of tensor b (77) at non-singleton dimension 1
```
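A hedged workaround given the shapes in the error (128 is the script's `--max_seq_length` default, 77 is CLIP's text position limit; the flag comes from the script's own arguments):
```bash
# Keep tokenized captions within CLIP's 77 text position embeddings.
python run_clip.py --max_seq_length 77 --model_name_or_path "openai/clip-vit-base-patch32" # ...plus the remaining flags above
```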
### Expected behavior
Example script should train, and push to hub correctly | null | https://github.com/huggingface/transformers/pull/28482 | null | {'base_commit': 'edb314ae2ba4ac0e89d6a31d48037b8943978bff', 'files': [{'path': 'examples/pytorch/contrastive-image-text/run_clip.py', 'status': 'modified', 'Loc': {"(None, 'main', 241)": {'mod': [562]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"examples/pytorch/contrastive-image-text/run_clip.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
huggingface | transformers | 6d00033e97e1751a897f2317fdfd35dd853cee29 | https://github.com/huggingface/transformers/issues/1801 | wontfix | run_glue.py RuntimeError: module must have its parameters and buffers on device cuda:0 (device_ids[0]) but found one of them on device: cuda:3 | ## 🐛 Bug
<!-- Important information -->
Model I am using (Bert, XLNet....): Bert
Language I am using the model on (English, Chinese....): English
The problem arises when using:
* [ ] the official example scripts: (give details) : transformers/examples/run_glue.py
* [ ] my own modified scripts: (give details)
The tasks I am working on is:
* [ ] an official GLUE/SQUaD task: (give the name) : MRPC
* [ ] my own task or dataset: (give details)
## To Reproduce
Steps to reproduce the behavior:
1.
I've tested using
```bash
python -m pytest -sv ./transformers/tests/
python -m pytest -sv ./examples/
```
and it works fine except for a couple of tasks.
2.
After testing, I downloaded the GLUE data file via
https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
and tried run_glue.py:
```bash
pip install -r ./examples/requirements.txt
export GLUE_DIR=/path/to/glue
export TASK_NAME=MRPC
```
3.
```bash
python ./examples/run_glue.py \
  --model_type bert \
  --model_name_or_path bert-base-uncased \
  --task_name $TASK_NAME \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir $GLUE_DIR/$TASK_NAME \
  --max_seq_length 128 \
  --per_gpu_eval_batch_size=8 \
  --per_gpu_train_batch_size=8 \
  --learning_rate 2e-5 \
  --num_train_epochs 3.0 \
  --output_dir /tmp/$TASK_NAME/
```
and I got this error:
```
11/11/2019 21:10:50 - INFO - __main__ - Total optimization steps = 345
Epoch: 0%| | 0/3 [00:00<?, ?it/sTraceback (most recent call last): | 0/115 [00:00<?, ?it/s]
  File "./examples/run_glue.py", line 552, in <module>
    main()
  File "./examples/run_glue.py", line 503, in main
    global_step, tr_loss = train(args, train_dataset, model, tokenizer)
  File "./examples/run_glue.py", line 146, in train
    outputs = model(**inputs)
  File "/home/insublee/anaconda3/envs/py_torch4/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/insublee/anaconda3/envs/py_torch4/lib/python3.7/site-packages/torch/nn/parallel/data_parallel.py", line 146, in forward
    "them on device: {}".format(self.src_device_obj, t.device))
RuntimeError: module must have its parameters and buffers on device cuda:0 (device_ids[0]) but found one of them on device: cuda:3
```
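A hedged sketch of the usual remedy in the example scripts (not necessarily the exact patch): keep the model on the primary device and never wrap an already-wrapped model in `torch.nn.DataParallel`:
```python
import torch

def wrap_for_multi_gpu(model, args):
    # Move to the primary device first, then wrap once; evaluation paths that
    # re-enter with an already-wrapped model should not wrap it again.
    model.to(args.device)
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    return model
```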
<!-- A clear and concise description of what you expected to happen. -->
## Environment
* OS: ubuntu16.04LTS
* Python version: 3.7.5
* PyTorch version: 1.2.0
* PyTorch Transformers version (or branch): 2.1.1
* Using GPU ? 4-way 2080ti
* Distributed or parallel setup? cuda10.0 cudnn 7.6.4
* Any other relevant information:
## Additional context
thank you. | null | https://github.com/huggingface/transformers/pull/3842 | null | {'base_commit': '6d00033e97e1751a897f2317fdfd35dd853cee29', 'files': [{'path': 'examples/hans/test_hans.py', 'status': 'modified', 'Loc': {"(None, 'evaluate', 240)": {'mod': [258]}}}, {'path': 'examples/mm-imdb/run_mmimdb.py', 'status': 'modified', 'Loc': {"(None, 'evaluate', 265)": {'mod': [281]}}}, {'path': 'examples/ner/run_ner.py', 'status': 'modified', 'Loc': {"(None, 'evaluate', 247)": {'mod': [256]}}}, {'path': 'examples/run_language_modeling.py', 'status': 'modified', 'Loc': {"(None, 'evaluate', 407)": {'mod': [430]}}}, {'path': 'examples/run_multiple_choice.py', 'status': 'modified', 'Loc': {"(None, 'evaluate', 242)": {'mod': [259]}}}, {'path': 'examples/run_xnli.py', 'status': 'modified', 'Loc': {"(None, 'evaluate', 252)": {'mod': [269]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"examples/mm-imdb/run_mmimdb.py",
"examples/run_multiple_choice.py",
"examples/run_xnli.py",
"examples/run_language_modeling.py",
"examples/ner/run_ner.py"
],
"doc": [],
"test": [
"examples/hans/test_hans.py"
],
"config": [],
"asset": []
} | 1 |
huggingface | transformers | 43b9d93875cbf6756baf402a4720ca23d8c75015 | https://github.com/huggingface/transformers/issues/6193 | Some weights not initialized in pre-trained RobertaForMaskedLM | The bug is similar to #2202.
I am trying to evaluate MLM perplexity (without training/finetuning) using Roberta with `run_language_modeling.py` (from the [official example](https://github.com/huggingface/transformers/tree/master/examples/language-modeling)). However, some weights seem to be reinitialized instead of being loaded from the pretrained Roberta checkpoint.
## To Reproduce (~~with master branch~~):
```
import logging
logging.basicConfig(level=logging.INFO)
from transformers import RobertaForMaskedLM
_ = RobertaForMaskedLM.from_pretrained('roberta-base')
```
It gives the following warning message:
```
WARNING:transformers.modeling_utils:Some weights of RobertaForMaskedLM were not initialized from the model checkpoint at roberta-base and are newly initialized: ['roberta.embeddings.position_ids', 'lm_head.decoder.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
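A hedged sanity check for whether these keys are benign (attribute names assumed from `RobertaLMHead`; `position_ids` is just a registered buffer):
```python
# Illustrative check: the decoder bias is supposed to be tied to lm_head.bias.
from transformers import RobertaForMaskedLM

model = RobertaForMaskedLM.from_pretrained("roberta-base")
print(model.lm_head.bias is getattr(model.lm_head.decoder, "bias", None))  # True iff tied
```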
The perplexities I get on direct evaluation on Wikitext-2/103 datasets are also much higher than the official Roberta implementation from fairseq. I suspect this could be the reason. | null | https://github.com/huggingface/transformers/pull/7282 | null | {'base_commit': '43b9d93875cbf6756baf402a4720ca23d8c75015', 'files': [{'path': 'src/transformers/modeling_roberta.py', 'status': 'modified', 'Loc': {"('RobertaForMaskedLM', None, 303)": {'add': [305]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/modeling_roberta.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
huggingface | transformers | 836e88caee95eb37a860a6c82bbd2becc6b9dc7b | https://github.com/huggingface/transformers/issues/30073 | Feature request, Audio | SDPA/FA2 Attention for the Wav2Vec2 Family of Models | ### Feature request
Addition of [PyTorch SDPA](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) and [Flash Attention 2](https://github.com/Dao-AILab/flash-attention) to the Wav2Vec2 modelling code.
### Motivation
Wav2Vec2 and its derived models remain some of the most popular speech recognition and audio classification models in the library. However, only one [attention implementation](https://github.com/huggingface/transformers/blob/9b5a6450d481b0f02834684ffd8b3ba4cbbd6fe0/src/transformers/models/wav2vec2/modeling_wav2vec2.py#L487) is available to users: the slowest and most memory consuming "eager" mode. We should update the modelling code to provide two newer attention implementations: SDPA and FA2, both of which are faster and more memory efficient.
Since Wav2Vec2 copies its attention from BART, and SDPA & FA2 were added for BART in [this PR](https://github.com/huggingface/transformers/pull/27203), this should be quite a straightforward PR, mostly copying out the logic from the BART PR and pasting it into Wav2Vec2. We should then be sure to add two fast tests (one for each of SDPA and FA2), e.g. in the style of the test [here](https://github.com/huggingface/transformers/blob/9b5a6450d481b0f02834684ffd8b3ba4cbbd6fe0/tests/models/whisper/test_modeling_whisper.py#L891), and two slow integration tests, e.g. in the style of the tests [here](https://github.com/huggingface/transformers/blob/9b5a6450d481b0f02834684ffd8b3ba4cbbd6fe0/tests/models/gemma/test_modeling_gemma.py#L657-L659).
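Once merged, usage would presumably follow the convention the BART PR established (the kwarg values below are assumptions based on that precedent):
```python
# Hedged usage sketch for the new attention implementations.
from transformers import Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h", attn_implementation="sdpa")
# or attn_implementation="flash_attention_2", typically with torch_dtype=torch.float16
```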
### Your contribution
Want to take this one @kamilakesbi? | null | https://github.com/huggingface/transformers/pull/30121 | null | {'base_commit': '836e88caee95eb37a860a6c82bbd2becc6b9dc7b', 'files': [{'path': 'docs/source/en/model_doc/hubert.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [46]}}}, {'path': 'docs/source/en/model_doc/wav2vec2.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [41]}}}, {'path': 'docs/source/en/perf_infer_gpu_one.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [66, 196]}}}, {'path': 'src/transformers/models/data2vec/modeling_data2vec_audio.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [22, 41, 47, 67, 480], 'mod': [506]}, "('Data2VecAudioEncoder', '__init__', 543)": {'add': [550]}, "('Data2VecAudioPreTrainedModel', None, 674)": {'add': [683]}, "('Data2VecAudioEncoderLayer', '__init__', 508)": {'mod': [510]}, "('Data2VecAudioEncoder', 'forward', 552)": {'mod': [568, 569, 570, 571, 572, 573]}}}, {'path': 'src/transformers/models/hubert/modeling_hubert.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [21, 33, 39, 63, 543], 'mod': [569, 630]}, "('HubertEncoder', '__init__', 678)": {'add': [685]}, "('HubertEncoderStableLayerNorm', '__init__', 760)": {'add': [769]}, "('HubertPreTrainedModel', None, 844)": {'add': [853]}, "('HubertEncoderLayer', '__init__', 571)": {'mod': [573]}, "('HubertEncoderLayerStableLayerNorm', '__init__', 632)": {'mod': [634]}, "('HubertEncoder', 'forward', 687)": {'mod': [703, 704, 705, 706, 707, 708]}, "('HubertEncoderStableLayerNorm', 'forward', 771)": {'mod': [787, 788, 789, 790, 791, 792]}}}, {'path': 'src/transformers/models/sew/modeling_sew.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [22, 34, 61, 538], 'mod': [31, 564]}, "('SEWEncoder', '__init__', 600)": {'add': [609]}, "('SEWPreTrainedModel', None, 703)": {'add': [712]}, "('SEWEncoderLayer', '__init__', 566)": {'mod': [568]}, "('SEWEncoder', 'forward', 611)": {'mod': [623, 624, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635]}}}, {'path': 'src/transformers/models/unispeech/modeling_unispeech.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [23, 36, 42, 62, 579], 'mod': [605, 666]}, "('UniSpeechEncoder', '__init__', 714)": {'add': [721]}, "('UniSpeechEncoderStableLayerNorm', '__init__', 796)": {'add': [805]}, "('UniSpeechPreTrainedModel', None, 950)": {'add': [959]}, "('UniSpeechEncoderLayer', '__init__', 607)": {'mod': [609]}, "('UniSpeechEncoderLayerStableLayerNorm', '__init__', 668)": {'mod': [670]}, "('UniSpeechEncoder', 'forward', 723)": {'mod': [739, 740, 741, 742, 743, 744]}, "('UniSpeechEncoderStableLayerNorm', 'forward', 807)": {'mod': [823, 824, 825, 826, 827, 828]}}}, {'path': 'src/transformers/models/unispeech_sat/modeling_unispeech_sat.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [23, 43, 50, 78, 596], 'mod': [622, 683]}, "('UniSpeechSatEncoder', '__init__', 731)": {'add': [738]}, "('UniSpeechSatEncoderStableLayerNorm', '__init__', 813)": {'add': [822]}, "('UniSpeechSatPreTrainedModel', None, 967)": {'add': [976]}, "('UniSpeechSatEncoderLayer', '__init__', 624)": {'mod': [626]}, "('UniSpeechSatEncoderLayerStableLayerNorm', '__init__', 685)": {'mod': [687]}, "('UniSpeechSatEncoder', 'forward', 740)": {'mod': [756, 757, 758, 759, 760, 761]}, "('UniSpeechSatEncoderStableLayerNorm', 'forward', 824)": {'mod': [840, 841, 842, 843, 844, 845]}}}, {'path': 'src/transformers/models/wav2vec2/modeling_wav2vec2.py', 'status': 'modified', 'Loc': {'(None, None, None)': 
{'add': [23, 46, 61, 94, 644]}, "('Wav2Vec2Encoder', '__init__', 749)": {'add': [756]}, "('Wav2Vec2EncoderStableLayerNorm', '__init__', 830)": {'add': [839]}, "('Wav2Vec2PreTrainedModel', None, 1064)": {'add': [1073]}, "('Wav2Vec2ForPreTraining', 'forward', 1649)": {'add': [1744]}, "('Wav2Vec2EncoderLayer', '__init__', 670)": {'mod': [672]}, "('Wav2Vec2EncoderLayerStableLayerNorm', '__init__', 704)": {'mod': [706]}, "('Wav2Vec2Encoder', 'forward', 758)": {'mod': [774, 775, 776, 777, 778, 779]}, "('Wav2Vec2EncoderStableLayerNorm', 'forward', 841)": {'mod': [857, 858, 859, 860, 861, 862]}}}, {'path': 'src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py', 'status': 'modified', 'Loc': {"('Wav2Vec2ConformerForPreTraining', 'forward', 1422)": {'add': [1517]}}}, {'path': 'tests/models/wav2vec2/test_modeling_wav2vec2.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [27, 35, 38]}, "('Wav2Vec2ModelIntegrationTest', 'test_inference_mms_1b_all', 1958)": {'add': [1997]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/models/hubert/modeling_hubert.py",
"src/transformers/models/data2vec/modeling_data2vec_audio.py",
"src/transformers/models/unispeech_sat/modeling_unispeech_sat.py",
"src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py",
"src/transformers/models/wav2vec2/modeling_wav2vec2.py",
"src/transformers/models/unispeech/modeling_unispeech.py",
"src/transformers/models/sew/modeling_sew.py"
],
"doc": [
"docs/source/en/model_doc/wav2vec2.md",
"docs/source/en/model_doc/hubert.md",
"docs/source/en/perf_infer_gpu_one.md"
],
"test": [
"tests/models/wav2vec2/test_modeling_wav2vec2.py"
],
"config": [],
"asset": []
} | 1 |
huggingface | transformers | 95ffbe168690d34e385cdd16c69e9a3f8d877abf | https://github.com/huggingface/transformers/issues/11294 | serious bug with trainer.py when restarting the training from a checkpoint | ## Environment info
<!-- You can run the command `transformers-cli env` and copy-and-paste its output below.
Don't forget to fill out the missing fields in that output! -->
- `transformers` version: 4.5.1
- Platform: Linux
- Python version: 3.8
- PyTorch version (GPU?): 1.8
- Tensorflow version (GPU?): -
- Using GPU in script?: -
- Using distributed or parallel set-up in script?: -
### Who can help
<!-- Your issue will be replied to more quickly if you can figure out the right person to tag with @
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
Please tag fewer than 3 people.
Models:
- albert, bert, xlm: @LysandreJik
- blenderbot, bart, marian, pegasus, encoderdecoder, t5: @patrickvonplaten, @patil-suraj
- longformer, reformer, transfoxl, xlnet: @patrickvonplaten
- fsmt: @stas00
- funnel: @sgugger
- gpt2: @patrickvonplaten, @LysandreJik
- rag: @patrickvonplaten, @lhoestq
- tensorflow: @Rocketknight1
Library:
- benchmarks: @patrickvonplaten
- deepspeed: @stas00
- ray/raytune: @richardliaw, @amogkam
- text generation: @patrickvonplaten
- tokenizers: @LysandreJik
- trainer: @sgugger
- pipelines: @LysandreJik
Documentation: @sgugger
Model hub:
- for issues with a model report at https://discuss.huggingface.co/ and tag the model's creator.
HF projects:
- datasets: [different repo](https://github.com/huggingface/datasets)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
Examples:
- maintained examples (not research project or legacy): @sgugger, @patil-suraj
- research_projects/bert-loses-patience: @JetRunner
- research_projects/distillation: @VictorSanh
-->
trainer: @sgugger, @patil-suraj
## Information
Hi, I see a serious issue with the trainer.py class. Please consider the run_translation.py script [1]: after the model is defined, one can freeze the encoder or wrap the model in a class, i.e., modify the model after this line https://github.com/huggingface/transformers/blob/d9c62047a8d75e18d2849d345ab3394875a712ef/examples/seq2seq/run_translation.py#L331
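For concreteness, a minimal sketch of the kind of modification I mean (`get_encoder` is the standard seq2seq accessor; the checkpoint name is just a placeholder):
```python
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")  # placeholder checkpoint
for param in model.get_encoder().parameters():
    param.requires_grad = False  # freeze the encoder after model creation
```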
Then, during training, one can stop the run and later want to continue from where it stopped. If you print the trainable parameters inside trainer.py, right before this line:
https://github.com/huggingface/transformers/blob/d9c62047a8d75e18d2849d345ab3394875a712ef/src/transformers/trainer.py#L1062
like this
```python
for n, p in model.named_parameters():
    if p.requires_grad:
        print(n)
```
What would we see? All parameters are there, even the ones we froze. This is a serious bug: if the user modifies the model after creation, those modifications are not taken into account when restarting the training. Could you kindly have a look?
thanks
[1] https://github.com/huggingface/transformers/blob/master/examples/seq2seq/run_translation.py
## Expected behavior
The user should be able to continue training the modified model as they are modified. | null | https://github.com/huggingface/transformers/pull/11318 | null | {'base_commit': '95ffbe168690d34e385cdd16c69e9a3f8d877abf', 'files': [{'path': 'src/transformers/configuration_utils.py', 'status': 'modified', 'Loc': {"('PretrainedConfig', '__init__', 196)": {'mod': [274]}}}, {'path': 'src/transformers/trainer.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [55, 58]}, "('Trainer', 'train', 933)": {'add': [999], 'mod': [1003, 1004, 1005, 1007, 1284, 1285, 1286, 1287, 1288, 1289, 1290]}}}, {'path': 'tests/test_trainer.py', 'status': 'modified', 'Loc': {"('TrainerIntegrationTest', None, 287)": {'add': [727]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/transformers/configuration_utils.py",
"src/transformers/trainer.py"
],
"doc": [],
"test": [
"tests/test_trainer.py"
],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | 61cbee18ae0ea0c20773f7257dc62923d9a42240 | https://github.com/geekan/MetaGPT/issues/1014 | bug | [IPKernelApp] WARNING | Parent appears to have exited, shutting down. | **Bug description**
<!-- Clearly and directly describe the current bug -->
I got the error "[IPKernelApp] WARNING | Parent appears to have exited, shutting down." while I'm running the example.
**Bug solved method**
<!-- If you solved the bug, describe the idea or process to solve the current bug. Of course, you can also paste the URL address of your Pull Request. -->
<!-- If not, provide more auxiliary information to facilitate our further positioning and investigation -->
**Environment information**
<!-- Environment:System version (like ubuntu 22.04), Python version (conda python 3.7), LLM type and model (OpenAI gpt-4-1106-preview) -->
- LLM type and model name:
- System version: MacOS
- Python version: Python 3.11.7
<!-- Dependent packagess:the packages version cause the bug(like `pydantic 1.10.8`), installation method(like `pip install metagpt` or `pip install from source` or `run in docker`) -->
- packages version:
- installation method:
**Screenshots or logs**
<!-- Screenshots or logs of the bug can help us understand the problem more quickly -->
```bash
MacBook-Pro mytest % python solve_math_problems.py
2024-03-15 22:03:02.242 | INFO | metagpt.const:get_metagpt_package_root:29 - Package root set to /Users/jason/git/github/MetaGPT/workspace/mytest
```json
[
{
"task_id": "1",
"dependent_task_ids": [],
"instruction": "Find the prime factorization of 6 and 126."
},
{
"task_id": "2",
"dependent_task_ids": ["1"],
"instruction": "Determine the values of m and n based on the prime factorization and the given conditions."
},
{
"task_id": "3",
"dependent_task_ids": ["2"],
"instruction": "Calculate the least possible value of m + n."
}
]
```
2024-03-15 22:03:07.142 | INFO | metagpt.utils.cost_manager:update_cost:52 - Total running cost: $0.001 | Max budget: $10.000 | Current cost: $0.001, prompt_tokens: 265, completion_tokens: 123
2024-03-15 22:03:07.142 | INFO | metagpt.roles.role:_plan_and_act:494 - ready to take on task task_id='1' dependent_task_ids=[] instruction='Find the prime factorization of 6 and 126.' task_type='' code='' result='' is_success=False is_finished=False
2024-03-15 22:03:07.142 | INFO | metagpt.roles.di.data_interpreter:_write_code:79 - ready to WriteCodeWithoutTools
2024-03-15 22:03:09.482 | INFO | metagpt.utils.cost_manager:update_cost:52 - Total running cost: $0.001 | Max budget: $10.000 | Current cost: $0.001, prompt_tokens: 557, completion_tokens: 66
1 import math
2
3 # Prime factorization of 6
4 prime_factors_6 = [2, 3]
5
6 # Prime factorization of 126
7 prime_factors_126 = [2, 3, 3, 7]
0.00s - Debugger warning: It seems that frozen modules are being used, which may
0.00s - make the debugger miss breakpoints. Please pass -Xfrozen_modules=off
0.00s - to python to disable frozen modules.
0.00s - Note: Debugging will proceed. Set PYDEVD_DISABLE_FILE_VALIDATION=1 to disable this validation.
0.00s - Debugger warning: It seems that frozen modules are being used, which may
0.00s - make the debugger miss breakpoints. Please pass -Xfrozen_modules=off
0.00s - to python to disable frozen modules.
0.00s - Note: Debugging will proceed. Set PYDEVD_DISABLE_FILE_VALIDATION=1 to disable this validation.
2024-03-15 22:03:10.305 | INFO | metagpt.roles.role:_plan_and_act:494 - ready to take on task task_id='2' dependent_task_ids=['1'] instruction='Determine the values of m and n based on the prime factorization and the given conditions.' task_type='' code='' result='' is_success=False is_finished=False
2024-03-15 22:03:10.306 | INFO | metagpt.roles.di.data_interpreter:_write_code:79 - ready to WriteCodeWithoutTools
2024-03-15 22:03:11.936 | INFO | metagpt.utils.cost_manager:update_cost:52 - Total running cost: $0.001 | Max budget: $10.000 | Current cost: $0.001, prompt_tokens: 615, completion_tokens: 21
1 from sympy import *
2024-03-15 22:03:12.123 | INFO | metagpt.roles.role:_plan_and_act:494 - ready to take on task task_id='3' dependent_task_ids=['2'] instruction='Calculate the least possible value of m + n.' task_type='' code='' result='' is_success=False is_finished=False
2024-03-15 22:03:12.123 | INFO | metagpt.roles.di.data_interpreter:_write_code:79 - ready to WriteCodeWithoutTools
2024-03-15 22:03:34.325 | INFO | metagpt.utils.cost_manager:update_cost:52 - Total running cost: $0.001 | Max budget: $10.000 | Current cost: $0.001, prompt_tokens: 612, completion_tokens: 21
1 from sympy import *
MacBook-Pro mytest % [IPKernelApp] WARNING | Parent appears to have exited, shutting down.
```
| null | https://github.com/geekan/MetaGPT/pull/1141 | null | {'base_commit': '61cbee18ae0ea0c20773f7257dc62923d9a42240', 'files': [{'path': 'metagpt/roles/di/data_interpreter.py', 'status': 'modified', 'Loc': {"('DataInterpreter', '_plan_and_act', 88)": {'mod': [89, 90, 91]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/roles/di/data_interpreter.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
geekan | MetaGPT | 5e8bd105177e08848990d32b9ea636daa639be19 | https://github.com/geekan/MetaGPT/issues/1290 | Validation Error | ValidationError: 1 validation error for Config llm Field required [type=missing, input_value={'PATH': '/Users/psyb0rg/..._INIT_AT_FORK': 'FALSE'}, input_type=dict] For further information visit https://errors.pydantic.dev/2.7/v/missing | null | https://github.com/geekan/MetaGPT/pull/1324 | null | {'base_commit': '5e8bd105177e08848990d32b9ea636daa639be19', 'files': [{'path': 'metagpt/configs/llm_config.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [15]}, "('LLMConfig', 'check_llm_key', 95)": {'mod': [97]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/configs/llm_config.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | b17846401ec7d12b73079fb21f3939ad9e9e2d70 | https://github.com/geekan/MetaGPT/issues/476 | How to use FaissStore? | I saw an example listed under the examples folder, https://github.com/geekan/MetaGPT/blob/ccc4c9e04debfdb8296c342d7a3f9606f407e007/examples/search_kb.py#L14-L16
but the example.json is not provided.
I have tried to make fake data whose structure follows the code.
```json
[
{
"source": "Which facial cleanser is good for oily skin?",
"output": "ABC cleanser is preferred by many with oily skin."
},
{
"source": "Which facial cleanser is good for oily skin?",
"output": "For oily skin, consider using DEF facial wash."
},
{
"source": "Which facial cleanser is good for oily skin?",
"output": "XYZ facial cleanser is suitable for oily skin."
},
{
"source": "Which facial cleanser is good for oily skin?",
"output": "XYZ facial cleanser is suitable for oily skin."
},
{
"source": "Which facial cleanser is good for oily skin?",
"output": "XYZ facial cleanser is suitable for oily skin."
},
{
"source": "Is L'Oreal good to use?",
"output": "L'Oreal is a reputable brand and is generally considered good."
},
{
"source": "Is L'Oreal good to use?",
"output": "L'Oreal is a popular brand with many positive reviews."
},
{
"source": "Is L'Oreal good to use?",
"output": "Many people find L'Oreal products effective."
},
{
"source": "Is L'Oreal good to use?",
"output": "L'Oreal is a popular brand with many positive reviews."
},
{
"source": "Is L'Oreal good to use?",
"output": "Many people find L'Oreal products effective."
}
]
```
but the console gives me the information below; it seems my fake data is irrelevant to SearchAndSummarize.
```shell
(metagpt) yhtao@PC:/mnt/d/github_repo/MetaGPT$ /home/yhtao/anaconda3/envs/metagpt/bin/python /mnt/d/github_repo/MetaGPT/examples/search_kb.py
2023-11-02 13:36:56.963 | INFO | metagpt.config:__init__:44 - Config loading done.
2023-11-02 13:36:57.744 | INFO | __main__:search:20 - User: Which facial cleanser is good for oily skin?
2023-11-02 13:36:57.745 | INFO | metagpt.roles.role:_act:167 - Xiaomei(Sales): ready to SearchAndSummarize
Traceback (most recent call last):
File "/mnt/d/github_repo/MetaGPT/examples/search_kb.py", line 26, in <module>
asyncio.run(search())
File "/home/yhtao/anaconda3/envs/metagpt/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/home/yhtao/anaconda3/envs/metagpt/lib/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/mnt/d/github_repo/MetaGPT/examples/search_kb.py", line 21, in search
result = await role.run(query)
File "/mnt/d/github_repo/MetaGPT/metagpt/roles/role.py", line 240, in run
rsp = await self._react()
File "/mnt/d/github_repo/MetaGPT/metagpt/roles/role.py", line 209, in _react
return await self._act()
File "/mnt/d/github_repo/MetaGPT/metagpt/roles/role.py", line 168, in _act
response = await self._rc.todo.run(self._rc.important_memory)
File "/mnt/d/github_repo/MetaGPT/metagpt/actions/search_and_summarize.py", line 121, in run
query = context[-1].content
IndexError: list index out of range
``` | null | https://github.com/geekan/MetaGPT/pull/501 | null | {'base_commit': 'b17846401ec7d12b73079fb21f3939ad9e9e2d70', 'files': [{'path': 'examples/search_kb.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [7, 11], 'mod': [25]}, "(None, 'search', 14)": {'mod': [15, 18]}}}, {'path': 'metagpt/actions/search_and_summarize.py', 'status': 'modified', 'Loc': {}}, {'path': 'metagpt/document_store/faiss_store.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [7], 'mod': [81, 82, 83, 84, 85]}, "('FaissStore', None, 22)": {'add': [60], 'mod': [23, 53]}}}, {'path': 'metagpt/roles/sales.py', 'status': 'modified', 'Loc': {"('Sales', '__init__', 14)": {'mod': [15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}, "('Sales', '_set_store', 29)": {'mod': [31]}}}]} | [] | [] | [] | {
"iss_type": "3",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/actions/search_and_summarize.py",
"metagpt/document_store/faiss_store.py",
"metagpt/roles/sales.py",
"examples/search_kb.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | bdba23e4225b3b77402c8725854668c2b84c5041 | https://github.com/geekan/MetaGPT/issues/1367 | Dashscope service causing :ValueError: too many values to unpack (expected 11) | https://github.com/geekan/MetaGPT/blob/9f8f0a27fd3e7d6a7f6fcf40103a94829533bdc2/metagpt/provider/dashscope_api.py#L51
When using the DashScope service, in this line the `_get_protocol_params` method returns 13 values, but the unpacking logic assumes that 11 values are returned, causing the ValueError: too many values to unpack (expected 11).
A fix that works for me is adding the two extra values to the unpacking logic:
```python
(
api_protocol,
ws_stream_mode,
is_binary_input,
http_method,
stream,
async_request,
query,
headers,
request_timeout,
form,
resources,
base_address,
flattened_output
) = _get_protocol_params(kwargs)
```
The version of the dashscope package is `1.19.3`.
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/provider/dashscope_api.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | f201b2f5f32c2d48eab6632bf103e9b3a92fc999 | https://github.com/geekan/MetaGPT/issues/233 | ImportError when running startup.py | When I try to run `startup.py`, I get an ImportError. I have installed the dependencies listed in `requirements.txt` and confirmed that my Python version meets the project's requirements.
The exact error message is as follows:
```
2023-08-15 20:31:23.375 | INFO | metagpt.config:__init__:44 - Config loading done.
Traceback (most recent call last):
  File "F:\metaGPT\metagpt\startup.py", line 7, in <module>
    from metagpt.roles import Architect, Engineer, ProductManager, ProjectManager, QaEngineer
ImportError: cannot import name 'ProductManager' from 'metagpt.roles' (F:\metaGPT\metagpt\metagpt\roles\__init__.py)
```
I have already tried reinstalling all dependencies and made sure that `pandas` and the other required libraries were installed successfully, but the problem persists.
Do you have any suggestions for solving this problem?
Environment:
- System: Windows 10
- Python version: 3.11
Thanks.
| null | https://github.com/geekan/MetaGPT/pull/1253 | null | {'base_commit': 'f201b2f5f32c2d48eab6632bf103e9b3a92fc999', 'files': [{'path': 'metagpt/provider/openai_api.py', 'status': 'modified', 'Loc': {"('OpenAILLM', '_achat_completion_stream', 89)": {'mod': [103]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/provider/openai_api.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | 6b70f7b0ed3c2215ffff500772e6ae4f8ce79c5a | https://github.com/geekan/MetaGPT/issues/1257 | stream field in LLMConfig does not work | **Bug description**
~/.metagpt/config2.yaml
```
llm:
stream: False
```
does not affect how `llm.aask` is called.
**Bug solved method**
```python
class BaseLLM(ABC):
    async def aask(
        self,
        msg: Union[str, list[dict[str, str]]],
        system_msgs: Optional[list[str]] = None,
        format_msgs: Optional[list[dict[str, str]]] = None,
        images: Optional[Union[str, list[str]]] = None,
        timeout=USE_CONFIG_TIMEOUT,
        stream=None,
    ) -> str:
        if stream is None:
            stream = config.llm.stream
        # ... assembly of `message` from msg/system_msgs/format_msgs elided ...
        rsp = await self.acompletion_text(message, stream=stream, timeout=self.get_timeout(timeout))
| null | https://github.com/geekan/MetaGPT/pull/1258 | null | {'base_commit': '6b70f7b0ed3c2215ffff500772e6ae4f8ce79c5a', 'files': [{'path': 'metagpt/configs/llm_config.py', 'status': 'modified', 'Loc': {"('LLMConfig', None, 41)": {'mod': [77]}}}, {'path': 'metagpt/provider/base_llm.py', 'status': 'modified', 'Loc': {"('BaseLLM', 'aask', 128)": {'add': [148], 'mod': [135]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/provider/base_llm.py",
"metagpt/configs/llm_config.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | ddf4697381ec6a5e929669eff59e3e4953a6598e | https://github.com/geekan/MetaGPT/issues/278 | Can I use engine mode to connect azure-gpt? | <img width="590" alt="image" src="https://github.com/geekan/MetaGPT/assets/23121539/3ef72f57-b82b-4890-83ca-9c5044927ff1">
```python
response = openai.ChatCompletion.create(
    engine="gpt-4-chatbot-ui",
    messages=[{"role": "system", "content": "You are an AI assistant that helps people find information."}],
)
```
Engine mode is officially supported; can I use engine mode to connect to Azure GPT?
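For context, the `engine` parameter corresponds to the Azure-style client configuration of the (pre-1.0) openai package; nothing MetaGPT-specific is assumed in this sketch, and the resource name, API version and key are placeholders:
```python
import openai

openai.api_type = "azure"
openai.api_base = "https://<your-resource>.openai.azure.com/"  # placeholder
openai.api_version = "2023-05-15"  # placeholder
openai.api_key = "<your-azure-key>"  # placeholder
response = openai.ChatCompletion.create(
    engine="gpt-4-chatbot-ui",
    messages=[{"role": "user", "content": "ping"}],
)
```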
| null | https://github.com/geekan/MetaGPT/pull/280 | null | {'base_commit': 'ddf4697381ec6a5e929669eff59e3e4953a6598e', 'files': [{'path': 'config/config.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [19, 23]}}}, {'path': 'metagpt/config.py', 'status': 'modified', 'Loc': {"('Config', '__init__', 41)": {'add': [61]}}}, {'path': 'metagpt/provider/openai_api.py', 'status': 'modified', 'Loc': {"('OpenAIGPTAPI', None, 134)": {'add': [176]}, "('OpenAIGPTAPI', '_achat_completion_stream', 156)": {'mod': [165, 166, 167, 168]}, "('OpenAIGPTAPI', '_cons_kwargs', 176)": {'mod': [178, 179, 180, 181, 182, 183, 184, 185, 187, 188, 189, 190, 191, 192, 193, 194, 195]}}}]} | [] | [] | [] | {
"iss_type": "3",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/provider/openai_api.py",
"metagpt/config.py"
],
"doc": [],
"test": [],
"config": [
"config/config.yaml"
],
"asset": []
} | 1 | |
geekan | MetaGPT | 5e8bd105177e08848990d32b9ea636daa639be19 | https://github.com/geekan/MetaGPT/issues/1220 | pydantic_core._pydantic_core.ValidationError: 1 validation error for Config |
```
D:\MetaGPT-main\venv\Scripts\python.exe D:/MetaGPT-main/test.py
2024-04-23 17:14:39.661 | INFO | metagpt.const:get_metagpt_package_root:29 - Package root set to D:\MetaGPT-main
Traceback (most recent call last):
  File "D:\MetaGPT-main\test.py", line 3, in <module>
    repo: ProjectRepo = generate_repo("Create a 2048 game") # or ProjectRepo("<path>")
  File "D:\MetaGPT-main\metagpt\software_company.py", line 30, in generate_repo
    from metagpt.config2 import config
  File "D:\MetaGPT-main\metagpt\config2.py", line 164, in <module>
    config = Config.default()
  File "D:\MetaGPT-main\metagpt\config2.py", line 106, in default
    return Config(**final)
  File "D:\MetaGPT-main\venv\lib\site-packages\pydantic\main.py", line 164, in __init__
    __pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
pydantic_core._pydantic_core.ValidationError: 1 validation error for Config
llm.api_key
  Value error, Please set your API key in config2.yaml [type=value_error, input_value='YOUR_API_KEY', input_type=str]
For further information visit https://errors.pydantic.dev/2.5/v/value_error
```
I've already written the config, so why is this still the case???

| null | https://github.com/geekan/MetaGPT/pull/1324 | null | {'base_commit': '5e8bd105177e08848990d32b9ea636daa639be19', 'files': [{'path': 'metagpt/configs/llm_config.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [15]}, "('LLMConfig', 'check_llm_key', 95)": {'mod': [97]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/configs/llm_config.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
geekan | MetaGPT | 12948a5482bf4c6c79fb4c84f89bbad3600942e4 | https://github.com/geekan/MetaGPT/issues/1100 | bug | debate example fail to work with gemini | **Bug description**
The debate example throws an error with gemini-pro 1.5.
Web search works with gemini-pro.
**Bug solved method**
**Environment information**
Python 3.9
Conda
- LLM type and model name: Gemini-Pro
- System version:
- Python version: 3.9
**Screenshots or logs**
python3 debate.py "Talk about Artificial General Intelligence"
2024-03-25 17:57:01.666 | INFO | metagpt.const:get_metagpt_package_root:29 - Package root set to /Users/samsaha2
2024-03-25 17:57:03.800 | INFO | metagpt.team:invest:90 - Investment: $3.0.
2024-03-25 17:57:03.801 | INFO | __main__:_act:63 - Biden(Democrat): to do SpeakAloud(SpeakAloud)
2024-03-25 17:57:06.072 | WARNING | metagpt.utils.common:wrapper:572 - There is a exception in role's execution, in order to resume, we delete the newest role communication message in the role's memory.
2024-03-25 17:57:06.081 | ERROR | metagpt.utils.common:wrapper:554 - Exception occurs, start to serialize the project, exp:
Traceback (most recent call last):
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/utils/common.py", line 563, in wrapper
return await func(self, *args, **kwargs)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/roles/role.py", line 558, in run
rsp = await self.react()
ValueError: The `response.text` quick accessor only works for simple (single-`Part`) text responses. This response is not simple text.Use the `result.parts` accessor or the full `result.candidates[index].content.parts` lookup instead.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/utils/common.py", line 549, in wrapper
result = await func(self, *args, **kwargs)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/team.py", line 134, in run
await self.env.run()
Exception: Traceback (most recent call last):
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/utils/common.py", line 563, in wrapper
return await func(self, *args, **kwargs)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/roles/role.py", line 558, in run
rsp = await self.react()
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/roles/role.py", line 525, in react
rsp = await self._react()
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/roles/role.py", line 471, in _react
rsp = await self._act()
File "/Users/samsaha2/debate.py", line 70, in _act
rsp = await todo.run(context=context, name=self.name, opponent_name=self.opponent_name)
File "/Users/samsaha2/debate.py", line 41, in run
rsp = await self._aask(prompt)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/actions/action.py", line 93, in _aask
return await self.llm.aask(prompt, system_msgs)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/provider/base_llm.py", line 89, in aask
rsp = await self.acompletion_text(message, stream=stream, timeout=timeout)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/tenacity/_asyncio.py", line 88, in async_wrapped
return await fn(*args, **kwargs)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/tenacity/_asyncio.py", line 47, in __call__
do = self.iter(retry_state=retry_state)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/tenacity/__init__.py", line 314, in iter
return fut.result()
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/concurrent/futures/_base.py", line 391, in __get_result
raise self._exception
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/tenacity/_asyncio.py", line 50, in __call__
result = await fn(*args, **kwargs)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/provider/google_gemini_api.py", line 147, in acompletion_text
return await self._achat_completion_stream(messages)
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/metagpt/provider/google_gemini_api.py", line 127, in _achat_completion_stream
content = chunk.text
File "/Users/samsaha2/miniconda3/envs/metagpt/lib/python3.9/site-packages/google/generativeai/types/generation_types.py", line 328, in text
raise ValueError(
ValueError: The `response.text` quick accessor only works for simple (single-`Part`) text responses. This response is not simple text.Use the `result.parts` accessor or the full `result.candidates[index].content.parts` lookup instead.
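The traceback points at `content = chunk.text` in `_achat_completion_stream`. A hedged workaround sketch using the accessor the error message itself recommends (illustrative only, not necessarily the fix merged upstream):
```python
# Join all text parts of the first candidate instead of relying on the
# single-part `.text` quick accessor, which raises for multi-part responses.
content = "".join(part.text for part in chunk.parts if getattr(part, "text", ""))
```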
| null | https://github.com/geekan/MetaGPT/pull/1105 | null | {'base_commit': '12948a5482bf4c6c79fb4c84f89bbad3600942e4', 'files': [{'path': 'metagpt/provider/google_gemini_api.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [4, 13], 'mod': [6]}, "('GeminiLLM', '_achat_completion_stream', 138)": {'add': [152], 'mod': [144]}}}, {'path': 'requirements.txt', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [63]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/provider/google_gemini_api.py"
],
"doc": [],
"test": [],
"config": [
"requirements.txt"
],
"asset": []
} | 1 |
geekan | MetaGPT | 02f999204009ae5cf78152a0fc47aa6ac98b4aa2 | https://github.com/geekan/MetaGPT/issues/475 | tenacity.RetryError: RetryError[<Future at 0x7faeafc5ffd0 state=finished raised JSONDecodeError>] | When I set up everything and python3 startup.py "Write a cli snake game", I get this error.
""",
"Logic Analysis": [
["main.py","Contains the main game loop and user input handling"],
["game.py","Contains the game logic, including the snake and food classes"],
["snake.py","Contains the Snake class and its methods for moving and eating food"],
["food.py","Contains the Food class and its method for generating new food"]
],
"Task list": [
"main.py",
"game.py",
"snake.py",
"food.py"
],
"Shared Knowledge": """
'game.py' contains the Game class, which manages the game state and controls the snake and food.
'snake.py' contains the Snake class, which represents the snake and its movements.
'food.py' contains the Food class, which represents the food and generates new food when eaten by the snake.
""",
"Anything UNCLEAR": "We need to decide on the game's width and height, which will be specified in the API request when starting a new game."
}
[END]
```
Traceback (most recent call last):
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/tenacity/_asyncio.py", line 50, in __call__
result = await fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/actions/action.py", line 78, in _aask_v1
parsed_data = CustomDecoder(strict=False).decode(content)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/utils/custom_decoder.py", line 297, in decode
return super().decode(s)
^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/user/cx/qianwen/MetaGPT-main/startup.py", line 72, in <module>
fire.Fire(main)
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/fire/core.py", line 141, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/fire/core.py", line 466, in _Fire
component, remaining_args = _CallAndUpdateTrace(
^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/fire/core.py", line 681, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/startup.py", line 68, in main
asyncio.run(startup(idea, investment, n_round, code_review, run_tests, implement))
File "/home/user/anaconda3/envs/py11/lib/python3.11/asyncio/runners.py", line 190, in run
return runner.run(main)
^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/asyncio/runners.py", line 118, in run
return self._loop.run_until_complete(task)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/asyncio/base_events.py", line 653, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/startup.py", line 47, in startup
await company.run(n_round=n_round)
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/software_company.py", line 60, in run
await self.environment.run()
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/environment.py", line 67, in run
await asyncio.gather(*futures)
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/roles/role.py", line 240, in run
rsp = await self._react()
^^^^^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/roles/role.py", line 209, in _react
return await self._act()
^^^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/roles/role.py", line 168, in _act
response = await self._rc.todo.run(self._rc.important_memory)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/cx/qianwen/MetaGPT-main/metagpt/actions/project_management.py", line 184, in run
rsp = await self._aask_v1(prompt, "task", OUTPUT_MAPPING, format=format)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/tenacity/_asyncio.py", line 88, in async_wrapped
return await fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/tenacity/_asyncio.py", line 47, in __call__
do = self.iter(retry_state=retry_state)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user/anaconda3/envs/py11/lib/python3.11/site-packages/tenacity/__init__.py", line 326, in iter
raise retry_exc from fut.exception()
tenacity.RetryError: RetryError[<Future at 0x7faeafc5ffd0 state=finished raised JSONDecodeError>]
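`Expecting value: line 1 column 1` means the decoder received content that does not start with JSON at all, which is consistent with the completion above wrapping the object in prose and an `[END]` marker. A hedged pre-processing sketch; the helper name is made up, and `CustomDecoder` is the project's own class from the traceback:
```python
import re

from metagpt.utils.custom_decoder import CustomDecoder

def extract_and_decode(content: str) -> dict:
    # Keep only the outermost {...} span so surrounding prose and markers
    # such as "[END]" no longer break decoding.
    match = re.search(r"\{.*\}", content, re.DOTALL)
    if match is None:
        raise ValueError(f"no JSON object found in: {content[:80]!r}")
    return CustomDecoder(strict=False).decode(match.group(0))
```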
| null | https://github.com/geekan/MetaGPT/pull/500 | null | {'base_commit': '02f999204009ae5cf78152a0fc47aa6ac98b4aa2', 'files': [{'path': 'config/config.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [36, 96]}}}, {'path': 'metagpt/actions/action.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [8, 18]}, "('Action', None, 21)": {'mod': [52]}, "('Action', '_aask_v1', 53)": {'mod': [66, 70, 71, 73, 74, 75, 76, 78, 83]}}}, {'path': 'metagpt/config.py', 'status': 'modified', 'Loc': {"('Config', '__init__', 41)": {'add': [48, 71, 95], 'mod': [51, 52]}}}, {'path': 'metagpt/llm.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [14], 'mod': [9]}, "(None, 'LLM', 18)": {'add': [28], 'mod': [23, 24]}}}, {'path': 'metagpt/roles/role.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [21]}, "('Role', '_think', 185)": {'add': [195]}, "('RoleContext', None, 77)": {'mod': [82, 86]}, "('Role', '_init_actions', 123)": {'mod': [130, 131]}}}, {'path': 'tests/metagpt/utils/test_custom_decoder.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [8, 39, 56]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "3",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"metagpt/llm.py",
"metagpt/actions/action.py",
"metagpt/config.py",
"metagpt/roles/role.py"
],
"doc": [],
"test": [
"tests/metagpt/utils/test_custom_decoder.py"
],
"config": [
"config/config.yaml"
],
"asset": []
} | 1 | |
langflow-ai | langflow | 0d1c2914a4a601217fb59316e4bfd600b57fd655 | https://github.com/langflow-ai/langflow/issues/5438 | bug | Tweaks not passing into Text Input Components via API | ### Bug Description
For an example flow with multiple text inputs, tweaks passed via the API are not forwarded correctly to the components in the flow.
Flow Json: [Multi-input flow test.json](https://github.com/user-attachments/files/18242439/Multi-input.flow.test.json)
API Request Body:
{
  "output_type": "text",
  "input_type": "text",
  "tweaks": {
    "TextInput-fDJCN": {
      "input_value": "Elon Musk"
    },
    "TextInput-GHYWO": {
      "input_value": "June 28, 1971"
    }
  }
}
Response: [tweaks-api-response.json](https://github.com/user-attachments/files/18242442/tweaks-api-response.json)
This fails only on dev/local; it seems to work on DataStax Langflow / cloud (https://astra.datastax.com/langflow/) with the same flow JSON and API request body.
### Reproduction
1. Clone/Fork latest code from https://github.com/langflow-ai/langflow or via cli (uv pip install langflow)
2. Run langflow with make init from codebase or cli (uv run langflow run)
3. Create new flow and import Flow Json above which has 2 text input components - make sure to set OpenAI API key so flow runs without failure
4. Invoke the flow via the API, setting the following tweaks for the text input components' input_value:
   "TextInput-ABC": {
     "input_value": "Elon Musk"
   },
   "TextInput-XYZ": {
     "input_value": "June 28, 1971"
   }
Expected Results:
AI Response provides a Christmas greeting for Elon and mentions his Birthday
Actual Response:
Prompt asks to provide the name and birthday or uses placeholders
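For reference, a minimal sketch of the failing call from Python, assuming a local instance on the default port; the flow id in the URL is a placeholder for your flow's id or endpoint name:
```python
import httpx

payload = {
    "output_type": "text",
    "input_type": "text",
    "tweaks": {
        "TextInput-fDJCN": {"input_value": "Elon Musk"},
        "TextInput-GHYWO": {"input_value": "June 28, 1971"},
    },
}
# POST to the run endpoint of the imported flow.
resp = httpx.post("http://127.0.0.1:7860/api/v1/run/<flow-id>", json=payload, timeout=60)
print(resp.json())
```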
### Expected behavior
AI Response provides a Christmas greeting for Elon and mentions his Birthday
### Who can help?
@italojohnny , @oga
### Operating System
Mac OS 15.2
### Langflow Version
1.1.1
### Python Version
3.11
### Screenshot
Local/Dev API Response:
<img width="872" alt="Screenshot 2024-12-24 at 5 19 39 PM" src="https://github.com/user-attachments/assets/33766f09-5a72-43d4-9962-f83ead9ad303" />
DataStax Langflow / Cloud API Response:
<img width="876" alt="Screenshot 2024-12-24 at 5 19 48 PM" src="https://github.com/user-attachments/assets/a23c07f7-57de-42c4-a5c9-eb0d47381f83" />
### Flow File
[Multi-input flow test.json](https://github.com/user-attachments/files/18242478/Multi-input.flow.test.json)
| null | https://github.com/langflow-ai/langflow/pull/5656 | null | {'base_commit': '0d1c2914a4a601217fb59316e4bfd600b57fd655', 'files': [{'path': 'src/backend/base/langflow/api/v1/endpoints.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [91], 'mod': [11, 30, 45, 47]}, "(None, 'validate_input_and_tweaks', 70)": {'mod': [75, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89]}, "(None, 'simple_run_flow', 93)": {'mod': [101, 102, 113, 114, 115, 116, 117, 118, 119]}}}, {'path': 'src/backend/tests/unit/test_endpoints.py', 'status': 'modified', 'Loc': {"(None, 'test_successful_run_no_payload', 275)": {'mod': [290]}, "(None, 'test_successful_run_with_output_type_text', 303)": {'mod': [321]}, "(None, 'test_successful_run_with_output_type_any', 334)": {'mod': [353]}, "(None, 'test_successful_run_with_output_type_debug', 366)": {'mod': [386]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/backend/base/langflow/api/v1/endpoints.py"
],
"doc": [],
"test": [
"src/backend/tests/unit/test_endpoints.py"
],
"config": [],
"asset": []
} | 1 |
langflow-ai | langflow | b2e40ec92f236043684ac542b9be1c77faa664fe | https://github.com/langflow-ai/langflow/issues/2520 | bug | Langflow not loading all required Environment variable mentioned in LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT | **Describe the bug**
Trying to load 3 global variables from the environment; only the first variable is loaded, and the others are not.
**Browser and Version**
- Browser: Edge
- Version: 126.0.2592.81
**To Reproduce**
Steps to reproduce the behavior:
1. my .env file: includes the lines:
```
LANGFLOW_STORE_ENVIRONMENT_VARIABLES = true
LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT='["default_gcp_project", "default_gcp_location", "default_gcp_dataset"]'
```
2. launch Langflow: `python -m langflow run --components-path /src_backend_platform/ --env-file /env/.env`
3. Only the first variable is loaded in Langflow: `default_gcp_project`
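To rule out a parsing problem, the list value can be checked outside Langflow. A small sketch; the comma-separated fallback is an assumption about alternative formats, not Langflow's actual behavior:
```python
import json
import os

raw = os.getenv("LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT", "[]")
try:
    names = json.loads(raw)  # expects a JSON list: '["a", "b", "c"]'
except json.JSONDecodeError:
    names = [v.strip() for v in raw.split(",")]
print(names)  # should print all three variable names
```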
**Screenshots**

| null | https://github.com/langflow-ai/langflow/pull/2971 | null | {'base_commit': 'b2e40ec92f236043684ac542b9be1c77faa664fe', 'files': [{'path': 'src/backend/base/langflow/__main__.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [7, 12]}, "(None, 'run', 78)": {'mod': [130, 134, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 153, 154, 155, 156]}}}, {'path': 'src/backend/base/langflow/services/settings/factory.py', 'status': 'modified', 'Loc': {"('SettingsServiceFactory', None, 5)": {'add': [5]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/backend/base/langflow/__main__.py",
"src/backend/base/langflow/services/settings/factory.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
langflow-ai | langflow | 395c2d7372dffcf1d4f9577f623a2966183595d9 | https://github.com/langflow-ai/langflow/issues/1995 | bug | Unable to upload file to folder | **Describe the bug**
Unable to upload a file to a folder due to a `KeyError` when selecting `folder_name` from the flow. I see that a recent commit has made it so each flow will have a default folder name if none exists, but this is not backwards-compatible with existing flows made in <=v1.0.0a38. Also, I would have expected the payload to send _my_ folder that I want the flow created in, not for it to create a folder in my store.
(EDIT: I see that that recent change was just for making sure the flow was _in_ a folder, but it does not export that folder name in the JSON, so that concern is invalid. It seems we just need to pass the selected folder to the /folders/upload endpoint.)
https://github.com/langflow-ai/langflow/blob/543e8d52afbb8e64ae22255909f4453484b2bb07/src/backend/base/langflow/api/v1/folders.py#L194
**Browser and Version**
- Browser [e.g. chrome, safari] firefox
- Version [e.g. 22] v1.0.0a38
**To Reproduce**
Steps to reproduce the behavior:
1. pip install
2. run
3. create folder
4. upload a flow
5. see logs
**logs**
```
│ langflow/.venv/lib/python3.11/site-packages/langflow/api/v1/folders.py:209 in upload_file │
│ │
│ 206 │ │
│ 207 │ folder_results = session.exec( │
│ 208 │ │ select(Folder).where( │
│ ❱ 209 │ │ │ Folder.name == data["folder_name"], │
│ 210 │ │ │ Folder.user_id == current_user.id, │
│ 211 │ │ ) │
│ 212 │ ) │
╰──────────────────────────────────────────────────────────
KeyError: 'folder_name'
```
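A hedged sketch of the guarded lookup suggested in the fix notes below; `data`, `session`, `Folder`, and `current_user` mirror the snippet in the logs, and the fallback folder name is a placeholder:
```python
# Fall back gracefully instead of raising KeyError when the payload
# has no "folder_name" (e.g. flows exported by <= v1.0.0a38).
folder_name = data.get("folder_name", "My Projects")  # default is illustrative
folder_results = session.exec(
    select(Folder).where(
        Folder.name == folder_name,
        Folder.user_id == current_user.id,
    )
)
```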
**Fix(?)**
* Pass currently selected folder to payload when uploading
* Use that as the folder name in `/folders/upload`
* Add test for `/folders/upload` | null | https://github.com/langflow-ai/langflow/pull/2125 | null | {'base_commit': '395c2d7372dffcf1d4f9577f623a2966183595d9', 'files': [{'path': 'src/frontend/src/components/sidebarComponent/components/sideBarFolderButtons/index.tsx', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [126]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "3",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/frontend/src/components/sidebarComponent/components/sideBarFolderButtons/index.tsx"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
langflow-ai | langflow | 9c69d134c9a4a34865d44e6f37a2c513c3a49969 | https://github.com/langflow-ai/langflow/issues/6880 | bug | Traces not being captured anymore | ### Bug Description
Hello there,
The recent changes to the build process caused `end_all_traces` to no longer be called; as a result, the final calls that send the traces to langwatch, langfuse, etc. are never made:
https://github.com/langflow-ai/langflow/pull/5940#issuecomment-2685617499
### Reproduction
1. Set up LANGWATCH_API_KEY
2. Traces are not arriving to langwatch
3. Add print statements on `end_all_traces`
4. Verify it's not being called
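Step 3 can also be done without editing the installed package. A hedged monkeypatch sketch; the import path, method name, and async signature are assumptions based on this report:
```python
from langflow.services.tracing.service import TracingService

_original = TracingService.end_all_traces  # assumed method name

async def _instrumented(self, *args, **kwargs):
    print(">>> end_all_traces called")  # never prints if the bug is present
    return await _original(self, *args, **kwargs)

TracingService.end_all_traces = _instrumented
```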
### Expected behavior
`end_all_traces` should be called during executions on the langflow canvas, playground and api
### Who can help?
@edwinjosechittilappilly @ogabrielluiz @italojohnny
### Operating System
Mac OSX
### Langflow Version
15.3
### Python Version
3.11
### Screenshot
_No response_
### Flow File
_No response_ | null | https://github.com/langflow-ai/langflow/pull/6991 | null | {'base_commit': '9c69d134c9a4a34865d44e6f37a2c513c3a49969', 'files': [{'path': 'src/backend/base/langflow/api/build.py', 'status': 'modified', 'Loc': {"(None, 'generate_flow_events', 145)": {'add': [427]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/backend/base/langflow/api/build.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
langflow-ai | langflow | b1a552fa9ed7d4c4eabb90642f4b81f24775f676 | https://github.com/langflow-ai/langflow/issues/4212 | bug | Langflow traces are not well reflected in Langfuse | ### Bug Description
Not able to see Langflow components' inputs and outputs in Langfuse tracing.
For instance, for a given component, you will see the component's Python code instead of the input data, and the output is completely empty.
See screenshot below.
### Reproduction
1. create a simple flow with chat input, prompt, and a chat output! flow example [reproduce _error_langfuse (1).json](https://github.com/user-attachments/files/17446337/reproduce._error_langfuse.1.json)
2. connect to Langfuse, and see trace.
3. notice that the components' inputs contain the Python code of the component, not the data that was passed in
4. notice that the component output is empty
see screenshot:

### Expected behavior
I'm expecting to see input data (not the python code) and output data of each components.
### Who can help?
@italojohnny
### Operating System
Ubuntu
### Langflow Version
1.0.19
### Python Version
3.10
### Screenshot

### Flow File
[reproduce _error_langfuse (1).json](https://github.com/user-attachments/files/17446337/reproduce._error_langfuse.1.json)
| null | https://github.com/langflow-ai/langflow/pull/4669 | null | {'base_commit': 'b1a552fa9ed7d4c4eabb90642f4b81f24775f676', 'files': [{'path': 'src/backend/base/langflow/api/utils.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [144], 'mod': [20]}, "(None, 'build_graph_from_data', 145)": {'add': [146]}}}, {'path': 'src/backend/base/langflow/api/v1/chat.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [13], 'mod': [45]}, "(None, 'build_graph_and_get_order', 161)": {'mod': [169]}}}, {'path': 'src/backend/base/langflow/custom/custom_component/component.py', 'status': 'modified', 'Loc': {"('Component', 'get_trace_as_inputs', 798)": {'mod': [804, 805, 806]}}}, {'path': 'src/backend/base/langflow/services/tracing/langwatch.py', 'status': 'modified', 'Loc': {"('LangWatchTracer', '__init__', 26)": {'add': [43]}}}, {'path': 'src/backend/base/langflow/services/tracing/service.py', 'status': 'modified', 'Loc': {"('TracingService', '_end_traces', 174)": {'add': [186]}, "('TracingService', '_end_all_traces', 188)": {'add': [194]}, "('TracingService', 'end', 196)": {'mod': [198]}, "('TracingService', '_end_and_reset', 235)": {'mod': [239]}}}, {'path': 'src/backend/tests/unit/events/test_event_manager.py', 'status': 'modified', 'Loc': {"('TestEventManager', None, 11)": {'mod': [39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 50, 51, 52]}, "('TestEventManager', 'test_handling_large_number_of_events', 72)": {'mod': [73]}, "('TestEventManager', 'test_performance_impact_frequent_registrations', 136)": {'mod': [137]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/backend/base/langflow/api/v1/chat.py",
"src/backend/base/langflow/api/utils.py",
"src/backend/base/langflow/services/tracing/langwatch.py",
"src/backend/base/langflow/custom/custom_component/component.py",
"src/backend/base/langflow/services/tracing/service.py"
],
"doc": [],
"test": [
"src/backend/tests/unit/events/test_event_manager.py"
],
"config": [],
"asset": []
} | 1 |
langflow-ai | langflow | 06ea6c408bd5da392aa3650f9d04be0804742525 | https://github.com/langflow-ai/langflow/issues/1890 | bug | getting a column size error when using MySQL. | **Describe the bug**
I'm getting a column size error when using MySQL.
It seems to be triggered during the initial setup of the starter_projects
```
sqlalchemy.exc.DataError: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely)
(pymysql.err.DataError) (1406, "Data too long for column 'description' at row 1")
[SQL: INSERT INTO flow (name, description, icon, icon_bg_color, is_component, updated_at, folder, id, data, user_id) VALUES (%(name)s, %(description)s, %(icon)s,
%(icon_bg_color)s, %(is_component)s, %(updated_at)s, %(folder)s, %(id)s, %(data)s, %(user_id)s)]
[parameters: {'name': 'Basic Prompting (Hello, World)', 'description': 'This flow will get you experimenting with the basics of the UI, the Chat and the Prompt component.
\n\nTry changing the Template in it to see how the ... (26 characters truncated) ... change it to this and a Text Input into the `type_of_person` variable : "Answer the
user as if you were a pirate.\n\nUser: {user_input}\n\nAnswer: " ', 'icon': '', 'icon_bg_color': None, 'is_component': 0, 'updated_at': datetime.datetime(2024, 5, 14, 9,
36, 35, 63929, tzinfo=datetime.timezone.utc), 'folder': 'Starter Projects', 'id': '5d216c873f0f4c5a98ec1c85438a90f4', 'data': '{"nodes": [{"id": "Prompt-uxBqP", "type":
"genericNode", "position": {"x": 53.588791333410654, "y": -107.07318910019967}, "data": {"type": "Prompt", ... (24099 characters truncated) ...
153Text\\u0153],\\u0153type\\u0153:\\u0153str\\u0153}"}], "viewport": {"x": 260.58251815500563, "y": 318.2261172111936, "zoom": 0.43514115784696294}}', 'user_id': None}]
(Background on this error at: https://sqlalche.me/e/20/9h9h)
```
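The insert fails because `description` maps to a length-limited `VARCHAR` on MySQL, while the starter-project descriptions are longer. A hedged sketch of a model-level fix using an unbounded `TEXT` column; the class shape is illustrative, not the exact Langflow model:
```python
from typing import Optional

from sqlalchemy import Column, Text
from sqlmodel import Field, SQLModel

class FlowBase(SQLModel):
    name: str
    # TEXT has no short length cap, so long starter-project
    # descriptions also fit on MySQL.
    description: Optional[str] = Field(default=None, sa_column=Column(Text))
```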
**Screenshots**
**Additional context**
plz help
| null | https://github.com/langflow-ai/langflow/pull/3431 | null | {'base_commit': '06ea6c408bd5da392aa3650f9d04be0804742525', 'files': [{'path': 'src/backend/base/langflow/services/database/models/flow/model.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [13]}, "('FlowBase', None, 26)": {'mod': [28]}}}, {'path': 'src/backend/base/langflow/services/database/models/folder/model.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [4, 5]}, "('FolderBase', None, 14)": {'mod': [16]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"src/backend/base/langflow/services/database/models/flow/model.py",
"src/backend/base/langflow/services/database/models/folder/model.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
Significant-Gravitas | AutoGPT | ee8276c6b9ac8abcf847a12bc5e6cb5e66079115 | https://github.com/Significant-Gravitas/AutoGPT/issues/655 | Trivial focused PRs | ### Duplicates
- [X] I have searched the existing issues
### Summary 💡
There are many trivial PRs which are harmless and focused enough, so it's worth merging them in a batch to reduce the backlog. These are ALL minor, mergeable fixes at the moment (2023-04-10T10:57Z)
### Examples 🌈
- #33 - The oldest open PR. Now made focused, only adds documentation strings, with very minor extra whitespace
- #115 - Better error messages if OpenAI keys are missing from the config. A single check at the startup. Safe and focused.
- #126 - Just a typo in Readme. 3 chars diff, 1 line
- #179 - Windows beginners don't like $ in the commandline example. 2 chars 1 line diff.
- #226 - Just a single COPY Dockerfile instruction people repeat in PRs over and over. 1 line added diff
- #317 - A trivial improvement of the Readme.
- #338 - A trivial safety warning in the Readme. 1 line diff
- #378 - A trivial typo (and the file ending change is ok, some editors correctly keep fixing it)
- #421 - Documentation for 2 classes. Minor extra whitespace fixes, can be merged IMO
- #457 - A pretty small addition of the debug option
- #579 - A trivial link mistake. 5 char diff
- #590 - The whitespace fixes everybody keeps repeating.
- #611 - A single line addition to .gitignore everybody keeps repeating
- #615 - A single line 7 character fix to Windows setx invocation in the Readme
- #649 - A minor fixup to already merged #575. 10 chars on one line.
### Motivation 🔦
We need to improve the backlog by merging as many PRs a day as possible. Many PRs were bad but thanks to our efforts are pinpointed now. Also, big changes tend to include these small fixes, so by applying the small ones we will improve the big ones too. | null | https://github.com/Significant-Gravitas/AutoGPT/pull/33 | null | {'base_commit': 'da6c0240de37725780f59eb8da7c36a9e810ae5c', 'files': [{'path': 'scripts/agent_manager.py', 'status': 'modified', 'Loc': {"(None, 'create_agent', 9)": {'add': [9]}, "(None, 'message_agent', 34)": {'add': [34]}, "(None, 'list_agents', 54)": {'add': [54]}, "(None, 'delete_agent', 61)": {'add': [61]}}}, {'path': 'scripts/ai_config.py', 'status': 'modified', 'Loc': {"('AIConfig', None, 5)": {'add': [5, 6, 29, 34]}, "('AIConfig', 'load', 15)": {'mod': [16]}}}, {'path': 'scripts/ai_functions.py', 'status': 'modified', 'Loc': {"(None, 'evaluate_code', 10)": {'add': [10]}, "(None, 'improve_code', 22)": {'add': [22]}, "(None, 'write_tests', 36)": {'add': [36]}}}, {'path': 'scripts/browse.py', 'status': 'modified', 'Loc': {"(None, 'scrape_text', 8)": {'add': [8]}, "(None, 'extract_hyperlinks', 35)": {'add': [35]}, "(None, 'format_hyperlinks', 42)": {'add': [42]}, "(None, 'scrape_links', 49)": {'add': [49]}, "(None, 'split_text', 66)": {'add': [66]}, "(None, 'create_message', 84)": {'add': [84]}, "(None, 'summarize_text', 90)": {'add': [90]}}}, {'path': 'scripts/call_ai_function.py', 'status': 'modified', 'Loc': {"(None, 'call_ai_function', 8)": {'add': [8]}}}, {'path': 'scripts/chat.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [10], 'mod': [7]}, "(None, 'chat_with_ai', 44)": {'add': [50]}}}, {'path': 'scripts/commands.py', 'status': 'modified', 'Loc': {"(None, 'get_command', 27)": {'add': [27]}, "(None, 'execute_command', 55)": {'add': [55]}, "(None, 'get_datetime', 120)": {'add': [120]}, "(None, 'google_search', 125)": {'add': [125]}, "(None, 'google_official_search', 132)": {'add': [132]}, "(None, 'browse_website', 167)": {'add': [167]}, "(None, 'get_text_summary', 180)": {'add': [180]}, "(None, 'get_hyperlinks', 186)": {'add': [186]}, "(None, 'commit_memory', 191)": {'add': [191]}, "(None, 'delete_memory', 197)": {'add': [197]}, "(None, 'overwrite_memory', 208)": {'add': [208]}, "(None, 'shutdown', 234)": {'add': [234]}, "(None, 'start_agent', 239)": {'add': [239]}, "(None, 'message_agent', 262)": {'add': [262]}, "(None, 'list_agents', 280)": {'add': [280]}, "(None, 'delete_agent', 284)": {'add': [284]}}}, {'path': 'scripts/config.py', 'status': 'modified', 'Loc': {"('Singleton', None, 9)": {'add': [16]}, "('Config', None, 28)": {'add': [33, 79, 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115]}}}, {'path': 'scripts/data.py', 'status': 'modified', 'Loc': {"(None, 'load_prompt', 4)": {'add': [4]}}}, {'path': 'scripts/execute_code.py', 'status': 'modified', 'Loc': {"(None, 'execute_python_file', 5)": {'add': [5]}}}, {'path': 'scripts/file_operations.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [6]}, "(None, 'safe_join', 11)": {'add': [11]}, "(None, 'read_file', 21)": {'add': [21]}, "(None, 'write_to_file', 31)": {'add': [31]}, "(None, 'append_to_file', 44)": {'add': [44]}, "(None, 'delete_file', 54)": {'add': [54]}}}, {'path': 'scripts/json_parser.py', 'status': 'modified', 'Loc': {"(None, 'fix_and_parse_json', 29)": {'add': [32]}, "(None, 'fix_json', 74)": {'add': [74]}}}, {'path': 'scripts/llm_utils.py', 'status': 'modified', 'Loc': {"(None, 'create_chat_completion', 8)": {'add': [8]}}}, {'path': 'scripts/main.py', 
'status': 'modified', 'Loc': {"(None, 'print_to_console', 21)": {'add': [27]}, "(None, 'print_assistant_thoughts', 48)": {'add': [48]}, "(None, 'construct_prompt', 161)": {'add': [161]}, "(None, 'prompt_user', 189)": {'add': [189]}, "(None, 'parse_arguments', 241)": {'add': [241]}, "(None, 'load_variables', 107)": {'mod': [108]}}}, {'path': 'scripts/speak.py', 'status': 'modified', 'Loc': {"(None, 'eleven_labs_speech', 17)": {'add': [17]}}}, {'path': 'scripts/spinner.py', 'status': 'modified', 'Loc': {"('Spinner', None, 7)": {'add': [7, 8, 15, 22, 27]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"scripts/llm_utils.py",
"scripts/file_operations.py",
"scripts/commands.py",
"scripts/data.py",
"scripts/speak.py",
"scripts/json_parser.py",
"scripts/config.py",
"scripts/main.py",
"scripts/call_ai_function.py",
"scripts/ai_functions.py",
"scripts/chat.py",
"scripts/spinner.py",
"scripts/execute_code.py",
"scripts/ai_config.py",
"scripts/agent_manager.py",
"scripts/browse.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
Significant-Gravitas | AutoGPT | 9150f32f8b8602395534795ddd2d930a1684e419 | https://github.com/Significant-Gravitas/AutoGPT/issues/4634 | Ver 0.4, and still get the error of "This model's maximum context length is 4097 tokens" | ### ⚠️ Search for existing issues first ⚠️
- [X] I have searched the existing issues, and there is no existing issue for my problem
### Which Operating System are you using?
Windows
### Which version of Auto-GPT are you using?
Latest Release
### Do you use OpenAI GPT-3 or GPT-4?
GPT-3.5
### Which area covers your issue best?
Commands
### Describe your issue.
AutoGPT crashes in the same scenario as v0.3.1 when working with "large" local files.
### Upload Activity Log Content
_No response_
### Upload Error Log Content
Traceback (most recent call last): File "/usr/local/lib/python3.10/runpy.py", line 196, in _run_module_as_main return _run_code(code, main_globals, None, File "/usr/local/lib/python3.10/runpy.py", line 86, in _run_code exec(code, run_globals) File "/workspace/Auto-GPT/autogpt/__main__.py", line 5, in <module> autogpt.cli.main() File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1635, in invoke rv = super().invoke(ctx) File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/vscode/.local/lib/python3.10/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/home/vscode/.local/lib/python3.10/site-packages/click/decorators.py", line 26, in new_func return f(get_current_context(), *args, **kwargs) File "/workspace/Auto-GPT/autogpt/cli.py", line 96, in main run_auto_gpt( File "/workspace/Auto-GPT/autogpt/main.py", line 197, in run_auto_gpt agent.start_interaction_loop() File "/workspace/Auto-GPT/autogpt/agent/agent.py", line 130, in start_interaction_loop assistant_reply = chat_with_ai( File "/workspace/Auto-GPT/autogpt/llm/chat.py", line 112, in chat_with_ai new_summary_message, trimmed_messages = agent.history.trim_messages( File "/workspace/Auto-GPT/autogpt/memory/message_history.py", line 79, in trim_messages new_summary_message = self.update_running_summary( File "/workspace/Auto-GPT/autogpt/memory/message_history.py", line 194, in update_running_summary self.summary = create_chat_completion(prompt) File "/workspace/Auto-GPT/autogpt/llm/utils/__init__.py", line 53, in metered_func return func(*args, **kwargs) File "/workspace/Auto-GPT/autogpt/llm/utils/__init__.py", line 87, in _wrapped return func(*args, **kwargs) File "/workspace/Auto-GPT/autogpt/llm/utils/__init__.py", line 235, in create_chat_completion response = api_manager.create_chat_completion( File "/workspace/Auto-GPT/autogpt/llm/api_manager.py", line 61, in create_chat_completion response = openai.ChatCompletion.create( File "/home/vscode/.local/lib/python3.10/site-packages/openai/api_resources/chat_completion.py", line 25, in create return super().create(*args, **kwargs) File "/home/vscode/.local/lib/python3.10/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create response, _, api_key = requestor.request( File "/home/vscode/.local/lib/python3.10/site-packages/openai/api_requestor.py", line 226, in request resp, got_stream = self._interpret_response(result, stream) File "/home/vscode/.local/lib/python3.10/site-packages/openai/api_requestor.py", line 619, in _interpret_response self._interpret_response_line( File "/home/vscode/.local/lib/python3.10/site-packages/openai/api_requestor.py", line 682, in _interpret_response_line raise self.handle_error_response( openai.error.InvalidRequestError: This model's maximum context length is 4097 tokens. However, your messages resulted in 4904 tokens. Please reduce the length of the messages. Press any key to continue... 
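The failing call is the running-summary prompt in `update_running_summary`, which itself grows past the 4097-token window. A hedged sketch of batching messages by token count before summarizing, assuming `tiktoken`; the budget number is illustrative:
```python
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def batch_by_tokens(texts: list[str], budget: int = 3000) -> list[list[str]]:
    # Group texts so each summarization prompt stays under the token budget.
    batches: list[list[str]] = []
    current: list[str] = []
    used = 0
    for text in texts:
        n = len(enc.encode(text))
        if current and used + n > budget:
            batches.append(current)
            current, used = [], 0
        current.append(text)
        used += n
    if current:
        batches.append(current)
    return batches
```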
| null | https://github.com/Significant-Gravitas/AutoGPT/pull/4652 | null | {'base_commit': '9150f32f8b8602395534795ddd2d930a1684e419', 'files': [{'path': 'autogpt/memory/message_history.py', 'status': 'modified', 'Loc': {"('MessageHistory', 'update_running_summary', 123)": {'add': [169], 'mod': [172, 179, 180, 181, 182, 183, 204]}, '(None, None, None)': {'mod': [17]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"autogpt/memory/message_history.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
Significant-Gravitas | AutoGPT | a0ecb969589ac5f5172fb543190ca7ecf4803059 | https://github.com/Significant-Gravitas/AutoGPT/issues/8887 | good first issue | Move to a single source of truth for docs — Remove duplicate info from readme | autogpt_platform\\backend\\README.advanced.md and autogpt_platform\\backend\\README.md. We should just point people to the docs directory (docs/platform/advanced_setup|getting-started) in these. Check the content is all in that file, and the normal getting started, then remove these two files and replace with a link to the docs site — the dev-docs.agpt.co and docs.agpt.co. Call out both and their branch match for released master vs dev branch | null | https://github.com/Significant-Gravitas/AutoGPT/pull/9580 | null | {'base_commit': 'a0ecb969589ac5f5172fb543190ca7ecf4803059', 'files': [{'path': 'autogpt_platform/backend/README.advanced.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1, 3, 5, 7, 9, 10, 11, 12, 14, 15, 16, 17, 19, 21, 22, 23, 25, 27, 28, 29, 31, 33, 34, 35, 37, 39, 40, 41, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 56, 57, 58, 60, 62, 63, 64, 65, 67, 69, 71, 73, 74, 75]}}}, {'path': 'autogpt_platform/backend/README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1, 3, 4, 6, 8, 10, 12, 14, 15, 16, 17, 19, 20, 21, 22, 24, 26, 27, 28, 30, 32, 33, 34, 36, 38, 39, 40, 42, 44, 45, 46, 49, 50, 51, 52, 53, 54, 55, 56, 58, 60, 61, 62, 63, 65, 67, 69, 71, 72, 73, 75, 77, 78, 79, 80, 81, 83, 85, 87, 88, 89, 91, 93, 94, 95, 97, 99, 100, 101, 103, 105, 106, 107, 109, 111, 112, 113, 115, 117, 119, 120, 121, 123, 125, 126, 128, 129, 130, 131, 133, 134, 135, 136, 138, 139, 140, 141, 143, 145, 147, 149, 151, 153, 154, 155, 156, 157, 158, 159, 161, 163, 164, 165, 166, 168, 170, 171, 172, 174, 176, 177, 178, 179, 181, 183, 185, 186, 187, 189, 190, 192, 195, 197, 198, 199, 201, 203, 204, 205, 206, 207, 208, 209, 210]}}}, {'path': 'docs/content/platform/advanced_setup.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [66]}}}, {'path': 'docs/content/platform/getting-started.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [63, 148], 'mod': [26, 45, 132]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [
"autogpt_platform/backend/README.md",
"docs/content/platform/getting-started.md",
"autogpt_platform/backend/README.advanced.md",
"docs/content/platform/advanced_setup.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
Significant-Gravitas | AutoGPT | f9d8f728fa3c60dba80cc1c69dfef8bf748eaec4 | https://github.com/Significant-Gravitas/AutoGPT/issues/711 | Not creating pinecone index | ### Duplicates
- [X] I have searched the existing issues
### Steps to reproduce 🕹
I deleted my Pinecone index by mistake. When I fire up Auto-GPT, it does not create a new one but otherwise functions correctly. I have tried starting fresh, but the issue persists: it works fine but never triggers Pinecone.
### Current behavior 😯
I downloaded a fresh copy, filled in the .env, and it starts but does not create a Pinecone index.
### Expected behavior 🤔
Every previous time I loaded it, it created the index and then communicated with it.
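For comparison, the index bootstrap that earlier runs performed looks roughly like this. A sketch against the pre-3.x `pinecone-client` API; the key, environment, and dimension are placeholders:
```python
import pinecone

pinecone.init(api_key="<pinecone-key>", environment="<pinecone-env>")

table_name = "auto-gpt"
if table_name not in pinecone.list_indexes():
    # Recreate the index if it was deleted; 1536 matches OpenAI
    # text-embedding-ada-002 vectors.
    pinecone.create_index(table_name, dimension=1536, metric="cosine")
```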
### Your prompt 📝
```yaml
# Paste your prompt here
``` | null | https://github.com/Significant-Gravitas/AutoGPT/pull/794 | null | {'base_commit': 'f9d8f728fa3c60dba80cc1c69dfef8bf748eaec4', 'files': [{'path': '.env.template', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [19]}}}, {'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [232]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [
"README.md"
],
"test": [],
"config": [
".env.template"
],
"asset": []
} | 1 | |
Significant-Gravitas | AutoGPT | c6d90227fecec8acc1481c486a91337b07e8a820 | https://github.com/Significant-Gravitas/AutoGPT/issues/402 | Pinecone Error | ### Duplicates
- [X] I have searched the existing issues
### Steps to reproduce 🕹
```
Traceback (most recent call last):
File "scripts/main.py", line 286, in <module>
memory = PineconeMemory()
File "/Users/areebpasha/Desktop/Auto GPT copy/Auto-GPT/scripts/config.py", line 17, in __call__
cls._instances[cls] = super(
File "/Users/areebpasha/Desktop/Auto GPT copy/Auto-GPT/scripts/memory.py", line 30, in __init__
if table_name not in pinecone.list_indexes():
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/manage.py", line 185, in list_indexes
response = api_instance.list_indexes()
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/api_client.py", line 776, in __call__
return self.callable(self, *args, **kwargs)
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/api/index_operations_api.py", line 1132, in __list_indexes
return self.call_with_http_info(**kwargs)
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/api_client.py", line 838, in call_with_http_info
return self.api_client.call_api(
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/api_client.py", line 413, in call_api
return self.__call_api(resource_path, method,
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/api_client.py", line 200, in __call_api
response_data = self.request(
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/api_client.py", line 439, in request
return self.rest_client.GET(url,
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/rest.py", line 236, in GET
return self.request("GET", url,
File "/opt/anaconda3/lib/python3.8/site-packages/pinecone/core/client/rest.py", line 202, in request
r = self.pool_manager.request(method, url,
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/request.py", line 74, in request
return self.request_encode_url(
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/request.py", line 96, in request_encode_url
return self.urlopen(method, url, **extra_kw)
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/poolmanager.py", line 376, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/connectionpool.py", line 703, in urlopen
httplib_response = self._make_request(
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/connectionpool.py", line 398, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/connection.py", line 239, in request
super(HTTPConnection, self).request(method, url, body=body, headers=headers)
File "/opt/anaconda3/lib/python3.8/http/client.py", line 1255, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/opt/anaconda3/lib/python3.8/http/client.py", line 1296, in _send_request
self.putheader(hdr, value)
File "/opt/anaconda3/lib/python3.8/site-packages/urllib3/connection.py", line 224, in putheader
_HTTPConnection.putheader(self, header, *values)
File "/opt/anaconda3/lib/python3.8/http/client.py", line 1232, in putheader
if _is_illegal_header_value(values[i]):
TypeError: expected string or bytes-like object
```
### Current behavior 😯
Does not produce output.
### Expected behavior 🤔
Should work as shown in the demo. Any assistance is greatly appreciated.
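That `TypeError` in `putheader` usually means a header value is `None`, i.e. the Pinecone key or environment never made it into the config. A hedged early check; the variable names are assumptions based on the usual `.env` template:
```python
import os

missing = [name for name in ("PINECONE_API_KEY", "PINECONE_ENV") if not os.getenv(name)]
if missing:
    raise SystemExit(f"Set these in your .env before starting: {', '.join(missing)}")
```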
### Your prompt 📝
```yaml
# Paste your prompt here
``` | null | https://github.com/Significant-Gravitas/AutoGPT/pull/440 | null | {'base_commit': 'c6d90227fecec8acc1481c486a91337b07e8a820', 'files': [{'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [61]}}}, {'path': 'scripts/main.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [283]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"scripts/main.py"
],
"doc": [
"README.md"
],
"test": [],
"config": [],
"asset": []
} | 1 | |
Significant-Gravitas | AutoGPT | ad7cefa10c0647feee85114d58559fcf83ba6743 | https://github.com/Significant-Gravitas/AutoGPT/issues/1821 | SYSTEM: Command browse_website returned: Error: Message: Service /home/appuser/.wdm/drivers/chromedriver/linux64/112.0.5615.49/chromedriver unexpectedly exited. Status code was: 127 | ### Duplicates
- [X] I have searched the existing issues
### Steps to reproduce 🕹
1. Build a dockerized version of Auto-GPT
2. Give it any instruction that leads to it needing to browse_website
### Current behavior 😯
SYSTEM: Command browse_website returned: Error: Message: Service /home/appuser/.wdm/drivers/chromedriver/linux64/112.0.5615.49/chromedriver unexpectedly exited. Status code was: 127
### Expected behavior 🤔
Should be able to browse websites properly
### Your prompt 📝
```yaml
# It really could be any prompt
```
| null | https://github.com/Significant-Gravitas/AutoGPT/pull/1857 | null | {'base_commit': 'ad7cefa10c0647feee85114d58559fcf83ba6743', 'files': [{'path': 'Dockerfile', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [6]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [],
"config": [
"Dockerfile"
],
"asset": []
} | 1 | |
fastapi | fastapi | 04494991884d1eee3e111349cff5d98f37830522 | https://github.com/fastapi/fastapi/issues/119 | question
question-migrate | Enriching the auto-generated OpenAPI-Spec | **Description**
Hi there, new to FastAPI so sorry if this question has been asked elsewhere:
What is the best way to enrich the auto-generated OpenAPI spec produced by FastAPI? It currently seems to support only a few things (like changing the title or description), but if I wanted to add tags to group my endpoints, or do something fancier like adding a logo (which is supported by ReDoc via x-logo), it seems I would have to use FastAPI to generate the base specification and then add scripts that enrich this spec in a structured way.
Has anyone encountered this issue before?
**Additional context**
I already looked into the code a little and it seems that the ReDoc page shown when serving the API is generated on the fly (as is the OpenAPI schema), which seems to make custom processing of the specification to use the cooler ReDoc features harder.
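One pattern that works for this is replacing `app.openapi` with a function that builds the schema once and then mutates it. A sketch; the title, version, and `x-logo` URL are placeholders:
```python
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi

app = FastAPI()

def custom_openapi():
    if app.openapi_schema:  # build once, then serve the cached schema
        return app.openapi_schema
    schema = get_openapi(title="My API", version="1.0.0", routes=app.routes)
    schema["info"]["x-logo"] = {"url": "https://example.com/logo.png"}  # ReDoc extension
    app.openapi_schema = schema
    return schema

app.openapi = custom_openapi
```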
| null | https://github.com/fastapi/fastapi/pull/126 | null | {'base_commit': '04494991884d1eee3e111349cff5d98f37830522', 'files': [{'path': 'mkdocs.yml', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [69]}}}]} | [] | [] | [] | {
"iss_type": "3",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [
"mkdocs.yml"
],
"test": [],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | 42a4ed7a1804f631f971d05f3302d54361ebe10e | https://github.com/fastapi/fastapi/issues/3910 | question
reviewed
question-migrate | Would be nice to be able to route request using header's Accept field (or generic header's field) | ### First Check
- [X] I added a very descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the FastAPI documentation, with the integrated search.
- [X] I already searched in Google "How to X in FastAPI" and didn't find any information.
- [X] I already read and followed all the tutorial in the docs and didn't find an answer.
- [X] I already checked if it is not related to FastAPI but to [Pydantic](https://github.com/samuelcolvin/pydantic).
- [X] I already checked if it is not related to FastAPI but to [Swagger UI](https://github.com/swagger-api/swagger-ui).
- [X] I already checked if it is not related to FastAPI but to [ReDoc](https://github.com/Redocly/redoc).
### Commit to Help
- [X] I commit to help with one of those options 👆
### Example Code
```python
N.A.
```
### Description
In some cases it would be nice to be able to use header fields as routing rules.
One important example is supporting API versioning based on the header's Accept field.
### Wanted Solution
Ability to specify some header's fields in the `.get()`, `.post()` .... decorators
### Wanted Code
```python
from fastapi import FastAPI

app = FastAPI()

@app.get("/", accept="application/json;version=1.0")
async def root():
    return {"message": "Hello World v1.0"}

@app.get("/", accept="application/json;version=1.1")
async def root():
    return {"message": "Hello World v1.1"}
```
### Alternatives
from fastapi import FastAPI

app = FastAPI()

@app.get("/", headers={"accept": "application/json;version=1.0"})
async def root():
    return {"message": "Hello World v1.0"}

@app.get("/", headers={"accept": "application/json;version=1.1"})
async def root():
    return {"message": "Hello World v1.1"}
### Operating System
macOS
### Operating System Details
_No response_
### FastAPI Version
python -c "import fastapi; print(fastapi.__version__)"
### Python Version
Python 3.9.7
### Additional Context
_No response_ | null | https://github.com/fastapi/fastapi/pull/4727 | null | {'base_commit': '42a4ed7a1804f631f971d05f3302d54361ebe10e', 'files': [{'path': 'fastapi/openapi/utils.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [2], 'mod': [5]}, "(None, 'get_openapi', 393)": {'add': [448], 'mod': [434]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "3",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"fastapi/openapi/utils.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | ea8d7f689efcb0ddf28f4686fa3be90c2154503b | https://github.com/fastapi/fastapi/issues/1628 | feature
answered
reviewed | pass method return value as positional parameters to Response | Recently I've noticed that `status_code` parameter of the next statement is useless:
```python3
@router.get('/callback/{provider}', status_code=303,
            response_description='Redirect to the application login',
            response_class=RedirectResponse)
```
Because I always have to create `RedirectResponse` objects manually:
```python3
return RedirectResponse(target.include_query_params(error=f'{provider}_{error_reason}'),
                        status_code=303)
```
I've tried playing with return values and noticed that the return value is always passed to the `response_class` instance as the `content` parameter:
```python3
@app.get('/test', status_code=303, response_class=RedirectResponse)
async def test():
    return '/'
```
this snippet produces the following exception:
```
INFO: 127.0.0.1:43878 - "GET /test HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/fastapi/applications.py", line 171, in __call__
await super().__call__(scope, receive, send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/applications.py", line 102, in __call__
await self.middleware_stack(scope, receive, send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc from None
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc from None
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/routing.py", line 550, in __call__
await route.handle(scope, receive, send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/routing.py", line 227, in handle
await self.app(scope, receive, send)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/starlette/routing.py", line 41, in app
response = await func(request)
File "/home/dmig/.pyenv/versions/fastapi-sandbox/lib/python3.8/site-packages/fastapi/routing.py", line 217, in app
response = response_class(
TypeError: __init__() got an unexpected keyword argument 'content'
```
I didn't dive into the code yet, so this is not a PR but a request. But the idea is simple:
```python3
response = response_class(*(endpoint_result if isinstance(endpoint_result, (tuple, list)) else (endpoint_result,)))
```
Or maybe even more complex logic: pass `**endpoint_result` if `endpoint_result` is a `Mapping`, pass `*endpoint_result` if it is an `Iterable` or else pass it as a `*tuple(endpoint_result)` | null | https://github.com/fastapi/fastapi/pull/3457 | null | {'base_commit': 'ea8d7f689efcb0ddf28f4686fa3be90c2154503b', 'files': [{'path': 'docs/en/docs/advanced/custom-response.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [163, 167, 205]}}}, {'path': 'docs_src/custom_response/tutorial006.py', 'status': 'modified', 'Loc': {"(None, 'read_typer', 8)": {'mod': [8]}}}, {'path': 'fastapi/applications.py', 'status': 'modified', 'Loc': {"('FastAPI', 'add_api_route', 203)": {'mod': [209]}, "('FastAPI', 'api_route', 256)": {'mod': [261]}, "('FastAPI', 'get', 349)": {'mod': [354]}, "('FastAPI', 'put', 398)": {'mod': [403]}, "('FastAPI', 'post', 447)": {'mod': [452]}, "('FastAPI', 'delete', 496)": {'mod': [501]}, "('FastAPI', 'options', 545)": {'mod': [550]}, "('FastAPI', 'head', 594)": {'mod': [599]}, "('FastAPI', 'patch', 643)": {'mod': [648]}, "('FastAPI', 'trace', 692)": {'mod': [697]}}}, {'path': 'fastapi/openapi/utils.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [1]}, "(None, 'get_openapi_path', 168)": {'mod': [221]}}}, {'path': 'fastapi/routing.py', 'status': 'modified', 'Loc': {"(None, 'get_request_handler', 154)": {'mod': [157]}, "(None, 'app', 176)": {'mod': [235, 236, 237, 238, 239]}, "('APIRoute', '__init__', 290)": {'mod': [296]}, "('APIRouter', 'add_api_route', 466)": {'mod': [472]}, "('APIRouter', 'api_route', 539)": {'mod': [544]}, "('APIRouter', 'get', 717)": {'mod': [722]}, "('APIRouter', 'put', 767)": {'mod': [772]}, "('APIRouter', 'post', 817)": {'mod': [822]}, "('APIRouter', 'delete', 867)": {'mod': [872]}, "('APIRouter', 'options', 917)": {'mod': [922]}, "('APIRouter', 'head', 967)": {'mod': [972]}, "('APIRouter', 'patch', 1017)": {'mod': [1022]}, "('APIRouter', 'trace', 1067)": {'mod': [1072]}}}, {'path': 'tests/test_tutorial/test_custom_response/test_tutorial006.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [7]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"docs_src/custom_response/tutorial006.py",
"fastapi/openapi/utils.py",
"fastapi/routing.py",
"fastapi/applications.py"
],
"doc": [
"docs/en/docs/advanced/custom-response.md"
],
"test": [
"tests/test_tutorial/test_custom_response/test_tutorial006.py"
],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | 4638b2c64e259b90bef6a44748e00e405825a111 | https://github.com/fastapi/fastapi/issues/5646 | question
question-migrate | Bad encoding in query parameters with new TestClient using httpx.Client | ### First Check
- [X] I added a very descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the FastAPI documentation, with the integrated search.
- [X] I already searched in Google "How to X in FastAPI" and didn't find any information.
- [X] I already read and followed all the tutorial in the docs and didn't find an answer.
- [X] I already checked if it is not related to FastAPI but to [Pydantic](https://github.com/samuelcolvin/pydantic).
- [X] I already checked if it is not related to FastAPI but to [Swagger UI](https://github.com/swagger-api/swagger-ui).
- [X] I already checked if it is not related to FastAPI but to [ReDoc](https://github.com/Redocly/redoc).
### Commit to Help
- [X] I commit to help with one of those options 👆
### Example Code
```python
import logging
from fastapi import FastAPI
app = FastAPI()
@app.get("/example")
async def _show_encoding_error(look_for: str):
return {"found": look_for}
if __name__ == '__main__':
from fastapi.testclient import TestClient
with TestClient(app) as client:
params = {"look_for": "plain text"}
resp = client.get("/example", params=params).json()
logging.warning(resp)
assert resp["found"] == "plain text"
params = {"look_for": "España"}
resp = client.get("/example", params=params).json()
logging.warning(resp)
assert resp["found"] == "España", resp["found"]
```
### Description
After the change to `httpx` for the `TestClient` in **v0.87.0**, query parameters are not properly encoded when sending requests with it, and strings arrive corrupted in the endpoints.
The example app works as expected if called from the SwaggerUI or from another python process using a _plain_ `httpx.Client`, so it appears something broke with the new wrapping for `TestClient` 🥲
```python
import httpx
params = {"look_for": "España"}
with httpx.Client(base_url="http://localhost:8000/") as client:
resp = client.get("/example", params=params).json()
assert resp["found"] == "España"
```
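Incidentally, the corruption pattern matches classic UTF-8 bytes being re-decoded as Latin-1, which is easy to reproduce on its own (an illustration of the symptom, not of the `TestClient` code path):
```python
# UTF-8 encoding followed by a Latin-1 decode reproduces the garbling.
assert "España".encode("utf-8").decode("latin-1") == "EspaÃ±a"
assert "Formalización".encode("utf-8").decode("latin-1") == "FormalizaciÃ³n"
```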
### Operating System
macOS
### Operating System Details
M1, running arm64 arch
### FastAPI Version
0.87.0
### Python Version
Python 3.10.5
### Additional Context
starlette-0.21
httpx-0.23.0
Discovered when trying to migrate the test suite for a ~big project previously using fastapi-0.85.1 + starlette-0.20.4.
All minor syntax changes from the old `requests` to the new `httpx` were under control, but in one unit test, **a string with an accent** was making a search fail without results (the test sends "Formalización" but the endpoint receives **"Formalización"** 😱), and it was driving me crazy 😅 | null | https://github.com/fastapi/fastapi/pull/5659 | null | {'base_commit': '4638b2c64e259b90bef6a44748e00e405825a111', 'files': [{'path': 'pyproject.toml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [42]}}}, {'path': 'tests/test_starlette_urlconvertors.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [21, 47], 'mod': [1]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [
"tests/test_starlette_urlconvertors.py"
],
"config": [
"pyproject.toml"
],
"asset": []
} | 1 |
fastapi | fastapi | 3127bc4e05b72e39d1681735ec1ee49844b7dc88 | https://github.com/fastapi/fastapi/issues/1972 | feature
lang-all
lang-fr | French translations | # Bonjour 🇫🇷
Welcome to the issue that coordinates the French translation effort.
## Purpose
- Avoiding several people working on the same document at the same time.
The first person who declares that he/she is working on a translation gets the responsibility to carry it out. If a PR seems to be stalled, we can discuss a transfer of responsibility here.
- Enforcing best practices
Best practices are listed later in this description. You can propose your practice at any time, ideally with a supporting source and an example.
Defining and sharing best practices will help to avoid common mistakes and will allow faster and easier reviews.
- Help build the community
Do not hesitate to ask any questions regarding the French translation effort here. The stronger the community, the more effective we will be and the more we will enjoy it.
- Provide a French translation for this awesome library (last but not least)
If you are here, you probably like **FastAPI**, and maybe you even speak French. Giving more people the opportunity to get started with the documentation in their native language will encourage adoption. In that spirit, let's contribute to the magic of open source in this way.
## How to contribute
### Review
Keep in mind that the easiest way to participate is to review the PRs. We need to avoid accumulating PRs waiting for review.
### Translate
If you are not familiar with contributing to open source projects have a look at https://github.com/firstcontributions/first-contributions.
In any case, take a look at the documentation section related to the [contribution](https://fastapi.tiangolo.com/contributing/#development-contributing) and more precisely the part about the [documentation](https://fastapi.tiangolo.com/contributing/#docs).
Once you have decided to translate a document, make yourself known by leaving a message here (eg. https://github.com/tiangolo/fastapi/issues/1972#issuecomment-702956335).
### Organize
If you wish, your energy is welcome to help with the organization. Bringing together motivated people and helping them get the job done is essential. Moreover, we can surely learn a lot from the translation work on other, much more advanced languages, and we can have a significant impact if we put good processes in place that help the whole community.
## Good practices
- technical terms
Technical terms do not need to be translated. It is also a question of common sense: in some cases English is preferable because the French version is not in common use.
See: https://github.com/tiangolo/fastapi/issues/1972#issuecomment-715500921
- punctuation and typography
For example, missing whitespace before/after punctuation. You can rely on [this page](https://leconjugueur.lefigaro.fr/ukponctuationtypographie.php) to help you.
see: https://github.com/tiangolo/fastapi/pull/1973#issuecomment-1186304199
- structure the PR by commit
Splitting the work into commits eases the review and helps track changes to the original documentation while the PR is open.
(see: [example](https://github.com/tiangolo/fastapi/pull/2234/commits)).
The first commit should only contain a copy of the English version of the document to the French one, with exactly the same content (eg. 30f1dd6966ceedd9e8bea2d7aac7bbded9bbc568).
The second one is dedicated to the index update (eg. 8ff5f7a6d4510819f95d570ac6a1d3279e2595ed).
And starting from this point you can begin the translation. Notice that, thanks to this structure, we can directly compare the two languages (eg. 3729f5b1c2bc858b15266aa4eae21bce07eb04c0).
Also, if the English document gets updated, we just have to update the first commit and the conflicts will reveal the updated parts of the document 🪄
## Recommended tools
- https://www.deepl.com
- https://www.linguee.fr
- https://www.wordreference.com
- https://french.stackexchange.com | null | https://github.com/fastapi/fastapi/pull/3103 | null | {'base_commit': '3127bc4e05b72e39d1681735ec1ee49844b7dc88', 'files': [{'path': 'docs/fr/mkdocs.yml', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [58]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [
"docs/fr/mkdocs.yml"
],
"test": [],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | 3ff504f03fb5ba852def5a0a41653c6bed7efb1b | https://github.com/fastapi/fastapi/issues/713 | feature
confirmed
answered
reviewed | Support body in GET and other methods with undefined behavior | **Describe the bug**
In the new version of the REST API specification, GET methods can have a body, but FastAPI does not add it to the Swagger spec.
**To Reproduce**
Steps to reproduce the behavior:
Create a GET method with some body parameters.
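For example, a route like this (a minimal sketch; the path and parameter are made up) is accepted at runtime, yet its body was missing from the generated spec:
```python
from fastapi import Body, FastAPI

app = FastAPI()

@app.get("/items")
async def read_items(payload: dict = Body(...)):
    # The body is parsed and usable here, but GET bodies were not
    # documented in the generated OpenAPI schema.
    return payload
```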
**Expected behavior**
Body parameters are present in the specification.
**Environment:**
- OS: Windows
- FastAPI Version: 0.42.0
- Python version, get it with: 3.8 | null | https://github.com/fastapi/fastapi/pull/1626 | null | {'base_commit': '3ff504f03fb5ba852def5a0a41653c6bed7efb1b', 'files': [{'path': 'docs/en/docs/tutorial/body.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [12, 14]}}}, {'path': 'fastapi/dependencies/utils.py', 'status': 'modified', 'Loc': {"(None, 'get_typed_annotation', 246)": {'mod': [249]}}}, {'path': 'fastapi/openapi/constants.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"fastapi/openapi/constants.py",
"fastapi/dependencies/utils.py"
],
"doc": [
"docs/en/docs/tutorial/body.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | d03678dfbbdee0018252af3f5899716e824d6e87 | https://github.com/fastapi/fastapi/issues/110 | question
answered
reviewed
question-migrate | be able to host statics myself | **Is your feature request related to a problem? Please describe.**
I am using FastAPI on my private network and can't connect to the internet, but `/docs` uses a CDN, so I cannot visit that page.
I can rewrite the /docs route to replace the HTML template and add a /static route myself, but that feels a bit ugly. I wish there were a config option for it.
**Describe the solution you'd like**
add two variables to FastAPI
```
api = FastAPI(static_prefix="/statics", static_url="/data/swagger-dist/")
```
With static_prefix="/statics", the /docs HTML may look like:
```html
<link type="text/css" rel="stylesheet" href="/statics/swagger-ui.css">
```
With static_url="/data/swagger-dist/", FastAPI would add a route for the static_prefix automatically, like:
```python
self.router.get(self.static_prefix, response_class=PlainTextResponse, ...)
```
**Describe alternatives you've considered**
If you don't want to handle statics, just ignore ``static_url``; I can serve them myself or put them behind nginx.
If so, you may need to document how to download all the static files.
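For reference, recent FastAPI versions expose URL overrides on `get_swagger_ui_html`, so a self-hosted setup can be wired up roughly like this (a sketch: the directory and asset paths are illustrative, and the Swagger UI files must be downloaded beforehand):
```python
from fastapi import FastAPI
from fastapi.openapi.docs import get_swagger_ui_html
from fastapi.staticfiles import StaticFiles

app = FastAPI(docs_url=None)  # disable the default CDN-backed /docs
app.mount("/statics", StaticFiles(directory="/data/swagger-dist"), name="statics")

@app.get("/docs", include_in_schema=False)
async def custom_docs():
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title=app.title,
        swagger_js_url="/statics/swagger-ui-bundle.js",
        swagger_css_url="/statics/swagger-ui.css",
    )
```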
**Additional context**
| null | https://github.com/fastapi/fastapi/pull/112 | null | {'base_commit': 'd03678dfbbdee0018252af3f5899716e824d6e87', 'files': [{'path': 'fastapi/openapi/docs.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [43]}, "(None, 'get_swagger_ui_html', 4)": {'mod': [4, 5, 6, 10, 11, 12, 13, 14, 15, 16, 21, 25, 26, 27, 28, 34, 35, 37, 42]}, "(None, 'get_redoc_html', 45)": {'mod': [45, 46, 47, 49, 50, 51, 52, 53, 54, 55, 60, 66, 69, 71, 72, 73, 74, 75, 76, 77, 78, 80]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"fastapi/openapi/docs.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | 4fdcdf341c106d345e6d0c349091cfb208f9c792 | https://github.com/fastapi/fastapi/issues/2237 | question
question-migrate | Add __all__ to __init__.py files to silence mypy(strict) errors | Strict mypy mode gives such errors:
```
base/api/users/controllers.py:4: error: Module 'fastapi' has no attribute 'Depends'
base/api/users/controllers.py:4: error: Module 'fastapi' has no attribute 'HTTPException'
```
on such import statement:
```
from fastapi import Depends, HTTPException
```
Tried using
```
from fastapi import Depends as Depends
from fastapi import HTTPException as HTTPException
```
as per recommendations in https://github.com/tiangolo/typer/issues/112 discussion. But the errors remain.
It seems that adding `__all__` to the `__init__.py` files for the re-exported names is the way to go (as per the https://github.com/python/mypy/issues/7042 discussion).
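For illustration, the combined pattern could look like this (an abridged sketch, not the actual `fastapi/__init__.py` contents):
```python
# fastapi/__init__.py (sketch)
from .applications import FastAPI as FastAPI
from .exceptions import HTTPException as HTTPException
from .param_functions import Depends as Depends

__all__ = ["FastAPI", "HTTPException", "Depends"]
```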
Thanks for considering this!
| null | https://github.com/fastapi/fastapi/pull/2547 | null | {'base_commit': '4fdcdf341c106d345e6d0c349091cfb208f9c792', 'files': [{'path': 'docs_src/openapi_callbacks/tutorial001.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [29]}}}, {'path': 'fastapi/__init__.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]}}}, {'path': 'fastapi/applications.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [19], 'mod': [1, 27]}, "('FastAPI', '__init__', 31)": {'mod': [47, 50, 52, 53, 58, 59, 63, 64, 77, 85, 86, 109]}, "('FastAPI', None, 30)": {'mod': [119, 306, 307]}, "('FastAPI', 'add_api_route', 194)": {'mod': [197]}, "('FastAPI', 'api_route', 247)": {'mod': [271, 272]}, "('FastAPI', 'add_api_websocket_route', 301)": {'mod': [302]}, "('FastAPI', 'include_router', 313)": {'mod': [321, 324]}, "('FastAPI', 'get', 338)": {'mod': [361, 362]}, "('FastAPI', 'put', 387)": {'mod': [410, 411]}, "('FastAPI', 'post', 436)": {'mod': [459, 460]}, "('FastAPI', 'delete', 485)": {'mod': [508, 509]}, "('FastAPI', 'options', 534)": {'mod': [557, 558]}, "('FastAPI', 'head', 583)": {'mod': [606, 607]}, "('FastAPI', 'patch', 632)": {'mod': [655, 656]}, "('FastAPI', 'trace', 681)": {'mod': [704, 705]}}}, {'path': 'fastapi/background.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/concurrency.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [3, 4, 5, 22, 25, 30, 33, 38]}, "(None, '_fake_asynccontextmanager', 14)": {'mod': [14]}}}, {'path': 'fastapi/datastructures.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [2]}, "('UploadFile', None, 6)": {'mod': [8]}}}, {'path': 'fastapi/dependencies/models.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}, "('Dependant', '__init__', 16)": {'mod': [27]}}}, {'path': 'fastapi/dependencies/utils.py', 'status': 'modified', 'Loc': {"(None, 'check_file_field', 88)": {'mod': [93, 98]}, "(None, 'get_sub_dependant', 133)": {'mod': [136]}, '(None, None, None)': {'mod': [166]}, "(None, 'get_typed_signature', 243)": {'mod': [243]}, "(None, 'get_typed_annotation', 259)": {'mod': [262, 263, 264]}, "(None, 'get_dependant', 281)": {'mod': [284]}, "(None, 'is_coroutine_callable', 426)": {'mod': [426]}, "(None, 'is_async_gen_callable', 435)": {'mod': [435]}, "(None, 'is_gen_callable', 442)": {'mod': [442]}, "(None, 'solve_generator', 449)": {'mod': [450]}, "(None, 'solve_dependencies', 467)": {'mod': [475, 481, 488, 489, 490, 495, 497]}}}, {'path': 'fastapi/encoders.py', 'status': 'modified', 'Loc': {"(None, 'generate_encoders_by_class_tuples', 14)": {'mod': [15, 16, 17]}, "(None, 'jsonable_encoder', 26)": {'mod': [34, 46, 47]}}}, {'path': 'fastapi/middleware/__init__.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/middleware/cors.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/middleware/gzip.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/middleware/httpsredirect.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/middleware/trustedhost.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/middleware/wsgi.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/openapi/docs.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [2]}, 
"(None, 'get_swagger_ui_html', 8)": {'mod': [16]}}}, {'path': 'fastapi/openapi/models.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [8]}, "('EmailStr', None, 14)": {'mod': [16]}}}, {'path': 'fastapi/openapi/utils.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [16]}, "(None, 'get_openapi_security_definitions', 67)": {'mod': [67]}, "(None, 'get_openapi_operation_parameters', 82)": {'mod': [91, 97]}, "(None, 'get_openapi_operation_request_body', 108)": {'mod': [112, 116, 118]}, "(None, 'get_openapi_operation_metadata', 143)": {'mod': [143]}, "(None, 'get_openapi_path', 156)": {'mod': [157, 158, 164, 172, 199, 200, 201, 202]}, "(None, 'get_openapi', 326)": {'mod': [335, 342, 343, 345, 346, 347, 349, 371]}}}, {'path': 'fastapi/param_functions.py', 'status': 'modified', 'Loc': {"(None, 'Depends', 241)": {'mod': [242]}, "(None, 'Security', 247)": {'mod': [248]}}}, {'path': 'fastapi/params.py', 'status': 'modified', 'Loc': {"('Depends', '__init__', 317)": {'mod': [318]}, "('Security', '__init__', 330)": {'mod': [332]}}}, {'path': 'fastapi/responses.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [3, 4, 5, 6, 7, 8, 9, 10]}}}, {'path': 'fastapi/routing.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [18], 'mod': [5, 33]}, "('APIRouter', 'include_router', 585)": {'add': [665], 'mod': [594, 595, 669]}, "(None, 'get_request_handler', 140)": {'mod': [153]}, "(None, 'app', 162)": {'mod': [210]}, "(None, 'get_websocket_app', 220)": {'mod': [222]}, "('APIWebSocketRoute', '__init__', 240)": {'mod': [243]}, "('APIRoute', '__init__', 262)": {'mod': [265, 290, 301]}, "('APIRoute', None, 261)": {'mod': [378]}, "('APIRouter', '__init__', 396)": {'mod': [404, 410, 411, 412, 416, 418, 419, 420]}, "('APIRouter', 'add_api_route', 438)": {'mod': [441, 466]}, "('APIRouter', 'api_route', 511)": {'mod': [535, 536, 537]}, "('APIRouter', 'add_api_websocket_route', 567)": {'mod': [568]}, "('APIRouter', None, 395)": {'mod': [578, 579]}, "('APIRouter', 'get', 686)": {'mod': [709, 710]}, "('APIRouter', 'put', 736)": {'mod': [759, 760]}, "('APIRouter', 'post', 786)": {'mod': [809, 810]}, "('APIRouter', 'delete', 836)": {'mod': [859, 860]}, "('APIRouter', 'options', 886)": {'mod': [909, 910]}, "('APIRouter', 'head', 936)": {'mod': [959, 960]}, "('APIRouter', 'patch', 986)": {'mod': [1009, 1010]}, "('APIRouter', 'trace', 1036)": {'mod': [1059, 1060]}}}, {'path': 'fastapi/security/__init__.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]}}}, {'path': 'fastapi/security/oauth2.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}, "('OAuth2', '__init__', 116)": {'mod': [119]}, "('OAuth2PasswordBearer', '__init__', 140)": {'mod': [144]}, "('OAuth2AuthorizationCodeBearer', '__init__', 168)": {'mod': [174]}}}, {'path': 'fastapi/staticfiles.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/templating.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/testclient.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'fastapi/utils.py', 'status': 'modified', 'Loc': {"(None, 'get_model_definitions', 17)": {'mod': [22, 24, 26]}, "(None, 'create_cloned_field', 73)": {'mod': [83]}, "(None, 'deep_dict_update', 130)": {'mod': [130]}}}, {'path': 'fastapi/websockets.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1, 2]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"fastapi/websockets.py",
"fastapi/middleware/httpsredirect.py",
"fastapi/middleware/cors.py",
"fastapi/routing.py",
"fastapi/templating.py",
"fastapi/middleware/__init__.py",
"fastapi/middleware/wsgi.py",
"fastapi/params.py",
"fastapi/middleware/gzip.py",
"fastapi/openapi/models.py",
"fastapi/security/__init__.py",
"fastapi/responses.py",
"fastapi/middleware/trustedhost.py",
"fastapi/datastructures.py",
"fastapi/openapi/utils.py",
"fastapi/dependencies/utils.py",
"fastapi/security/oauth2.py",
"fastapi/dependencies/models.py",
"fastapi/openapi/docs.py",
"fastapi/applications.py",
"fastapi/__init__.py",
"fastapi/staticfiles.py",
"docs_src/openapi_callbacks/tutorial001.py",
"fastapi/encoders.py",
"fastapi/param_functions.py",
"fastapi/utils.py",
"fastapi/concurrency.py",
"fastapi/background.py"
],
"doc": [],
"test": [
"fastapi/testclient.py"
],
"config": [],
"asset": []
} | 1 |
fastapi | fastapi | c09e950bd2efb81f82931469bee6856c72e54357 | https://github.com/fastapi/fastapi/issues/2996 | question
question-migrate | Please support latest SQLAlchemy or pin it | Hi @tiangolo, fastapi tests are currently failing and therefore causing pydantic tests to fail.
See https://github.com/samuelcolvin/pydantic/pull/2584, fastapi is not compatible with the v1.4 of SQLAlchemy which was released earlier in March, I've had to pin to `SQLAlchemy==1.3.23`
Please could we fix fastapi (the incompatibility looks like it might be trivial) or pin the dependency?
Once master of fastapi is fixed, we'll need to remember to also remove the hack from pydantic. | null | https://github.com/fastapi/fastapi/pull/3001 | null | {'base_commit': 'c09e950bd2efb81f82931469bee6856c72e54357', 'files': [{'path': 'pyproject.toml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [56]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "3",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [],
"config": [
"pyproject.toml"
],
"asset": []
} | 1 |
oobabooga | text-generation-webui | d6bd71db7f3200c2b1ef46123c07374848aed86a | https://github.com/oobabooga/text-generation-webui/issues/5533 | Insecure argument passed to cURL | ## Expected Behavior
A secure download of the Miniconda install script.
## Current Behavior
The install script is cURL'd with `-k`, resulting in an insecure transfer and possible arbitrary code execution on my machine.
## Steps to Reproduce
1. run any of these without miniconda installed:
- `wsl.sh`
- `start_windows.bat`
- `start_macos.sh`
- `start_linux.sh`
2. pwned
## Possible Solution
don't curl executable things insecurely
## Context
Below is a copy of the text I had put in a security advisory on both this repository and ParisNeo/lollms-webui as of early December. The response from the maintainers has been radio silence, so I am publishing the text here so that people can know/protect themselves.
# Security Advisory
### Summary
An unsafe command line argument being passed to cURL allows the Miniconda installer download to be MITM'd.
This downloaded script is subsequently run, potentially resulting in arbitrary code execution on user machines.
### Details
Here's an example from `start_linux.sh`
```bash
mkdir -p "$INSTALL_DIR"
curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh"
```
It passes the `-k` argument to cURL.
cURL man page documentation for `-k`:
```
-k, --insecure
(TLS SFTP SCP) By default, every secure connection curl makes is
verified to be secure before the transfer takes place. This
option makes curl skip the verification step and proceed without
checking.
When this option is not used for protocols using TLS, curl
verifies the server's TLS certificate before it continues: that
the certificate contains the right name which matches the host
name used in the URL and that the certificate has been signed by
a CA certificate present in the cert store. See this online
resource for further details:
https://curl.se/docs/sslcerts.html
For SFTP and SCP, this option makes curl skip the known_hosts
verification. known_hosts is a file normally stored in the
user's home directory in the ".ssh" subdirectory, which contains
host names and their public keys.
WARNING: using this option makes the transfer insecure.
```
The operative line is at the end:
**` WARNING: using this option makes the transfer insecure.`**
### Impact
All users of the following installer scripts are affected:
- `wsl.sh`
- `start_windows.bat`
- `start_macos.sh`
- `start_linux.sh`
| null | https://github.com/oobabooga/text-generation-webui/pull/5535 | null | {'base_commit': 'd6bd71db7f3200c2b1ef46123c07374848aed86a', 'files': [{'path': 'start_linux.sh', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [34]}}}, {'path': 'start_macos.sh', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [34]}}}, {'path': 'start_windows.bat', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [40]}}}, {'path': 'wsl.sh', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [61]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [],
"config": [],
"asset": [
"start_windows.bat",
"start_linux.sh",
"wsl.sh",
"start_macos.sh"
]
} | 1 | |
oobabooga | text-generation-webui | 8f6405d2fa1c704edbcd2f4371ac21c3491d162b | https://github.com/oobabooga/text-generation-webui/issues/4015 | enhancement
stale | Adding flash attention to one click installer | **Description**
Adding flash attention to the one-click installer, for use with exllamaV2.
**Additional Context**
Other not-so-tech-savvy people and I are having issues installing it manually on Windows.
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"modules/exllamav2.py",
"one_click.py"
],
"doc": [
"docker/Dockerfile",
"README.md"
],
"test": [],
"config": [
"requirements.txt",
"requirements_noavx2.txt"
],
"asset": [
"wsl.sh",
"start_windows.bat",
"start_macos.sh",
"start_linux.sh"
]
} | 1 |
oobabooga | text-generation-webui | c8a59d79befd208bc341491d79eb4a2f8d25bb74 | https://github.com/oobabooga/text-generation-webui/issues/3043 | bug | always "llama_tokenize: too many tokens" (even 1 char input) in latest commit(b6643e5039ae210dbc54ae6aa0f4dcf90b2269a8) | ### Describe the bug
Load model(vicuna-chinese) OK.
Chat error(console): llama_tokenize: too many tokens
Reduce input to 1 char: llama_tokenize: too many tokens
Calculate Token: llama_tokenize: too many tokens
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Install.(last commit)
Run server.
Load model(vicuna-chinese) OK.
Chat Is Good.
...
git pull -> latest(b6643e5039ae210dbc54ae6aa0f4dcf90b2269a8)
Load model(vicuna-chinese) OK.
Chat error(console): llama_tokenize: too many tokens
Reduce input to 1 char: llama_tokenize: too many tokens
Calculate Token: llama_tokenize: too many tokens
### Screenshot
_No response_
### Logs
```shell
# Server Log
2023-07-08 03:06:08 INFO:Loaded the model in 1.56 seconds.
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
Output generated in 0.19 seconds (0.00 tokens/s, 0 tokens, context 58, seed 2000)
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
Output generated in 0.19 seconds (0.00 tokens/s, 0 tokens, context 49, seed 2000)
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
Output generated in 0.20 seconds (0.00 tokens/s, 0 tokens, context 47, seed 2000)
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
Output generated in 0.20 seconds (0.00 tokens/s, 0 tokens, context 2, seed 2000)
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
Output generated in 0.20 seconds (0.00 tokens/s, 0 tokens, context 2, seed 2000)
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
llama_tokenize: too many tokens
# Git log
commit b6643e5039ae210dbc54ae6aa0f4dcf90b2269a8 (HEAD -> main, origin/main, origin/HEAD)
Author: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri Jul 7 09:11:30 2023 -0700
Add decode functions to llama.cpp/exllama
commit 1ba2e88551f968cd74478fd02218a62869336ac5
Author: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri Jul 7 09:09:23 2023 -0700
Add truncation to exllama
```
### System Info
```shell
Mac m2 / macOS 13.4.1
```
| null | https://github.com/oobabooga/text-generation-webui/pull/3400 | null | {'base_commit': 'c8a59d79befd208bc341491d79eb4a2f8d25bb74', 'files': [{'path': 'modules/llamacpp_model.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [8]}, "('LlamaCppModel', 'generate', 76)": {'add': [77]}}}, {'path': 'modules/text_generation.py', 'status': 'modified', 'Loc': {"(None, 'encode', 38)": {'mod': [42]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"modules/llamacpp_model.py",
"modules/text_generation.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
oobabooga | text-generation-webui | d87ca8f2af2458e8b57b1ec9915c72a4ca5ca19f | https://github.com/oobabooga/text-generation-webui/issues/1578 | bug | New precise prompts break eachadea_ggml-vicuna-13b-1.1 | ### Describe the bug
These changes seem to break a "default" install of eachadea/ggml-vicuna-13b-1.1-q4, acquired via the UI, both _0 and _2. I now receive blank responses in cai-chat, chat, and instruct modes using both the vicuna and vicuna v0 templates.
Confirmed the regression was introduced in commit a777c05 by testing commit a840942, which works correctly.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Pull commit a777c05 or later. Start the application with
`python server.py --chat --model eachadea_ggml-vicuna-13b-1.1 --auto-devices --gpu-memory 8`
Use the chat window.
### Screenshot

### Logs
```shell
None.
```
### System Info
```shell
Windows 11 using WSL Ubuntu 22.04
Ryzen 3700x
32gb ram
nvidia 2060 Super 8gb
```
| null | https://github.com/oobabooga/text-generation-webui/pull/1579 | null | {'base_commit': 'd87ca8f2af2458e8b57b1ec9915c72a4ca5ca19f', 'files': [{'path': 'characters/instruction-following/Vicuna-v0.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [4]}}}, {'path': 'characters/instruction-following/Vicuna.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [4]}}}, {'path': 'models/config.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [29]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [],
"config": [
"models/config.yaml",
"characters/instruction-following/Vicuna.yaml",
"characters/instruction-following/Vicuna-v0.yaml"
],
"asset": []
} | 1 |
oobabooga | text-generation-webui | 1d7e893fa199d6e0f868c383782aba9dada7d911 | https://github.com/oobabooga/text-generation-webui/issues/177 | enhancement | GPTQ quantization(3 or 4 bit quantization) support for LLaMa | [GPTQ](https://arxiv.org/abs/2210.17323) is currently the SOTA one shot quantization method for LLMs.
GPTQ supports amazingly low 3-bit and 4-bit weight quantization, and it can be applied to LLaMa.
I've actually confirmed that this works well on LLaMa 7B.
I haven't tested the memory usage (n-bit CUDA kernel), but I think it should work.
| Model([LLaMa-7B](https://arxiv.org/abs/2302.13971)) | Bits | group-size | Wikitext2 | PTB | C4 |
| --------- | ---- | ---------- | --------- | --------- | ------- |
| FP16 | 16 | - | 5.67 | 8.79 | 7.05 |
| RTN | 4 | - | 6.28 | 9.68 | 7.70 |
| [GPTQ](https://arxiv.org/abs/2210.17323) | 4 | 64 | **6.16** | **9.66** | **7.52** |
| RTN | 3 | - | 25.66 | 61.25 | 28.19 |
| [GPTQ](https://arxiv.org/abs/2210.17323) | 3 | 64 | **12.24** | **16.77** | **9.55** |
code: https://github.com/qwopqwop200/GPTQ-for-LLaMa | null | https://github.com/oobabooga/text-generation-webui/pull/219 | null | {'base_commit': '1d7e893fa199d6e0f868c383782aba9dada7d911', 'files': [{'path': 'modules/models.py', 'status': 'modified', 'Loc': {"(None, 'load_model', 38)": {'mod': [113]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"modules/models.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
oobabooga | text-generation-webui | 6627f7feb9afe106df89e0b290adde21b1f8c914 | https://github.com/oobabooga/text-generation-webui/issues/2390 | bug
stale | Cannot download a Hugging Face model due to authorization | ### Describe the bug
I tried to download a new model which is visible on Hugging Face: bigcode/starcoder
But it failed due to "Unauthorized". I have an access token from Hugging Face; how can I add it to download-model.py?
File “/home/ahnlab/GPT/text-generation-webui/download-model.py”, line 102, in get_download_links_from_huggingface r.raise_for_status() File “/home/ahnlab/miniconda3/envs/vicuna/lib/python3.11/site-packages/requests/models.py”, line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: [https://huggingface.co/api/models/ bigcode/starcoder/tree/main](https://huggingface.co/api/models/%20bigcode/starcoder/tree/main)
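For gated or private repos, the tree API call needs an `Authorization` header, along these lines (a hedged sketch; `hf_token` is a placeholder for your own token). Note also the `%20` in the failing URL above, which suggests a stray leading space in the model name that would break the request regardless:
```python
import requests

hf_token = "hf_..."  # placeholder: your Hugging Face access token
url = "https://huggingface.co/api/models/bigcode/starcoder/tree/main"
r = requests.get(url, headers={"Authorization": f"Bearer {hf_token}"})
r.raise_for_status()
```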
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
python download-model.py bigcode/starcoder
### Screenshot
_No response_
### Logs
```shell
File “/home/ahnlab/GPT/text-generation-webui/download-model.py”, line 102, in get_download_links_from_huggingface r.raise_for_status() File “/home/ahnlab/miniconda3/envs/vicuna/lib/python3.11/site-packages/requests/models.py”, line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: [https://huggingface.co/api/models/ bigcode/starcoder/tree/main](https://huggingface.co/api/models/%20bigcode/starcoder/tree/main)
```
### System Info
```shell
Ubuntu
```
| null | https://github.com/oobabooga/text-generation-webui/pull/2408 | null | {'base_commit': '6627f7feb9afe106df89e0b290adde21b1f8c914', 'files': [{'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [159]}}}, {'path': 'download-model.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [14, 258], 'mod': [261, 267, 270, 274, 277]}, "(None, 'sanitize_model_and_branch_names', 73)": {'mod': [73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 105, 106, 107, 109, 110, 111, 112, 114, 115, 116, 117, 118, 119, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 143, 144, 145, 147, 148, 149, 150, 151, 153, 156, 157, 158, 160, 161, 162, 163, 164, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 183, 184, 185, 186, 187, 188, 189, 190, 193, 194, 197, 198, 199, 200, 201, 202, 203, 204, 205]}, "(None, 'download_model_files', 197)": {'mod': [207, 208, 209, 211, 212, 213, 216, 217, 218, 219, 220, 222, 223, 224, 225, 227, 228, 229, 230, 231]}, "(None, 'check_model_files', 216)": {'mod': [233, 234, 236, 237, 238, 239]}}}, {'path': 'server.py', 'status': 'modified', 'Loc': {"(None, 'download_model_wrapper', 185)": {'mod': [187]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"server.py",
"download-model.py"
],
"doc": [
"README.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
oobabooga | text-generation-webui | 02db4b0d06e9573de9e399b49006f882b996571b | https://github.com/oobabooga/text-generation-webui/issues/6395 | bug | Backslashes are written doubled in monospaced blocks | ### Describe the bug
Without monospaced markdown there is a single backslash, but if it is written with either a single backtick or triple backticks on multiline blocks, it gets doubled.
If I write:
```
'\'
```
or
```
'''
\
'''
```
(I replaced the backticks in the example with apostrophes because I couldn't figure out how to escape them correctly here)
On the web GUI it is written doubled, and it is not just visual: if you click the copy button it gets copied doubled. Like this:
`\\`
and
```
\\
```
But looking at the console it is not internally seen as doubled; so I don't think it's a tokenizer issue.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Already described in the description.
### Screenshot
_No response_
### Logs
```shell
(if any): well, there are no errors, so it shouldn't be a required field; that red asterisk is annoying.
```
### System Info
```shell
Since it's an issue with the HTML itself, I suspect the system specs aren't relevant; let me know if it somehow makes any difference.
```
| null | https://github.com/oobabooga/text-generation-webui/pull/6648 | null | {'base_commit': '02db4b0d06e9573de9e399b49006f882b996571b', 'files': [{'path': 'modules/html_generator.py', 'status': 'modified', 'Loc': {"(None, 'convert_to_markdown', 149)": {'add': [241]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"modules/html_generator.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
hacksider | Deep-Live-Cam | ed7a21687c4de9f32659c30a17571ce568c30b47 | https://github.com/hacksider/Deep-Live-Cam/issues/845 | Indentation Error | Just updated after the changes to the face enhancer and I'm getting this error whenever I try to turn it on.
I'm not smart enough to know what this is but I'm hoping this helps someone figure it out!
```
IndentationError: unindent does not match any outer indentation level
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Python310\lib\tkinter\__init__.py", line 1921, in __call__
return self.func(*args)
File "C:\Python310\lib\site-packages\customtkinter\windows\widgets\ctk_switch.py", line 413, in toggle
self._command()
File "D:\Software\Deep-Live-Cam\modules\ui.py", line 205, in <lambda>
update_tumbler("face_enhancer", enhancer_value.get()),
File "D:\Software\Deep-Live-Cam\modules\ui.py", line 561, in update_tumbler
frame_processors = get_frame_processors_modules(
File "D:\Software\Deep-Live-Cam\modules\processors\frame\core.py", line 40, in get_frame_processors_modules
set_frame_processors_modules_from_ui(frame_processors)
File "D:\Software\Deep-Live-Cam\modules\processors\frame\core.py", line 47, in set_frame_processors_modules_from_ui
frame_processor_module = load_frame_processor_module(frame_processor)
File "D:\Software\Deep-Live-Cam\modules\processors\frame\core.py", line 23, in load_frame_processor_module
frame_processor_module = importlib.import_module(f'modules.processors.frame.{frame_processor}')
File "C:\Python310\lib\importlib\__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 879, in exec_module
File "<frozen importlib._bootstrap_external>", line 1017, in get_code
File "<frozen importlib._bootstrap_external>", line 947, in source_to_code
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "D:\Software\Deep-Live-Cam\modules\processors\frame\face_enhancer.py", line 61
FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1, device=mps_device) # type: ignore[attr-defined]
``` | null | https://github.com/hacksider/Deep-Live-Cam/pull/846 | null | {'base_commit': 'ed7a21687c4de9f32659c30a17571ce568c30b47', 'files': [{'path': 'modules/processors/frame/face_enhancer.py', 'status': 'modified', 'Loc': {"(None, 'get_face_enhancer', 51)": {'mod': [57, 58, 59, 60, 61, 62, 63]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"modules/processors/frame/face_enhancer.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
hacksider | Deep-Live-Cam | 87081e78d0175c79bab4f1b50d41a9741920e1c4 | https://github.com/hacksider/Deep-Live-Cam/issues/916 | Issues with pip install -r requirements.txt | INFO: pip is looking at multiple versions of opencv-python to determine which version is compatible with other requirements. This could take a while.
ERROR: Cannot install -r requirements.txt (line 14) and torch==2.5.1 because these package versions have conflicting dependencies.
The conflict is caused by:
The user requested torch==2.5.1
torchvision 0.20.1+cu121 depends on torch==2.5.1+cu121
To fix this you could try to:
loosen the range of package versions you've specified
remove package versions to allow pip attempt to solve the dependency conflict
ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies
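One way to satisfy the resolver, per the hint above, is to align both pins on the same CUDA-tagged local version. A sketch (the index URL and the cu121 tag are assumptions about the poster's setup):
```
--extra-index-url https://download.pytorch.org/whl/cu121
torch==2.5.1+cu121
torchvision==0.20.1+cu121
```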
This is my output; please help!
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [],
"config": [
"requirements.txt"
],
"asset": []
} | 1 | |
hacksider | Deep-Live-Cam | 48742826420e786266593252179a6ad94c3b7d48 | https://github.com/hacksider/Deep-Live-Cam/issues/770 | Running the code without the UI fails in face_enhancer mode | Hello, I've run into a bug. When I don't use the UI and run the script directly, it throws an error:
# ------------------------ run.py file ------------------------
>“
>#!/usr/bin/env python3
>import os
>import sys
>from modules import core
>os.environ["CUDA_VISIBLE_DEVICES"] = "1"
>
># Simulate command-line arguments, adding --execution-provider cuda
>sys.argv += ["--execution-provider", "cuda"]
>sys.argv += ["-s", "input/images/picture1.png", "-t", "input/videos/real_001.mp4", "-o", "output/output03.mp4"]
>#choices=['face_swapper', 'face_enhancer']
>sys.argv += ["--frame-processor", "face_swapper"]
>
>if __name__ == '__main__':
> print(" ===============> starting run ")
> core.run()
>”
# ------------------------ issues ------------------------
If --frame-processor is set with `sys.argv += ["--frame-processor", "face_swapper"]`, it runs normally.
If --frame-processor is changed to `sys.argv += ["--frame-processor", "face_enhancer"]`, it errors out!
However, when processing through the UI, enabling face_enhancer mode works fine. May I ask what causes this?
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"modules/processors/frame/face_enhancer.py",
"modules/processors/frame/face_swapper.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
Textualize | rich | cefafdc12e0220d139c704522979a0dc9b3f889b | https://github.com/Textualize/rich/issues/178 | bug
accepted | [BUG] One trailing newline ignored by rich.print in some cases | Hi, thanks for this great library.
Not sure if this is expected behavior or a bug. In certain cases, `rich.print` handles newlines in a slightly different manner than the `print` built-in.
Example:
```
>>> for i in range(3): print('Hey' + '\n' * i)
...
Hey
Hey
Hey
>>> from rich import print
>>> for i in range(3): print('Hey' + '\n' * i)
...
Hey
Hey
Hey
>>>
```
Apparently, when the printed string contains at least one trailing newline (i.e. when `i == 1` or `i == 2`), one newline is ignored by `rich.print`.
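One plausible mechanism (an assumption on my part, not a confirmed diagnosis): a trailing separator produces a trailing empty element when splitting, and a renderer that discards that element swallows exactly one newline:
```python
# str.split keeps the empty string that a trailing "\n" produces;
# dropping it loses one newline in the rendered output.
print("Hey\n".split("\n"))    # ['Hey', ''] -> two lines: "Hey" and a blank one
print("Hey\n\n".split("\n"))  # ['Hey', '', '']
```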
A screenshot of a similar example:

(rich 3.3.2, Python 3.8.0, GNOME Terminal 3.18.3 on Linux Mint 18.2 64-bit)
| null | https://github.com/Textualize/rich/pull/180 | null | {'base_commit': 'cefafdc12e0220d139c704522979a0dc9b3f889b', 'files': [{'path': '.github/workflows/pythonpackage.yml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [3, 9, 11]}}}, {'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [7]}}}, {'path': 'docs/source/reference/emoji.rst', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1, 2]}}}, {'path': 'pyproject.toml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [5]}}}, {'path': 'rich/__main__.py', 'status': 'modified', 'Loc': {"(None, 'make_test_card', 34)": {'mod': [78]}}}, {'path': 'rich/_palettes.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [4], 'mod': [8, 9, 10, 11, 12, 13]}}}, {'path': 'rich/color.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0], 'mod': [5, 6, 8]}, "('Color', 'get_ansi_codes', 384)": {'add': [394], 'mod': [390]}, "('Color', 'get_truecolor', 289)": {'mod': [300, 301, 302, 303]}, "('Color', 'parse', 337)": {'mod': [350, 365]}, "('Color', 'downgrade', 405)": {'mod': [456]}}}, {'path': 'rich/console.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [75]}, "('Console', '_detect_color_system', 392)": {'mod': [409, 410, 411, 412, 413]}, "('Console', 'export_text', 1044)": {'mod': [1049, 1067]}}}, {'path': 'rich/markdown.py', 'status': 'modified', 'Loc': {"('Heading', '__rich_console__', 146)": {'mod': [159]}, "('CodeBlock', '__rich_console__', 178)": {'mod': [183]}}}, {'path': 'rich/palette.py', 'status': 'modified', 'Loc': {"('Palette', 'match', 19)": {'mod': [31, 44, 45, 46]}, "('Palette', 'get_color_distance', 31)": {'mod': [33]}}}, {'path': 'rich/progress.py', 'status': 'modified', 'Loc': {"(None, 'iter_track', 57)": {'mod': [58, 74, 90]}, "('Progress', 'stop', 631)": {'mod': [652]}, "('Progress', 'track', 663)": {'mod': [710]}}}, {'path': 'rich/syntax.py', 'status': 'modified', 'Loc': {"('Syntax', '__rich_console__', 220)": {'add': [231]}}}, {'path': 'rich/text.py', 'status': 'modified', 'Loc': {"('Text', 'split', 761)": {'add': [765], 'mod': [775]}, "('Text', None, 104)": {'mod': [761]}, "('Text', 'wrap', 860)": {'mod': [889, 890]}}}, {'path': 'tests/_card_render.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}}}, {'path': 'tests/_markdown.py', 'status': 'removed', 'Loc': {}}, {'path': 'tests/test_card.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0], 'mod': [3, 5, 6, 7, 8]}}}, {'path': 'tests/test_color.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [29, 139]}, "(None, 'test_truecolor', 30)": {'mod': [40]}, "(None, 'test_parse_success', 44)": {'mod': [47, 48, 49]}, "(None, 'test_get_ansi_codes', 85)": {'mod': [90, 91]}, "(None, 'test_downgrade', 96)": {'mod': [98, 121]}}}, {'path': 'tests/test_console.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [2, 26], 'mod': [7]}}}, {'path': 'tests/test_log.py', 'status': 'modified', 'Loc': {"(None, 'test_log', 29)": {'mod': [30]}}}, {'path': 'tests/test_markdown.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [10], 'mod': [3, 5, 7]}, "(None, 'test_markdown_render', 11)": {'mod': [14]}}}, {'path': 'tests/test_markdown_no_hyperlinks.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [10], 'mod': [3, 5, 7]}, "(None, 'test_markdown_render', 11)": {'mod': [14]}}}, {'path': 'tests/test_progress.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [13, 45, 187]}}}, {'path': 
'tests/test_rich_print.py', 'status': 'modified', 'Loc': {"(None, 'test_rich_print', 12)": {'add': [12], 'mod': [19]}}}, {'path': 'tests/test_rule.py', 'status': 'modified', 'Loc': {"(None, 'test_rule', 10)": {'mod': [18, 19]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/text.py",
"rich/console.py",
"rich/palette.py",
"rich/__main__.py",
"rich/_palettes.py",
"tests/_card_render.py",
"rich/syntax.py",
"rich/color.py",
"rich/progress.py",
"rich/markdown.py",
"tests/_markdown.py"
],
"doc": [
"CHANGELOG.md",
"docs/source/reference/emoji.rst"
],
"test": [
"tests/test_console.py",
"tests/test_color.py",
"tests/test_log.py",
"tests/test_progress.py",
"tests/test_card.py",
"tests/test_markdown_no_hyperlinks.py",
"tests/test_rule.py",
"tests/test_rich_print.py",
"tests/test_markdown.py"
],
"config": [
".github/workflows/pythonpackage.yml",
"pyproject.toml"
],
"asset": []
} | 1 |
Textualize | rich | 489fafc63e4ab85cacde60ade1a15099d6c08ca8 | https://github.com/Textualize/rich/issues/2150 | Needs triage | [BUG] ImportError OrderedDict | You may find a solution to your problem in the [docs](https://rich.readthedocs.io/en/latest/introduction.html) or [issues](https://github.com/willmcgugan/rich/issues).
First: thanks for your awesome project and the work you put in to make devs' lives better :)!
**Describe the bug**
Some updates seem to have mixed up dependencies.
```bash
> rich README.md
Traceback (most recent call last):
File "/home/graeter/.local/bin/rich", line 5, in <module>
from rich_cli.__main__ import run
File "/home/graeter/.local/lib/python3.8/site-packages/rich_cli/__main__.py", line 7, in <module>
from rich.console import Console, RenderableType
File "/home/graeter/.local/lib/python3.8/site-packages/rich/console.py", line 46, in <module>
from ._log_render import FormatTimeCallable, LogRender
File "/home/graeter/.local/lib/python3.8/site-packages/rich/_log_render.py", line 5, in <module>
from .text import Text, TextType
File "/home/graeter/.local/lib/python3.8/site-packages/rich/text.py", line 5, in <module>
from rich.emoji import EmojiVariant
File "/home/graeter/.local/lib/python3.8/site-packages/rich/emoji.py", line 4, in <module>
from .jupyter import JupyterMixin
File "/home/graeter/.local/lib/python3.8/site-packages/rich/jupyter.py", line 4, in <module>
from .segment import Segment
File "/home/graeter/.local/lib/python3.8/site-packages/rich/segment.py", line 19, in <module>
from .cells import (
File "/home/graeter/.local/lib/python3.8/site-packages/rich/cells.py", line 6, in <module>
from ._lru_cache import LRUCache
File "/home/graeter/.local/lib/python3.8/site-packages/rich/_lru_cache.py", line 8, in <module>
from typing_extensions import OrderedDict
ImportError: cannot import name 'OrderedDict' from 'typing_extensions' (/home/graeter/.local/lib/python3.8/site-packages/typing_extensions.py)
```
Can you point me to a working configuration?
Up to now I have used rich very happily and would miss it a lot ;)
**Platform**
- Ubuntu 20.04
- python 3.8.10
- pip 22.0.4
- zsh with starship
<details>
```
python -m rich.diagnose
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/graeter/.local/lib/python3.8/site-packages/rich/diagnose.py", line 5, in <module>
from rich.console import Console, get_windows_console_features
File "/home/graeter/.local/lib/python3.8/site-packages/rich/console.py", line 46, in <module>
from ._log_render import FormatTimeCallable, LogRender
File "/home/graeter/.local/lib/python3.8/site-packages/rich/_log_render.py", line 5, in <module>
from .text import Text, TextType
File "/home/graeter/.local/lib/python3.8/site-packages/rich/text.py", line 5, in <module>
from rich.emoji import EmojiVariant
File "/home/graeter/.local/lib/python3.8/site-packages/rich/emoji.py", line 4, in <module>
from .jupyter import JupyterMixin
File "/home/graeter/.local/lib/python3.8/site-packages/rich/jupyter.py", line 4, in <module>
from .segment import Segment
File "/home/graeter/.local/lib/python3.8/site-packages/rich/segment.py", line 19, in <module>
from .cells import (
File "/home/graeter/.local/lib/python3.8/site-packages/rich/cells.py", line 6, in <module>
from ._lru_cache import LRUCache
File "/home/graeter/.local/lib/python3.8/site-packages/rich/_lru_cache.py", line 8, in <module>
from typing_extensions import OrderedDict
ImportError: cannot import name 'OrderedDict' from 'typing_extensions' (/home/graeter/.local/lib/python3.8/site-packages/typing_extensions.py)
pip freeze | grep rich
rich==12.1.0
rich-cli==1.6.1
rich-rst==1.1.7
```
</details>
| null | https://github.com/Textualize/rich/pull/2157 | null | {'base_commit': '489fafc63e4ab85cacde60ade1a15099d6c08ca8', 'files': [{'path': 'poetry.lock', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [588, 1068, 1468, 1469]}}}, {'path': 'pyproject.toml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [30]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [],
"config": [
"poetry.lock",
"pyproject.toml"
],
"asset": []
} | 1 |
Textualize | rich | 42988b834f9c76b63145dd8d8142a94243e71375 | https://github.com/Textualize/rich/issues/2566 | Needs triage | Drop Python 3.6 as a supported version of Python | Time to drop Python 3.6 as a version of Python supported by Rich. The reasons for doing so include:
- Python 3.6 [reached end-of-life on 2021-12-03](https://devguide.python.org/versions/)
- The Poetry installer used for our GitHub actions [recently dropped 3.6](https://github.com/python-poetry/install.python-poetry.org).
Actions to take include:
- [x] Consider the significance of [this TODO in `_null_file.py`](https://github.com/Textualize/rich/blob/84e628655a2981ee90413ca3f35001ec3954161d/rich/_null_file.py#L7).
- [x] Drop `dataclasses` as a dependency.
- [x] Look at dropping [the special-casing of `isascii` in `rule.py`](https://github.com/Textualize/rich/blob/84e628655a2981ee90413ca3f35001ec3954161d/rich/rule.py#L54).
- [x] Drop Python 3.6 from `pythonpackage.yml`.
- [x] Drop mention of Python 3.6 in `pyproject.toml` -> `[tool.poetry]` -> `classifiers`.
- [x] Bump the major version of Rich.
- [x] Update the `Compatibility` section of `README.md` (and all translations).
| null | https://github.com/Textualize/rich/pull/2567 | null | {'base_commit': '42988b834f9c76b63145dd8d8142a94243e71375', 'files': [{'path': '.github/workflows/pythonpackage.yml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [11]}}}, {'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [7]}}}, {'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [41]}}}, {'path': 'pyproject.toml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [5, 18, 30, 32, 47]}}}, {'path': 'rich/_null_file.py', 'status': 'modified', 'Loc': {"('NullFile', None, 5)": {'mod': [7, 9, 10, 11, 13, 14, 15, 17, 18]}}}, {'path': 'rich/highlighter.py', 'status': 'modified', 'Loc': {"('ReprHighlighter', None, 80)": {'mod': [85]}}}, {'path': 'rich/rule.py', 'status': 'modified', 'Loc': {"('Rule', '__rich_console__', 49)": {'mod': [54, 55, 56, 57, 60]}}}, {'path': 'tests/test_null_file.py', 'status': 'modified', 'Loc': {"(None, 'test_null_file', 4)": {'mod': [8, 9, 10]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/highlighter.py",
"rich/rule.py",
"rich/_null_file.py"
],
"doc": [
"README.md",
"CHANGELOG.md"
],
"test": [
"tests/test_null_file.py"
],
"config": [
"pyproject.toml",
".github/workflows/pythonpackage.yml"
],
"asset": []
} | 1 |
Textualize | rich | c478588f3c228a4e86741a057c42b452d7bc6bce | https://github.com/Textualize/rich/issues/3027 | Needs triage | [BUG] Extra Space above Certain Markdown Tables | - [x] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.
- [x] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).
**Describe the bug**
Certain markdown tables contain extra newlines above them in Rich 13.4.2.
```python
from rich.console import Console
from rich.markdown import Markdown
MD = """
| Temperature | | | | | |
|--------------:|:-------|:-------|:-------|:-------|:----------|
| 0.01 | sam | sam | sam | sam | sam |
| 0.1 | sam | sam | sam | sam | sam |
| 0.25 | sam | sam | sam | sammy | sammy |
| 0.5 | lilly | sam | sammy | sammy | taffy |
| 0.75 | bambi | lola | snoopy | taffy | taz |
| 0.9 | bella | harper | millie | molly | sweetie |
| 1 | Anna | molly | shaker | sydney | wheessie |
| 1.25 | Finley | funny | gertie | gladi | road kill |
""".strip()
console = Console()
markdown = Markdown(MD)
print('--')
console.print(markdown)
print('--')
```

**Platform**
<details>
<summary>Click to expand</summary>
Windows 10.
```
┌───────────────────────── <class 'rich.console.Console'> ─────────────────────────┐
│ A high level console interface. │
│ │
│ ┌──────────────────────────────────────────────────────────────────────────────┐ │
│ │ <console width=148 ColorSystem.WINDOWS> │ │
│ └──────────────────────────────────────────────────────────────────────────────┘ │
│ │
│ color_system = 'windows' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 56 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = True │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=148, height=56), │
│ legacy_windows=True, │
│ min_width=1, │
│ max_width=148, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=56, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=148, height=56) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 148 │
└──────────────────────────────────────────────────────────────────────────────────┘
┌─── <class 'rich._windows.WindowsConsoleFeatures'> ────┐
│ Windows features available. │
│ │
│ ┌───────────────────────────────────────────────────┐ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ └───────────────────────────────────────────────────┘ │
│ │
│ truecolor = False │
│ vt = False │
└───────────────────────────────────────────────────────┘
┌────── Environment Variables ───────┐
│ { │
│ 'TERM': None, │
│ 'COLORTERM': None, │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': None, │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JUPYTER_COLUMNS': None, │
│ 'JUPYTER_LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
└────────────────────────────────────┘
platform="Windows"
```
</details>
| null | https://github.com/Textualize/rich/pull/3469 | null | {'base_commit': 'c478588f3c228a4e86741a057c42b452d7bc6bce', 'files': [{'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [21]}}}, {'path': 'rich/markdown.py', 'status': 'modified', 'Loc': {"('Markdown', '__rich_console__', 569)": {'mod': [680]}}}, {'path': 'tests/test_markdown.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [176]}, "(None, 'test_markdown_render', 99)": {'mod': [102]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/markdown.py"
],
"doc": [
"CHANGELOG.md"
],
"test": [
"tests/test_markdown.py"
],
"config": [],
"asset": []
} | 1 |
Textualize | rich | 9c0f164f8bbb8811f6e3ef8a69ac77c5e4464f36 | https://github.com/Textualize/rich/issues/2668 | Needs triage | [BUG] rich.live does not redirect stdout with fileno | **Describe the bug**
When using `rich.live.Live` with default settings (`redirect_stdout=True`), `sys.stdout` does not have a `fileno` which breaks some stdlib python code which expects it, for example:
```python
from rich.live import Live
import subprocess
import sys
with Live():
subprocess.Popen(["echo hello world"], stdout=sys.stdout).communicate()
```
which errors with
```
Traceback (most recent call last):
File "/Users/kratsg/mario-mapyde/live.py", line 6, in <module>
subprocess.Popen(["echo hello world"], stdout=sys.stdout).communicate()
File "/usr/local/Cellar/python@3.9/3.9.15/Frameworks/Python.framework/Versions/3.9/lib/python3.9/subprocess.py", line 829, in __init__
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
File "/usr/local/Cellar/python@3.9/3.9.15/Frameworks/Python.framework/Versions/3.9/lib/python3.9/subprocess.py", line 1598, in _get_handles
c2pwrite = stdout.fileno()
io.UnsupportedOperation: fileno
```
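For what it's worth, a minimal sketch of the shape of a fix (illustrative names only, not Rich's actual `FileProxy`): a redirecting wrapper that still forwards `fileno()` to the real stream, so `subprocess` can get at the OS-level descriptor:
```python
import io
import sys

class FilenoProxy(io.TextIOBase):
    """Wraps a real text stream while keeping fileno() usable."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def write(self, text: str) -> int:
        # A real implementation would capture/redirect here instead.
        return self._wrapped.write(text)

    def flush(self) -> None:
        self._wrapped.flush()

    def fileno(self) -> int:
        # Delegate to the underlying stream's OS-level descriptor.
        return self._wrapped.fileno()

proxy = FilenoProxy(sys.__stdout__)
print(proxy.fileno())  # 1, so subprocess.Popen(stdout=proxy) can work
```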
**Platform**
<details>
<summary>Click to expand</summary>
```
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=119 ColorSystem.EIGHT_BIT> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = '256' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 61 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=119, height=61), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=119, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=61, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=119, height=61) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 119 │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮
│ Windows features available. │
│ │
│ ╭───────────────────────────────────────────────────╮ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ ╰───────────────────────────────────────────────────╯ │
│ │
│ truecolor = False │
│ vt = False │
╰───────────────────────────────────────────────────────╯
╭──────── Environment Variables ────────╮
│ { │
│ 'TERM': 'xterm-256color', │
│ 'COLORTERM': None, │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': 'Apple_Terminal', │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JUPYTER_COLUMNS': None, │
│ 'JUPYTER_LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
╰───────────────────────────────────────╯
platform="Darwin"
```
```
rich==12.6.0
```
</details>
**What am I trying to do?**
<details>
<summary>Click to expand</summary>
Well, I was following something like [this SO post](https://stackoverflow.com/questions/71077706/redirect-print-and-or-logging-to-panel) and #1720 to run subprocess, and have the output of `sys.stdout` redirect to *some* renderable. Code that does not error, but also does not do as expected (due to python picking up system `stdout` rather than the redirected `stdout`) is similar:
```python
from rich.live import Live
import subprocess
import sys
with Live(screen=True):
subprocess.Popen(["echo hello world"]).communicate()
```
but this puts the output on the original screen, not the alternate screen. Perhaps there is a better way to pass in a file handler through `subprocess` to auto-redirect into a `Console` or similar, but it's not obvious to me how this can be done. This is the only way I can think of:
```python
from rich.console import Console
import os
class ConsolePanel(Console):
def __init__(self,*args,**kwargs):
console_file = open(os.devnull,'w')
super().__init__(record=True,file=console_file,*args,**kwargs)
def __rich_console__(self,console,options):
texts = self.export_text(clear=False).split('\n')
for line in texts[-options.height:]:
yield line
if __name__=='__main__':
from rich.layout import Layout
from rich.live import Live
import time
from datetime import datetime
import subprocess
class Interface():
def __init__(self) -> None:
self.console:list[ConsolePanel] = [ConsolePanel() for _ in range(2)]
def get_renderable(self):
layout = Layout()
layout.split_column(
Layout(self.console[0],name='top'),
Layout(self.console[1],name='bottom',size=6)
)
layout.children[0]
return layout
# comment out the below line to get wildly different behavior
proc = subprocess.Popen(["watch", "-n1", "echo hello world"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
db = Interface()
with Live(get_renderable=db.get_renderable):
while True:
time.sleep(1)
db.console[0].print(datetime.now().ctime()+'='*100)
db.console[1].print(datetime.now().ctime())
```
</details> | null | https://github.com/Textualize/rich/pull/2683 | null | {'base_commit': '9c0f164f8bbb8811f6e3ef8a69ac77c5e4464f36', 'files': [{'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [18]}}}, {'path': 'rich/file_proxy.py', 'status': 'modified', 'Loc': {"('FileProxy', 'flush', 50)": {'add': [54]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/file_proxy.py"
],
"doc": [
"CHANGELOG.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
Textualize | rich | 9f620dc50c0008c35e9f8493f198e6e593574a70 | https://github.com/Textualize/rich/issues/3104 | Needs triage | [BUG] `font-family` ignored in `html_export` due to user agent stylesheet for `<code>` | - [X] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.
- [X] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).
**Describe the bug**
Run this code:
```py
import rich.console
try:
test = 1
raise Exception()
except Exception:
console = rich.console.Console(record=True)
console.print_exception(show_locals=True)
html = console.export_html(inline_styles=True)
with open("test.html", "w") as html_file:
html_file.write(html)
```
You will get a `test.html` output file. Open it in Chrome.
I'm on macOS, and it shows up like this:

Notice the lines are not aligned properly on the right side. Here is why:

As you can see, Chrome's user agent stylesheet causes the `<code>` element to reset the `font-family` on the `<pre>` element back to `monospace`. All we need is to have Rich add a `font-family: inherit;` on the `<code>` element and everything is fine:

**Platform**
<details>
<summary>Click to expand</summary>
What platform (Win/Linux/Mac) are you running on? What terminal software are you using?
Mac with Chrome
```
❯ python -m rich.diagnose
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=148 ColorSystem.TRUECOLOR> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = 'truecolor' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 87 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=148, height=87), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=148, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=87, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=148, height=87) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 148 │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮
│ Windows features available. │
│ │
│ ╭───────────────────────────────────────────────────╮ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ ╰───────────────────────────────────────────────────╯ │
│ │
│ truecolor = False │
│ vt = False │
╰───────────────────────────────────────────────────────╯
╭────── Environment Variables ───────╮
│ { │
│ 'TERM': 'xterm-256color', │
│ 'COLORTERM': 'truecolor', │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': 'vscode', │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JUPYTER_COLUMNS': None, │
│ 'JUPYTER_LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
╰────────────────────────────────────╯
platform="Darwin"
❯ python -m pip freeze | grep rich
rich==13.4.2
```
</details>
| null | https://github.com/Textualize/rich/pull/3105 | null | {'base_commit': '9f620dc50c0008c35e9f8493f198e6e593574a70', 'files': [{'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [13]}}}, {'path': 'CONTRIBUTORS.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [75]}}}, {'path': 'rich/_export_format.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [15]}}}, {'path': 'tests/test_console.py', 'status': 'modified', 'Loc': {"(None, 'test_export_html', 527)": {'mod': [532]}, "(None, 'test_export_html_inline', 536)": {'mod': [541]}, "(None, 'test_save_html', 593)": {'mod': [594]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/_export_format.py"
],
"doc": [
"CONTRIBUTORS.md",
"CHANGELOG.md"
],
"test": [
"tests/test_console.py"
],
"config": [],
"asset": []
} | 1 |
Textualize | rich | a05a5a1c2f95f25db70ac3657e99f0bab652e2cd | https://github.com/Textualize/rich/issues/1180 | Needs triage | [BUG] No `Optional` typing in args that accept `None` in `Console`. | Some arguments to `rich.console.Console`—like `width`—accept `None` as an argument and are documented as `Optional` but are typed as only `int`, raising some type checking errors if `width=None` is passed.
https://github.com/willmcgugan/rich/blob/a05a5a1c2f95f25db70ac3657e99f0bab652e2cd/rich/console.py#L577
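The fix shape is mechanical; an illustrative fragment (not the full real signature):
```python
from typing import IO, Optional

class Console:
    def __init__(
        self,
        *,
        width: Optional[int] = None,   # was annotated as plain int
        height: Optional[int] = None,  # likewise
        file: Optional[IO[str]] = None,
    ) -> None:
        self._width = width
        self._height = height
        self._file = file
```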
| null | https://github.com/Textualize/rich/pull/1182 | null | {'base_commit': 'a05a5a1c2f95f25db70ac3657e99f0bab652e2cd', 'files': [{'path': 'CONTRIBUTING.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [53]}}}, {'path': 'Makefile', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [8, 10]}}}, {'path': 'rich/__init__.py', 'status': 'modified', 'Loc': {"(None, 'print', 45)": {'mod': [45]}, "(None, 'inspect', 63)": {'mod': [66, 67]}}}, {'path': 'rich/_inspect.py', 'status': 'modified', 'Loc': {"('Inspect', '__init__', 43)": {'mod': [47]}}}, {'path': 'rich/_log_render.py', 'status': 'modified', 'Loc': {"('LogRender', '__call__', 32)": {'mod': [36, 37, 39, 40, 41]}}}, {'path': 'rich/_ratio.py', 'status': 'modified', 'Loc': {"(None, 'ratio_distribute', 108)": {'mod': [109]}}}, {'path': 'rich/align.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [2]}, "('Align', '__init__', 36)": {'mod': [40, 42, 44, 45]}, "('Align', 'left', 67)": {'mod': [70, 72, 74, 75]}, "('Align', 'center', 89)": {'mod': [92, 94, 96, 97]}, "('Align', 'right', 111)": {'mod': [114, 116, 118, 119]}, "('VerticalCenter', '__init__', 242)": {'mod': [245]}}}, {'path': 'rich/bar.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}, "('Bar', '__init__', 29)": {'mod': [35]}}}, {'path': 'rich/color.py', 'status': 'modified', 'Loc': {"('Color', 'get_truecolor', 307)": {'mod': [308]}}}, {'path': 'rich/columns.py', 'status': 'modified', 'Loc': {"('Columns', '__init__', 31)": {'mod': [33, 36, 41, 42]}}}, {'path': 'rich/console.py', 'status': 'modified', 'Loc': {"('PagerContext', '__init__', 323)": {'mod': [326]}, "('ScreenContext', None, 354)": {'mod': [365]}, "('Console', '__init__', 563)": {'mod': [569, 570, 571, 573, 575, 577, 578, 579, 580, 590, 592, 593, 594]}, "('Console', 'pager', 955)": {'mod': [956]}, "('Console', 'screen', 1074)": {'mod': [1075]}, "('Console', 'render', 1088)": {'mod': [1089]}, "('Console', 'render_str', 1191)": {'mod': [1196, 1197, 1198, 1199, 1200, 1201]}, "('Console', 'get_style', 1243)": {'mod': [1244]}, "('Console', '_collect_renderables', 1273)": {'mod': [1279, 1280, 1281, 1282]}, "('Console', 'out', 1386)": {'mod': [1391, 1392]}, "('Console', 'print', 1418)": {'mod': [1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1433]}, "('Console', 'update_screen', 1508)": {'mod': [1512, 1513]}, "('Console', 'log', 1589)": {'mod': [1594, 1595, 1596, 1597, 1598]}, "('Console', 'input', 1730)": {'mod': [1737]}, "('Console', 'export_html', 1816)": {'mod': [1819, 1821]}, "('Console', 'save_html', 1895)": {'mod': [1899]}}}, {'path': 'rich/containers.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [5]}, "('Renderables', None, 28)": {'mod': [31]}}}, {'path': 'rich/layout.py', 'status': 'modified', 'Loc': {"('Layout', '__init__', 155)": {'mod': [157, 159, 160, 164]}}}, {'path': 'rich/live.py', 'status': 'modified', 'Loc': {"('Live', '__init__', 50)": {'mod': [52, 54, 62]}}}, {'path': 'rich/logging.py', 'status': 'modified', 'Loc': {"('RichHandler', '__init__', 58)": {'mod': [61, 68]}}}, {'path': 'rich/markdown.py', 'status': 'modified', 'Loc': {"('MarkdownContext', '__init__', 346)": {'mod': [351]}, "('Markdown', '__init__', 418)": {'mod': [422, 425, 426]}}}, {'path': 'rich/measure.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [2]}, "('Measurement', None, 11)": {'mod': [59]}}}, {'path': 'rich/panel.py', 'status': 'modified', 'Loc': {"('Panel', '__init__', 38)": {'mod': [43]}, "('Panel', 'fit', 68)": {'mod': [73]}}}, {'path': 
'rich/pretty.py', 'status': 'modified', 'Loc': {"(None, 'install', 44)": {'mod': [45, 49, 50]}, "('Pretty', '__init__', 154)": {'mod': [157, 160, 164, 165]}, "(None, 'traverse', 416)": {'mod': [416]}, "(None, 'pretty_repr', 587)": {'mod': [592, 593]}, "(None, 'pprint', 622)": {'mod': [625, 627, 628]}}}, {'path': 'rich/progress.py', 'status': 'modified', 'Loc': {"(None, 'track', 83)": {'mod': [90]}, "('ProgressColumn', None, 151)": {'mod': [156]}, "('RenderableColumn', None, 193)": {'mod': [200]}, "('SpinnerColumn', '__init__', 218)": {'mod': [224]}, "('TextColumn', '__init__', 261)": {'mod': [267, 268]}, "('BarColumn', '__init__', 299)": {'mod': [306]}, "('DownloadColumn', None, 375)": {'mod': [382]}, "('Progress', '__init__', 568)": {'mod': [571, 578]}, "('Progress', 'update', 729)": {'mod': [734, 735, 736, 737]}}}, {'path': 'rich/progress_bar.py', 'status': 'modified', 'Loc': {"('ProgressBar', '__init__', 33)": {'mod': [37, 43]}, "('ProgressBar', None, 18)": {'mod': [114]}}}, {'path': 'rich/prompt.py', 'status': 'modified', 'Loc': {"('PromptBase', '__init__', 53)": {'mod': [57, 59]}, "('PromptBase', 'ask', 77)": {'mod': [81, 83, 87]}, "('PromptBase', 'ask', 93)": {'mod': [97, 99, 102]}, "('PromptBase', 'ask', 107)": {'mod': [111, 113, 117]}, "('PromptBase', 'get_input', 186)": {'mod': [191]}, "('PromptBase', None, 30)": {'mod': [253, 262]}, "('PromptBase', '__call__', 257)": {'mod': [258]}}}, {'path': 'rich/scope.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [2]}, "(None, 'render_scope', 14)": {'mod': [17, 20, 21]}}}, {'path': 'rich/screen.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1]}, "('Screen', '__init__', 21)": {'mod': [22]}}}, {'path': 'rich/segment.py', 'status': 'modified', 'Loc': {"('Segment', 'apply_style', 82)": {'mod': [85, 86]}, "('Segment', 'split_and_crop_lines', 168)": {'mod': [172]}, "('Segment', 'adjust_line_length', 215)": {'mod': [216]}, "('Segment', 'set_shape', 282)": {'mod': [286, 287]}}}, {'path': 'rich/spinner.py', 'status': 'modified', 'Loc': {"('Spinner', '__init__', 14)": {'mod': [15]}}}, {'path': 'rich/status.py', 'status': 'modified', 'Loc': {"('Status', '__init__', 23)": {'mod': [27]}}}, {'path': 'rich/style.py', 'status': 'modified', 'Loc': {"('Style', '__init__', 93)": {'mod': [96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111]}, "('Style', None, 29)": {'mod': [179, 495, 578]}}}, {'path': 'rich/syntax.py', 'status': 'modified', 'Loc': {"('Syntax', '__init__', 224)": {'mod': [233, 234, 238]}, "('Syntax', 'from_path', 260)": {'mod': [267, 269, 273]}, "('Syntax', None, 190)": {'mod': [354]}}}, {'path': 'rich/table.py', 'status': 'modified', 'Loc': {"('Table', '__init__', 151)": {'mod': [154, 155, 156, 157, 170, 173, 174, 175]}, "('Table', 'add_column', 328)": {'mod': [333, 334, 335, 338, 339, 340, 341]}, "('Table', 'add_row', 379)": {'mod': [382]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/columns.py",
"rich/scope.py",
"rich/console.py",
"rich/bar.py",
"rich/layout.py",
"rich/pretty.py",
"rich/spinner.py",
"rich/style.py",
"rich/_inspect.py",
"rich/align.py",
"rich/logging.py",
"rich/table.py",
"rich/screen.py",
"rich/__init__.py",
"rich/syntax.py",
"rich/segment.py",
"rich/progress_bar.py",
"rich/live.py",
"rich/color.py",
"rich/panel.py",
"rich/progress.py",
"rich/measure.py",
"rich/containers.py",
"rich/_log_render.py",
"rich/prompt.py",
"rich/markdown.py",
"rich/_ratio.py",
"rich/status.py"
],
"doc": [
"CONTRIBUTING.md"
],
"test": [],
"config": [
"Makefile"
],
"asset": []
} | 1 |
Textualize | rich | aa7926c1431eebfb2ccaab9f3b63a4ac6cd8dfe6 | https://github.com/Textualize/rich/issues/2291 | bug | [BUG] Invalid markup in a ProgressBar causes the entire Python script to exit abnormally. | **Describe the bug**
**NOTE: I found some more details on this. The issue isn't that an exception isn't raised, it's that you can't ever see any console output from that exception. Alternate screen issue?** See EDIT below. Original bug report follows.
If you try to create a ProgressBar object, and within your fields you have some invalid markup (example: a closing [/color] tag without a corresponding opening tag), starting the progress bar with `start()` will *cause the entire Python interpreter to exit with return code 1*.
No error message is printed. Wrapping the code in a try/except block does not trap the error; the entire script still exits.
The following is a minimal working example illustrating the bug.
```python
from rich.progress import (
    BarColumn,
    Progress,
    TaskID,
    TextColumn,
    TimeRemainingColumn,
)
import time

def test():
    print("I will now make Rich fail horribly...")
    pbar = Progress(
        "[green]Status",
        TimeRemainingColumn(),
        "Running[/red]"  # THIS LINE HAS INVALID MARKUP. IT WILL CAUSE THE SCRIPT TO CRASH WITH NO ERROR.
    )
    task_id = pbar.add_task("test", start=False, total=10)
    # PROGRAM EXECUTION ABORTS HERE WITH NO ERROR MESSAGES.
    # Python interpreter returns code 1.
    pbar.start()
    print("We should make it here, but we don't.")
    for _ in range(10):
        time.sleep(1)
        pbar.update(task_id, advance=1)
    pbar.stop()

if __name__ == "__main__":
    # Even wrapping the test in a try/catch block does not prevent Python from exiting!
    try:
        test()
    except Exception as e:
        print(f"I caught an exception! {e}")  # This is NOT called, NO exception is raised.
    print("I made it through the test!")  # This is also NEVER reached. The script EXITS when pbar.start() is called.
```
Example run (Not much to see...):
```
dev@devbox:~$ python3 richbug.py
dev@devbox:~$ echo $?
1
dev@devbox:~$
```
Note that I have not tested this further to determine if it happens in other areas of Rich, but I know for sure it happens with ProgressBar.
**What should happen?**
If there's invalid markup, a normal exception should get thrown somewhere.
Even if for some reason the app needs to fully exit, printing an error message would still be useful. I spent over an hour tracking down what I thought was a bug or a forgotten exit() call in my own code before realizing the exact line where things failed was `pbar.start()`.
I have a *suspicion* that this might have to do with the alternate screen - perhaps an exception is printed but it's done on the alternate screen so you never see it? I haven't spent much time looking at Rich's code, but I'd imagine perhaps wrapping code in try blocks with code to exit the alternate screen followed by re-raising the exception might work?
One more point: after the code exits, the cursor is missing - I have to use `reset` to bring it back. Again, this suggests that we're switching into the alternate screen, crashing, and then not getting back out to print errors.
**EDIT: I discovered this is indeed the case. If I add the line `open("exception.txt","w").write(str(e))` to the except block, the exception does get printed and indeed does include the correct markup error. So therefore this bug should perhaps be named "App does not exit alternate screen before crashing"?**
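A hedged sketch of that suggested shape (the method names are invented stand-ins for Rich's internals):
```python
class Live:
    # Stand-ins for Rich's real internals:
    def _enter_alt_screen(self) -> None: ...
    def _exit_alt_screen(self) -> None: ...
    def _first_refresh(self) -> None: ...  # invalid markup would raise here

    def start(self) -> None:
        self._enter_alt_screen()
        try:
            self._first_refresh()
        except Exception:
            # Leave the alternate screen and restore the cursor *before*
            # re-raising, so the traceback lands somewhere the user can see.
            self._exit_alt_screen()
            raise
```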
**Platform**
```
dev@devbox:~$ python -m rich.diagnose
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=148 ColorSystem.STANDARD> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = 'standard' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 30 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=148, height=30), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=148, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=30, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=148, height=30) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 148 │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮
│ Windows features available. │
│ │
│ ╭───────────────────────────────────────────────────╮ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ ╰───────────────────────────────────────────────────╯ │
│ │
│ truecolor = False │
│ vt = False │
╰───────────────────────────────────────────────────────╯
╭────── Environment Variables ───────╮
│ { │
│ 'TERM': 'screen', │
│ 'COLORTERM': None, │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': None, │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
╰────────────────────────────────────╯
platform="Linux"
dev@devbox:~$ pip freeze | grep rich
rich==12.4.4
```
The above was run while SSH'ed into the devbox from Windows terminal. Same issue will occur no matter what client is being used though. Happens no matter how I run the code, whether it be on a local terminal, via SSH, etc.
Devbox is running Ubuntu Linux 22.04.
| null | https://github.com/Textualize/rich/pull/2305 | null | {'base_commit': 'aa7926c1431eebfb2ccaab9f3b63a4ac6cd8dfe6', 'files': [{'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [7]}}}, {'path': 'rich/live.py', 'status': 'modified', 'Loc': {"('Live', 'start', 104)": {'mod': [121]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/live.py"
],
"doc": [
"CHANGELOG.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
Textualize | rich | a972ca05522577de2f98eb7c957deead9c87b38f | https://github.com/Textualize/rich/issues/3123 | Needs triage | [BUG] Plain code blocks do not render correctly on a light background | - [x] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.
- [x] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).
**Describe the bug**
Markdown code *blocks* are rendering illegibly and ignoring any styling in the theme given to the console. The code has a black background and the text, rather than being cyan, is just unstyled text. So on bright backgrounds this is effectively black on black (one can barely make out letter shapes); on dark backgrounds you see the text, but not as styled. Inline markdown code displays fine and changes styles as expected. But neither the default code_block theme nor any new theme attached to the console seems to change the output from unstyled-on-black.
I've attached an image showing this. You can see the inline markdown code and the code block.
<img width="1574" alt="Screen Shot 2023-09-04 at 11 07 08" src="https://github.com/Textualize/rich/assets/198177/bdb3acae-d8a2-400d-a0ac-1e377ae44b95">
**Platform**
<details>
<summary>Click to expand</summary>
What platform (Win/Linux/Mac) are you running on? What terminal software are you using?
Running on Mac OS 12.6.1. The same thing happens on standard Terminal and on ITerm2.
Note that the styles show up in `python -m rich.default_styles`, so it is not that the styles
are unable to display.
I may ask you to copy and paste the output of the following commands. It may save some time if you do it now.
If you're using Rich in a terminal:
```
python -m rich.diagnose
pip freeze | grep rich
```
The output of the second one is 'rich==13.5.2'
The output of the first is
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=100 ColorSystem.TRUECOLOR> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = 'truecolor' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 32 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=100, height=32), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=100, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=32, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=100, height=32) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 100 │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮
│ Windows features available. │
│ │
│ ╭───────────────────────────────────────────────────╮ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ ╰───────────────────────────────────────────────────╯ │
│ │
│ truecolor = False │
│ vt = False │
╰───────────────────────────────────────────────────────╯
╭────── Environment Variables ───────╮
│ { │
│ 'TERM': 'xterm-256color', │
│ 'COLORTERM': 'truecolor', │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': 'iTerm.app', │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JUPYTER_COLUMNS': None, │
│ 'JUPYTER_LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
╰────────────────────────────────────╯
platform="Darwin"
<img width="1574" alt="Screen Shot 2023-09-04 at 11 07 08" src="https://github.com/Textualize/rich/assets/198177/bdb3acae-d8a2-400d-a0ac-1e377ae44b95">
</details>
| null | https://github.com/Textualize/rich/pull/3132 | null | {'base_commit': 'a972ca05522577de2f98eb7c957deead9c87b38f', 'files': [{'path': '.pre-commit-config.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [11, 43]}}}, {'path': 'CHANGELOG.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [12], 'mod': [8]}}}, {'path': 'rich/console.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [280]}}}, {'path': 'rich/markdown.py', 'status': 'modified', 'Loc': {"('CodeBlock', 'create', 175)": {'mod': [178]}}}, {'path': 'rich/syntax.py', 'status': 'modified', 'Loc': {"('Syntax', None, 227)": {'add': [441]}, "('Syntax', 'highlight', 442)": {'mod': [470]}}}, {'path': 'rich/text.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [40]}}}, {'path': 'tests/test_markdown.py', 'status': 'modified', 'Loc': {"(None, 'test_markdown_render', 99)": {'mod': [102]}}}, {'path': 'tests/test_markdown_no_hyperlinks.py', 'status': 'modified', 'Loc': {"(None, 'test_markdown_render', 92)": {'mod': [96]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"rich/text.py",
"rich/syntax.py",
"rich/console.py",
"rich/markdown.py"
],
"doc": [
"CHANGELOG.md"
],
"test": [
"tests/test_markdown_no_hyperlinks.py",
"tests/test_markdown.py"
],
"config": [
".pre-commit-config.yaml"
],
"asset": []
} | 1 |
ytdl-org | youtube-dl | e6a836d54ca1d3cd02f3ee45ef707a46f23e8291 | https://github.com/ytdl-org/youtube-dl/issues/31164 | broken-IE | [YouTube] When running without --verbose, … (No terminating paren } in {var b=a.split(""),… | ## Checklist
<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.12.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->
- [x] I'm reporting a broken site support
- [x] I've verified that I'm running youtube-dl version **2021.12.17**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar issues including closed ones
## Verbose log
Half no. Here's the version info from verbose mode:
```text
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['--no-call-home', '--abort-on-error', '--no-overwrites', '--keep-video', '--fixup=warn', '--restrict-filenames', '--output', '%(upload_dat>
[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8
[debug] youtube-dl version 2021.12.17
[debug] Git HEAD: e6a836d54
[debug] Python version 3.8.10 (CPython) - Linux-5.8.0-44-lowlatency-x86_64-with-glibc2.29
[debug] exe versions: ffmpeg 4.2.4, ffprobe 4.2.4
```
When running without --verbose, [youtube] Unable to decode n-parameter: … (No terminating paren } in {var b=a.split(""),…
However, fortunately, using `--verbose` works around the problem. :+1:
With `--verbose`, I get a few dozen lines of debug messages and then the download starts.
When running without `--verbose`, I see
```text
[youtube] 37gJCuf6UMY: Downloading webpage
[youtube] 37gJCuf6UMY: Downloading player 324f67b9
WARNING: [youtube] Unable to decode n-parameter: download likely to be throttled (No terminating paren } in {var b=a.split(""),
c=[1070485609,7,function(d,e){d.push(e)},
```
and then the terminal is busy spewing lots more seemingly minified JS code, until I send SIGINT.
When I redirect stdout and stderr to a file (`… |& tee -- nparam.log`), it writes the first two lines and then seems stuck for about a minute, after which I gave up and sent SIGINT.
## Description
On Ubuntu focal, downloading from YouTube stopped, and instead, my terminal is flooded.
It worked fine a few hours ago.
Downloading from Twitch works as expected, so probably not a network problem.
Thanks for still maintaining compatibility with ancient pythons!
[1698, 1737, 1740, 1743, 1755, 1765]}, "(None, 'extract_timezone', 2967)": {'mod': [2969, 2970, 2972]}, "(None, 'unified_timestamp', 3036)": {'mod': [3040, 3066]}, "(None, 'int_or_none', 3672)": {'mod': [3676, 3677, 3678, 3682]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"youtube_dl/jsinterp.py",
"youtube_dl/utils.py",
"youtube_dl/compat.py"
],
"doc": [],
"test": [
"test/test_utils.py",
"test/test_jsinterp.py",
"test/test_youtube_signature.py"
],
"config": [],
"asset": []
} | 1 |
ytdl-org | youtube-dl | 0f6422590e44e99e9b81cf2367666efe89fae3aa | https://github.com/ytdl-org/youtube-dl/issues/30166 | problem parsing site ceskatelevize.cz while trying to download video | ## Checklist
- [x] I'm reporting a broken site support
- [x] I've verified that I'm running youtube-dl version **2021.06.06**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar issues including closed ones
## Verbose log
```
c:\YoutubeDL>youtube-dl.exe --no-check-certificate --no-mtime -F https://www.ceskatelevize.cz/ivysilani/19796-pumpari-od-zlate-podkovy/29238360846/ -v
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['--no-check-certificate', '--no-mtime', '-F', 'https://www.ceskatelevize.cz/ivysilani/19796-pumpari-od-zlate-podkovy/29238360846/', '-v']
[debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252
[debug] youtube-dl version 2021.06.06
[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041
[debug] exe versions: ffmpeg N-90920-ge07b1913fc, ffprobe N-90920-ge07b1913fc
[debug] Proxy map: {}
[CeskaTelevize] 29238360846: Downloading webpage
[CeskaTelevize] 29238360846: Downloading JSON metadata
Traceback (most recent call last):
File "__main__.py", line 19, in <module>
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\__init__.py", line 475, in main
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\__init__.py", line 465, in _real_main
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 2069, in download
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 808, in extract_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 815, in wrapper
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 836, in __extract_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\extractor\common.py", line 534, in extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\extractor\ceskatelevize.py", line 130, in _real_extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\utils.py", line 2158, in sanitized_Request
File "C:\Python\Python34\lib\urllib\request.py", line 267, in __init__
File "C:\Python\Python34\lib\urllib\request.py", line 293, in full_url
File "C:\Python\Python34\lib\urllib\request.py", line 322, in _parse
ValueError: unknown url type: 'Error'
```
## Description
Download from ceskatelevize.cz is not working - parsing error
| null | https://github.com/ytdl-org/youtube-dl/pull/30713 | null | {'base_commit': '0f6422590e44e99e9b81cf2367666efe89fae3aa', 'files': [{'path': 'youtube_dl/extractor/ceskatelevize.py', 'status': 'modified', 'Loc': {"('CeskaTelevizeIE', None, 22)": {'add': [54, 60, 70], 'mod': [23, 25, 27, 29, 30, 32, 39, 41, 43, 44, 45, 46, 53, 65, 67]}, "('CeskaTelevizeIE', '_real_extract', 71)": {'add': [202], 'mod': [74, 78, 103, 111, 133, 134, 170, 184, 185]}, '(None, None, None)': {'mod': [15, 16]}, "('CeskaTelevizePoradyIE', None, 241)": {'mod': [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 277, 278, 280, 282, 283, 284, 285, 286, 287, 289]}}}, {'path': 'youtube_dl/extractor/extractors.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [211, 212, 213, 214]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"youtube_dl/extractor/extractors.py",
"youtube_dl/extractor/ceskatelevize.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
ytdl-org | youtube-dl | c6c0e23a32ffb9f2e5657aceaede7da1fb44e490 | https://github.com/ytdl-org/youtube-dl/issues/474 | Is there any way to determine the length of a video without downloading it? | I was looking at the output of --write-info-json but could not determine the parameter (if there is any) that says the length of a video.
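(For reference, with a reasonably recent youtube-dl the extracted info dict carries a `duration` key in seconds, which also ends up in the `--write-info-json` output; a small sketch, where the URL is only an example:)
```python
import youtube_dl  # assumes a modern youtube-dl where info dicts carry "duration"

with youtube_dl.YoutubeDL({"quiet": True}) as ydl:
    info = ydl.extract_info(
        "https://www.youtube.com/watch?v=BaW_jenozKc", download=False
    )
    print(info.get("duration"))  # length in seconds, when the extractor knows it
```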
| null | https://github.com/ytdl-org/youtube-dl/pull/486 | null | {'base_commit': 'c6c0e23a32ffb9f2e5657aceaede7da1fb44e490', 'files': [{'path': 'youtube_dl/InfoExtractors.py', 'status': 'modified', 'Loc': {"('YoutubeIE', '_real_extract', 289)": {'add': [416], 'mod': [483]}}}]} | [] | [] | [] | {
"iss_type": "3",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"youtube_dl/InfoExtractors.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
localstack | localstack | 0bdfa27ab6cce6f82243470d1e48d283e01aa84c | https://github.com/localstack/localstack/issues/7109 | type: bug
aws:sns
status: confirmed | bug: InvalidParameterException when sending to SNS topic since version 1.2 | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
I'm using localstack in my current build, but since version 1.2 I get the following exception (Java 17 & Spring Boot 2.7.1):
```
com.amazonaws.services.sns.model.InvalidParameterValueException: The message attribute 'timestamp' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String. (Service: AmazonSNS; Status Code: 400; Error Code: ParameterValueInvalid; Request ID: E8OZ22XIRX11DTY2PWOGI5FB55U5J0S11VC8YJK6ES9UKCVL0DY1; Proxy: null)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1862) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleServiceErrorResponse(AmazonHttpClient.java:1415) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1384) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1154) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:811) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:779) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:753) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:713) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:695) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:559) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:539) ~[aws-java-sdk-core-1.12.132.jar:na]
at com.amazonaws.services.sns.AmazonSNSClient.doInvoke(AmazonSNSClient.java:3545) ~[aws-java-sdk-sns-1.12.132.jar:na]
at com.amazonaws.services.sns.AmazonSNSClient.invoke(AmazonSNSClient.java:3512) ~[aws-java-sdk-sns-1.12.132.jar:na]
at com.amazonaws.services.sns.AmazonSNSClient.invoke(AmazonSNSClient.java:3501) ~[aws-java-sdk-sns-1.12.132.jar:na]
at com.amazonaws.services.sns.AmazonSNSClient.executePublish(AmazonSNSClient.java:2475) ~[aws-java-sdk-sns-1.12.132.jar:na]
at com.amazonaws.services.sns.AmazonSNSClient.publish(AmazonSNSClient.java:2444) ~[aws-java-sdk-sns-1.12.132.jar:na]
at io.awspring.cloud.messaging.core.TopicMessageChannel.sendInternal(TopicMessageChannel.java:91) ~[spring-cloud-aws-messaging-2.4.0.jar:2.4.0]
at org.springframework.messaging.support.AbstractMessageChannel.send(AbstractMessageChannel.java:139) ~[spring-messaging-5.3.21.jar:5.3.21]
at org.springframework.messaging.support.AbstractMessageChannel.send(AbstractMessageChannel.java:125) ~[spring-messaging-5.3.21.jar:5.3.21]
at io.awspring.cloud.messaging.core.support.AbstractMessageChannelMessagingSendingTemplate.doSend(AbstractMessageChannelMessagingSendingTemplate.java:59) ~[spring-cloud-aws-messaging-2.4.0.jar:2.4.0]
at io.awspring.cloud.messaging.core.support.AbstractMessageChannelMessagingSendingTemplate.doSend(AbstractMessageChannelMessagingSendingTemplate.java:44) ~[spring-cloud-aws-messaging-2.4.0.jar:2.4.0]
at org.springframework.messaging.core.AbstractMessageSendingTemplate.send(AbstractMessageSendingTemplate.java:109) ~[spring-messaging-5.3.21.jar:5.3.21]
at org.springframework.messaging.core.AbstractMessageSendingTemplate.send(AbstractMessageSendingTemplate.java:99) ~[spring-messaging-5.3.21.jar:5.3.21]
at com.polovyi.ivan.tutorials.service.PurchaseTransactionService.processRequest(PurchaseTransactionService.java:36) ~[classes/:na]
at com.polovyi.ivan.tutorials.controller.PurchaseTransactionController.acceptPurchaseTransaction(PurchaseTransactionController.java:17) ~[classes/:na]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:na]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[na:na]
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:na]
at java.base/java.lang.reflect.Method.invoke(Method.java:568) ~[na:na]
at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:205) ~[spring-web-5.3.21.jar:5.3.21]
at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:150) ~[spring-web-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:117) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:895) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:808) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1067) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:963) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006) ~[spring-webmvc-5.3.21.jar:5.3.21]
at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909) ~[spring-webmvc-5.3.21.jar:5.3.21]
at javax.servlet.http.HttpServlet.service(HttpServlet.java:681) ~[tomcat-embed-core-9.0.64.jar:4.0.FR]
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883) ~[spring-webmvc-5.3.21.jar:5.3.21]
at javax.servlet.http.HttpServlet.service(HttpServlet.java:764) ~[tomcat-embed-core-9.0.64.jar:4.0.FR]
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:227) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.tomcat.websocket.server.WsFilter.doFilter(WsFilter.java:53) ~[tomcat-embed-websocket-9.0.64.jar:9.0.64]
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100) ~[spring-web-5.3.21.jar:5.3.21]
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:117) ~[spring-web-5.3.21.jar:5.3.21]
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93) ~[spring-web-5.3.21.jar:5.3.21]
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:117) ~[spring-web-5.3.21.jar:5.3.21]
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201) ~[spring-web-5.3.21.jar:5.3.21]
at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:117) ~[spring-web-5.3.21.jar:5.3.21]
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:197) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:97) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:541) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:135) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:92) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:78) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:360) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.coyote.http11.Http11Processor.service(Http11Processor.java:399) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.coyote.AbstractProcessorLight.process(AbstractProcessorLight.java:65) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.coyote.AbstractProtocol$ConnectionHandler.process(AbstractProtocol.java:890) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1787) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.tomcat.util.net.SocketProcessorBase.run(SocketProcessorBase.java:49) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.tomcat.util.threads.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1191) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.tomcat.util.threads.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:659) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61) ~[tomcat-embed-core-9.0.64.jar:9.0.64]
at java.base/java.lang.Thread.run(Thread.java:833) ~[na:na]
```
When using LocalStack 1.1 (and earlier versions) and leaving everything else the same, I don't get the exception.
The message header 'timestamp' is set by Spring messaging under the hood and is immutable, so there's no way to change it without using reflection or something similarly ugly. What I could do instead is use the AWS SDK directly.
However, I just wanted to flag the change in behaviour in LocalStack v1.2.
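For what it's worth, the failure seems reproducible without Spring in the picture. Below is a minimal boto3 sketch; the endpoint, topic name, and the idea that the `timestamp` attribute name is what trips the new validation are all assumptions:
```python
import boto3

# assumptions: LocalStack edge on :4566 and a topic to publish to
sns = boto3.client("sns", endpoint_url="http://localhost:4566", region_name="us-east-1")
topic_arn = sns.create_topic(Name="purchase-transactions")["TopicArn"]

# Spring messaging forwards its immutable "timestamp" header as a message
# attribute; publishing one explicitly mimics that without Spring involved
sns.publish(
    TopicArn=topic_arn,
    Message="{}",
    MessageAttributes={"timestamp": {"DataType": "Number", "StringValue": "1666000000000"}},
)
```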
### Expected Behavior
I'd expect to get a 202/Accepted when the application sends a message to the SNS topic.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
You can use the code from this project: https://github.com/polovyivan/spring-cloud-sns-topic-publisher
And only update the localstack version to 1.2
```
cd src/main/resources/docker-compose
docker-compose up
mvn clean spring-boot:run
```
Then send an empty HTTP POST to http://localhost:8080/spring-cloud-sns-topic-publisher/purchase-transactions
### Environment
```markdown
- OS: macOS Monterey 12.6
- LocalStack: 1.2
- Java: 17
- Spring boot: 2.7.1
- Maven: 3.8.1
- Docker: 20.10.17, build 100c701
```
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/7181 | null | {'base_commit': '0bdfa27ab6cce6f82243470d1e48d283e01aa84c', 'files': [{'path': 'localstack/services/sns/provider.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [135]}, "(None, 'validate_message_attributes', 1362)": {'mod': [1379]}, "(None, 'validate_message_attribute_name', 1395)": {'mod': [1402]}}}, {'path': 'tests/integration/test_sns.py', 'status': 'modified', 'Loc': {"('TestSNSProvider', 'test_publish_to_platform_endpoint_is_dispatched', 2539)": {'add': [2591]}}}, {'path': 'tests/integration/test_sns.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [2170]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/sns/provider.py",
"tests/integration/test_sns.snapshot.json"
],
"doc": [],
"test": [
"tests/integration/test_sns.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 91859102289e257e360682887e871c6a4bfbd75d | https://github.com/localstack/localstack/issues/457 | status: triage needed | CloudWatch listener returns Internal Server Error | Attempting to access the CloudWatch service at port 4582 returns `HTTP/1.0 500 INTERNAL SERVER ERROR`
**Steps to reproduce**
```
$ localstack start
$ curl http://127.0.0.1:4582
```
this returns
```
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.</p>
```
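The error presumably surfaces through the SDK as well; a minimal boto3 sketch (endpoint and region assumed) for exercising the CloudWatch listener:
```python
import boto3

# assumption: the CloudWatch listener is on port 4582, as in the curl above
cw = boto3.client("cloudwatch", endpoint_url="http://localhost:4582", region_name="us-east-1")
print(cw.list_metrics())
```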
| null | https://github.com/localstack/localstack/pull/527 | null | {'base_commit': '91859102289e257e360682887e871c6a4bfbd75d', 'files': [{'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [261]}}}, {'path': 'localstack/constants.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [33]}}}, {'path': 'localstack/ext/java/pom.xml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [8]}}}, {'path': 'localstack/services/install.py', 'status': 'modified', 'Loc': {"(None, 'install_lambda_java_libs', 102)": {'add': [104]}}}, {'path': 'requirements.txt', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [20]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/constants.py",
"localstack/services/install.py"
],
"doc": [
"README.md"
],
"test": [],
"config": [
"requirements.txt"
],
"asset": [
"localstack/ext/java/pom.xml"
]
} | 1 |
localstack | localstack | 60d2c3dc68d9fae0f1e0acb7d0c705df408bd8c5 | https://github.com/localstack/localstack/issues/5030 | type: bug
priority: high
aws:cloudformation
status: resolved/fixed
aws:stepfunctions | bug: State machines in non-default regions can't be deleted and fail to create proper ARN | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
- Sometimes wrong ARNs are created for the child state machine (e.g. `arn:aws:states:us-east-1:000000000000:stateMachine:us-east-1_us-east-1_mystatemachine` ... the double region shouldn't be there).
- Deleting a CloudFormation stack with a nested statemachine will fail to properly delete the child state machine when deleting the stack.
### Expected Behavior
State machines should work the same in all regions due to the transparent ARN patching.
Parity with AWS should be established for nested state machines.
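For clarity, a small sketch (names assumed) contrasting the expected ARN shape with the doubled-region one observed above:
```python
region, account, name = "us-east-1", "000000000000", "mystatemachine"

expected = f"arn:aws:states:{region}:{account}:stateMachine:{name}"
observed = f"arn:aws:states:{region}:{account}:stateMachine:{region}_{region}_{name}"

assert expected != observed  # the doubled region prefix should never appear
```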
### How are you starting LocalStack?
Custom (please describe below)
### Steps To Reproduce
will be provided via integration test
### Environment
```markdown
- OS: Ubuntu 20.04 LTS
- LocalStack: latest
```
### Anything else?
Might be regressions from the move of stepfunctions functionality to Community. | null | https://github.com/localstack/localstack/pull/5183 | null | {'base_commit': '60d2c3dc68d9fae0f1e0acb7d0c705df408bd8c5', 'files': [{'path': 'localstack/services/install.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [36, 77, 344], 'mod': [82]}, "(None, 'install_stepfunctions_local', 315)": {'mod': [340, 341]}}}, {'path': 'localstack/services/stepfunctions/stepfunctions_listener.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [3, 5, 9, 10, 16]}, "('ProxyListenerStepFunctions', 'forward_request', 20)": {'mod': [23, 24, 25, 26, 27, 29, 30, 31, 32]}, "('ProxyListenerStepFunctions', 'return_response', 34)": {'mod': [51, 52, 53, 54, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 69, 70]}}}, {'path': 'localstack/services/stepfunctions/stepfunctions_starter.py', 'status': 'modified', 'Loc': {"(None, 'get_command', 20)": {'mod': [22, 23, 28, 29]}}}, {'path': 'tests/integration/test_stepfunctions.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [11, 479], 'mod': [481, 482, 483, 484, 485, 486]}, "(None, 'test_multiregion', 482)": {'add': [487, 494], 'mod': [489, 492, 496, 497, 498, 499, 501, 502, 503, 504, 506, 507, 508, 509, 511, 512]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/stepfunctions/stepfunctions_listener.py",
"localstack/services/stepfunctions/stepfunctions_starter.py",
"localstack/services/install.py"
],
"doc": [],
"test": [
"tests/integration/test_stepfunctions.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | b302f2939d4f39432ccd565ab44d040dc1be4eea | https://github.com/localstack/localstack/issues/7494 | type: bug
aws:kms
status: confirmed | bug: KMS Alias Creation Fails to Return Error | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
This looks similar to https://github.com/localstack/localstack/issues/6471
I am trying to sign something using KMS for some tests. It seems like doing so using an alias does not work. For example, I create a key and an alias like so:
```
# Add a key used for signing urls
aws-cli --endpoint-url=http://localhost:4566 kms create-key \
--key-usage SIGN_VERIFY \
--key-spec RSA_4096
# Add well known alias for key
aws-cli --endpoint-url=http://localhost:4566 kms create-alias \
--alias-name "some-nice-alias-name" \
--target-key-id <key id generated above>
```
I can see that this looks to have worked by verifying the key and alias on the CLI
```
aws-cli --endpoint-url=http://localhost:4566 kms list-keys
{
"Keys": [
{
"KeyId": "f7d2d869-f6b8-4977-96ea-5bd70cb0d5f2",
"KeyArn": "arn:aws:kms:us-east-1:000000000000:key/<someuuid>"
}
]
}
```
and
```
aws-cli --endpoint-url=http://localhost:4566 kms list-aliases
{
"Aliases": [
{
"AliasName": "census-webform-url-signing-key",
"AliasArn": "arn:aws:kms:us-east-1:000000000000:alias/some-nice-alias-name",
"TargetKeyId": "<sameuuid>",
"CreationDate": "2023-01-13T16:58:52.279782-05:00"
}
]
}
```
however attempting to sign something does not work
```
# Make sure we can sign
aws-cli --endpoint-url=http://localhost:4566 kms sign \
--cli-binary-format raw-in-base64-out \
--key-id "alias/some-nice-alias-name" \
--message 'wwwtestcom' \
--message-type RAW \
--signing-algorithm "RSASSA_PSS_SHA_512"
```
results in
An error occurred (NotFoundException) when calling the Sign operation: Unable to find KMS alias with name alias/some-nice-alias-name
### Expected Behavior
I would expect output from the last command, not the resulting error.
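A minimal boto3 sketch of the equivalent SDK call (endpoint assumed; alias as above), which fails the same way:
```python
import boto3

kms = boto3.client("kms", endpoint_url="http://localhost:4566", region_name="us-east-1")

# signing via the alias should resolve to the target key,
# exactly as it does when the raw key ID is passed
resp = kms.sign(
    KeyId="alias/some-nice-alias-name",
    Message=b"wwwtestcom",
    MessageType="RAW",
    SigningAlgorithm="RSASSA_PSS_SHA_512",
)
print(resp["Signature"])
```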
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker run localstack/localstack
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
```
aws-cli --endpoint-url=http://localhost:4566 kms create-key \
--key-usage SIGN_VERIFY \
--key-spec RSA_4096
aws-cli --endpoint-url=http://localhost:4566 kms create-alias \
--alias-name "some-nice-alias-name" \
--target-key-id <key id generated above>
aws-cli --endpoint-url=http://localhost:4566 kms list-keys
{
"Keys": [
{
"KeyId": "f7d2d869-f6b8-4977-96ea-5bd70cb0d5f2",
"KeyArn": "arn:aws:kms:us-east-1:000000000000:key/<someuuid>"
}
]
}
aws-cli --endpoint-url=http://localhost:4566 kms list-aliases
{
"Aliases": [
{
"AliasName": "census-webform-url-signing-key",
"AliasArn": "arn:aws:kms:us-east-1:000000000000:alias/some-nice-alias-name",
"TargetKeyId": "<sameuuid>",
"CreationDate": "2023-01-13T16:58:52.279782-05:00"
}
]
}
aws-cli --endpoint-url=http://localhost:4566 kms sign \
--cli-binary-format raw-in-base64-out \
--key-id "alias/some-nice-alias-name" \
--message 'wwwtestcom' \
--message-type RAW \
--signing-algorithm "RSASSA_PSS_SHA_512"
```
### Environment
```markdown
- OS: Macos 12.6.2
- LocalStack: latest docker image
```
### Anything else?
I did test using the same using the actual generated key ID and this works. I also attempted this through a BOTO3 client in python and the same resulted. | null | https://github.com/localstack/localstack/pull/7826 | null | {'base_commit': 'b302f2939d4f39432ccd565ab44d040dc1be4eea', 'files': [{'path': 'localstack/services/kms/provider.py', 'status': 'modified', 'Loc': {"('KmsProvider', 'create_alias', 712)": {'add': [714]}}}, {'path': 'tests/integration/test_kms.py', 'status': 'modified', 'Loc': {"('TestKMS', None, 56)": {'add': [60]}}}, {'path': 'tests/integration/test_kms.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [391]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"tests/integration/test_kms.snapshot.json",
"localstack/services/kms/provider.py"
],
"doc": [],
"test": [
"tests/integration/test_kms.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | fa1a64c954b89b88ac30e77fd12930efc04c04c5 | https://github.com/localstack/localstack/issues/748 | area: documentation
status: triage needed | Contributors not loading up. Broken links for backers and Contributors. | Links like https://opencollective.com/localstack/sponsor/X/website won't exist; they are just symlinks to https://opencollective.com/localstack#contributors.
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [
"README.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 5b6eee89f41af000b2da5ff43e3292529ff4c56f | https://github.com/localstack/localstack/issues/1808 | type: question
area: configuration
good first issue | SNS: unable to ConfirmSubscription: Topic not found | Hi all,
Thanks for your effort on localstack! I'm trying to locally test SNS (HTTP) with CloudWatch triggers, but I am unable to get past confirming the subscription.
My application receives the following POST body when creating a subscription:
```
{"MessageId": "5cb062ad-0d4e-41e6-9a80-7053926b20b4", "Type": "SubscriptionConfirmation", "Timestamp": "2019-11-27T04:29:21.166530Z", "Message": "You have chosen to subscribe to the topic arn:aws:sns:us-e
ast-1:000000000000:lambda-xyz-errors.\nTo confirm the subscription, visit the SubscribeURL included in this message.", "TopicArn": "arn:aws:sns:us-east-1:000000000000:lambda-xyz-errors", "Token": "3f97b1$
2", "SubscribeURL": "http://b40035e82fc6:4575/?Action=ConfirmSubscription&TopicArn=arn:aws:sns:us-east-1:000000000000:lambda-xyz-errors&Token=3f97b192"}
```
If I curl the SubscribeURL, I get a `topic does not exist` error:
```
[I] ➜ curl -XGET -H 'Authorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/sns/aws4_request,SignedHeaders=host;range;x-amz-date,Signature=fe5f80f77d5fa3beca038a248ff027d044534
2fe2855ddc963176630326f1024' http://localhost:4575/\?Action\=ConfirmSubscription\&TopicArn\=arn:aws:sns:us-east-1:000000000000:lambda-xyz-errors\&Token\=75f32aec
<ErrorResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<Error>
<Type>Sender</Type>
<Code>NotFound</Code>
<Message>Topic does not exist</Message>
</Error>
<RequestId>9dd01905-5012-5f99-8663-4b3ecd0dfaef</RequestId>
</ErrorResponse>%
```
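For completeness, confirming through the SDK instead of the raw URL presumably hits the same lookup; a minimal boto3 sketch (topic ARN and token taken from the POST body above):
```python
import boto3

sns = boto3.client("sns", endpoint_url="http://localhost:4575", region_name="us-east-1")

sns.confirm_subscription(
    TopicArn="arn:aws:sns:us-east-1:000000000000:lambda-xyz-errors",
    Token="3f97b192",
)
```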
If I run list-topics against the container, I can see it exists:
```bash
docker-compose exec localstack awslocal sns list-topics
{
"Topics": [
{
"TopicArn": "arn:aws:sns:us-east-1:000000000000:lambda-xyz-errors"
}
]
}
```
The topic and subscription were created with:
```bash
awslocal sns create-topic --name lambda-xyz-errors
awslocal sns subscribe --topic-arn arn:aws:sns:us-east-1:000000000000:lambda-xyz-errors --protocol http --notification-endpoint "http://localhost:3000/"
``` | null | https://github.com/localstack/localstack/pull/2043 | null | {'base_commit': '5b6eee89f41af000b2da5ff43e3292529ff4c56f', 'files': [{'path': 'localstack/services/sns/sns_listener.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [21, 251]}, "('ProxyListenerSNS', 'forward_request', 31)": {'add': [73]}, "(None, 'do_subscribe', 252)": {'add': [269]}}}, {'path': 'tests/integration/test_sns.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [12]}, "('SNSTest', None, 22)": {'add': [206]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/sns/sns_listener.py"
],
"doc": [],
"test": [
"tests/integration/test_sns.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | e65705a6ebf93ed7fbb05b690ebeb2c9c4aa88ae | https://github.com/localstack/localstack/issues/1225 | Presigned S3 URL doesn't notify SQS | I have configured an S3 bucket with an event configuration that sends a message to SQS for every object creation. When I upload via an AWS CLI command, I get the notification correctly; the sketch below shows the presigned-URL path that does not.
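A minimal sketch of that failing path (endpoint, bucket, and key are assumptions): generate a presigned PUT URL with boto3 and upload through it with plain HTTP:
```python
import boto3
import requests

s3 = boto3.client("s3", endpoint_url="http://localhost:4566")
url = s3.generate_presigned_url(
    "put_object", Params={"Bucket": "mybucket", "Key": "test.txt"}, ExpiresIn=3600
)

# this upload succeeds but, unlike `aws s3 cp`, emits no SQS notification
requests.put(url, data=b"hello")
```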
When I try using presigned url with curl/postman command, I dont get the sqs notification. **Is this a known issue and are there any work arounds?** | null | https://github.com/localstack/localstack/pull/1639 | null | {'base_commit': 'e65705a6ebf93ed7fbb05b690ebeb2c9c4aa88ae', 'files': [{'path': 'localstack/services/generic_proxy.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [32]}}}, {'path': 'localstack/services/s3/s3_listener.py', 'status': 'modified', 'Loc': {"('ProxyListenerS3', 'is_query_allowable', 705)": {'mod': [709, 710]}}}, {'path': 'tests/integration/test_s3.py', 'status': 'modified', 'Loc': {"('S3ListenerTest', None, 30)": {'add': [496]}, "('S3ListenerTest', '_perform_multipart_upload', 503)": {'add': [523]}, "('S3ListenerTest', 'test_s3_put_object_notification', 62)": {'mod': [66, 67, 68, 70, 71, 74, 75, 76, 77, 90, 91, 92, 93, 94]}, "('S3ListenerTest', 'test_s3_upload_fileobj_with_large_file_notification', 117)": {'mod': [118, 119, 120, 122, 123, 124, 125, 126, 127, 136, 137, 138, 139, 140]}, "('S3ListenerTest', 'test_s3_multipart_upload_with_small_single_part', 161)": {'mod': [167, 168, 169, 171, 172, 173, 174, 175, 180, 181, 182, 183, 184]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/generic_proxy.py",
"localstack/services/s3/s3_listener.py"
],
"doc": [],
"test": [
"tests/integration/test_s3.py"
],
"config": [],
"asset": []
} | 1 | |
localstack | localstack | 3cc0541a260c2f2af90e435f333c623e84ed4880 | https://github.com/localstack/localstack/issues/4137 | type: feature | Consider Kinesis-Mock over Kinesalite | # Type of request: This is a ...
- [ ] bug report
- [X] feature request
# Detailed description
Kinesalite has been a great mock for a long time. However, it is missing several API calls (e.g. UpdateShardCount), and it seems to be on life support as of late (the last commit was in Oct. 2020).
[Kinesis-Mock](https://github.com/etspaceman/kinesis-mock) is a new mock which supports all API calls except SubscribeToShard (due to a lack of support for the required HTTP/2 features in the Scala ecosystem). It is distributed as a Docker image, but there is also a JAR executable that can be used.
I am the creator of Kinesis-Mock, so I can work with Localstack on any changes that would be needed to make this pairing work, if desired.
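As one concrete example, a boto3 sketch (endpoint and stream name assumed) of the resharding call that Kinesalite lacks:
```python
import boto3

kinesis = boto3.client("kinesis", endpoint_url="http://localhost:4566", region_name="us-east-1")

# UpdateShardCount is among the API calls missing from Kinesalite
kinesis.update_shard_count(
    StreamName="my-stream", TargetShardCount=4, ScalingType="UNIFORM_SCALING"
)
```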
| null | https://github.com/localstack/localstack/pull/4152 | null | {'base_commit': '3cc0541a260c2f2af90e435f333c623e84ed4880', 'files': [{'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [103, 189, 719], 'mod': [193]}}}, {'path': 'localstack/config.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [46]}}}, {'path': 'localstack/services/kinesis/kinesis_listener.py', 'status': 'modified', 'Loc': {"('ProxyListenerKinesis', 'forward_request', 37)": {'mod': [40, 62, 73, 81, 109, 117]}, "('ProxyListenerKinesis', 'return_response', 131)": {'mod': [173]}}}, {'path': 'localstack/services/kinesis/kinesis_starter.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 1, 2, 11], 'mod': [5, 7]}, "(None, 'start_kinesis', 23)": {'add': [23], 'mod': [26]}, "(None, 'appy_patches', 13)": {'mod': [13]}}}, {'path': 'tests/integration/test_cloudformation.py', 'status': 'modified', 'Loc': {"('CloudFormationTest', 'test_create_delete_stack', 618)": {'mod': [694]}}}, {'path': 'tests/integration/test_dynamodb.py', 'status': 'modified', 'Loc': {"('TestDynamoDB', 'test_dynamodb_stream_stream_view_type', 368)": {'mod': [387]}}}, {'path': 'tests/integration/test_kinesis.py', 'status': 'modified', 'Loc': {"('TestKinesis', 'test_stream_consumers', 14)": {'add': [25, 30, 59], 'mod': [54, 55, 56]}, "('TestKinesis', 'test_subscribe_to_shard', 65)": {'add': [73]}, "('TestKinesis', 'test_subscribe_to_shard_with_sequence_number_as_iterator', 109)": {'add': [117]}}}, {'path': 'tests/unit/test_kinesis.py', 'status': 'modified', 'Loc': {"('KinesisListenerTest', 'test_describe_stream_summary_is_redirected', 13)": {'mod': [14, 16, 18]}, "('KinesisListenerTest', 'test_overwrite_update_shard_count_on_error', 46)": {'mod': [47, 48, 49, 50, 52, 54, 55, 56, 57, 58]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/config.py",
"localstack/services/kinesis/kinesis_starter.py",
"localstack/services/kinesis/kinesis_listener.py"
],
"doc": [
"README.md"
],
"test": [
"tests/integration/test_kinesis.py",
"tests/unit/test_kinesis.py",
"tests/integration/test_dynamodb.py",
"tests/integration/test_cloudformation.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 31286eb81823ee97e4e4a6b519abab9efcffe091 | https://github.com/localstack/localstack/issues/6155 | type: bug
aws:kinesis
aws:dynamodbstreams
area: integration/sam | samlocal not returning on second and later deploys | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
When executing `samlocal deploy` twice without updating the template, the second run gets stuck after the deploy step (before the outputs are shown).
I also left it running for a long time to check whether it would finish, and around 2 hours later a Kinesis stack trace appeared in the logs of the Docker container:
```
localstack_main | 2022-05-26T09:14:21.135:INFO:localstack.services.kinesis.kinesis_mock_server: [io-compute-1] WARN 2022-05-26 09:14:21,132 k.m.cache.Cache x-amzn-RequestId=3a527f8b-dcd4-11ec-ac09-ad4f3aa847a9, action=GetRecords, contextId=3a527f8a-dcd4-11ec-ac09-ad4f3aa847a9, x-amz-id-2=WLxUzW0heAIF5/pAXeHZEM0qejb1MRVum6fgYxuWPz146y+KGwkjmNwjv9IWngSM8RaihKhqcdibbhN4kruU+3p8/FlKZnTp, contentType=application/x-amz-json-1.1 - Getting records was unuccessful
localstack_main | 2022-05-26T09:14:21.135:INFO:localstack.services.kinesis.kinesis_mock_server: kinesis.mock.ExpiredIteratorException: The shard iterator has expired. Shard iterators are only valid for 300 seconds
localstack_main | 2022-05-26T09:14:21.135:INFO:localstack.services.kinesis.kinesis_mock_server: at kinesis.mock.models.ShardIterator.parse(ShardIterator.scala:58)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at kinesis.mock.api.GetRecordsRequest.$anonfun$getRecords$1(GetRecordsRequest.scala:24)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at cats.effect.IOFiber.runLoop(IOFiber.scala:358)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at cats.effect.IOFiber.asyncContinueSuccessfulR(IOFiber.scala:1338)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at cats.effect.IOFiber.run(IOFiber.scala:140)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at cats.effect.unsafe.WorkerThread.run(WorkerThread.scala:549)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at com.oracle.svm.core.thread.JavaThreads.threadStartRoutine(JavaThreads.java:519)
localstack_main | 2022-05-26T09:14:21.136:INFO:localstack.services.kinesis.kinesis_mock_server: at com.oracle.svm.core.posix.thread.PosixJavaThreads.pthreadStartRoutine(PosixJavaThreads.java:192)
localstack_main | 2022-05-26T09:14:21.140:DEBUG:localstack.services.dynamodbstreams.provider: Shard iterator for underlying kinesis stream expired
localstack_main | 2022-05-26T09:14:21.146:INFO:localstack.utils.threads: Thread run method <bound method StreamEventSourceListener._listen_to_shard_and_invoke_lambda of <localstack.services.awslambda.event_source_listeners.dynamodb_event_source_listener.DynamoDBEventSourceListener object at 0x7fa9b98dbb50>>({'function_arn': 'arn:aws:lambda:us-west-2:000000000000:function:sandbox-events-generator-worker', 'stream_arn': 'arn:aws:dynamodb:us-west-2:000000000000:table/sandbox-events-generator-jobs/stream/2022-05-26T07:20:41.621', 'batch_size': 1, 'parallelization_factor': 1, 'lock_discriminator': '60cf3f5b-11af-4b81-840b-c62920e5f0cb/arn:aws:dynamodb:us-west-2:000000000000:table/sandbox-events-generator-jobs/stream/2022-05-26T07:20:41.621/shardId-00000001653500000000-000000000000', 'shard_id': 'shardId-00000001653500000000-000000000000', 'stream_client': <botocore.client.DynamoDBStreams object at 0x7fa9bc321810>, 'shard_iterator': 'AAAAAAAAAAEqli29q/ZrvGK0Qv58Ys0UOaNNnguVf1262Mr190addTsT21HR/XdUWnOyHg1FUUW4R774Gy1X2lmyJQMqkTKuh5nVySaVOmGBrjNRHabrLqpzejZqpTYba8lThyNRgs95fCdid2O4GmMSpaBEXElMSDpWQ/LU/Hb5NG3P0pInAfuajJsFpH8TjqTbNHNf3EBxC0OYM1EfSBu183HSLUkECOBmWfp87OOWPH+WiWiWzQ==', 'failure_destination': None, 'max_num_retries': inf}) failed: An error occurred (ExpiredIteratorException) when calling the GetRecords operation: Shard iterator has expired Traceback (most recent call last):
localstack_main | File "/opt/code/localstack/localstack/utils/threads.py", line 39, in run
localstack_main | result = self.func(self.params, **kwargs)
localstack_main | File "/opt/code/localstack/localstack/services/awslambda/event_source_listeners/stream_event_source_listener.py", line 182, in _listen_to_shard_and_invoke_lambda
localstack_main | records_response = stream_client.get_records(
localstack_main | File "/opt/code/localstack/.venv/lib/python3.10/site-packages/botocore/client.py", line 508, in _api_call
localstack_main | return self._make_api_call(operation_name, kwargs)
localstack_main | File "/opt/code/localstack/.venv/lib/python3.10/site-packages/botocore/client.py", line 911, in _make_api_call
localstack_main | raise error_class(parsed_response, operation_name)
localstack_main | botocore.errorfactory.ExpiredIteratorException: An error occurred (ExpiredIteratorException) when calling the GetRecords operation: Shard iterator has expired
```
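The ExpiredIteratorException itself is expected for any iterator older than 300 seconds; a minimal boto3 sketch (stream and shard IDs assumed) of the refresh-and-retry pattern a consumer would normally apply, which the internal listener apparently does not:
```python
import boto3

kinesis = boto3.client("kinesis", endpoint_url="http://localhost:4566", region_name="us-west-2")

def fresh_iterator():
    return kinesis.get_shard_iterator(
        StreamName="my-stream",
        ShardId="shardId-000000000000",
        ShardIteratorType="TRIM_HORIZON",
    )["ShardIterator"]

iterator = fresh_iterator()
try:
    records = kinesis.get_records(ShardIterator=iterator)
except kinesis.exceptions.ExpiredIteratorException:
    # iterators are only valid for 300 seconds; fetch a new one and retry
    records = kinesis.get_records(ShardIterator=fresh_iterator())
```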
### Expected Behavior
The command finishes properly, as it does on the first run.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker-compose up -d
docker-compose.yml:
```
version: "3.8"
services:
localstack:
container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}"
image: localstack/localstack:latest
network_mode: bridge
ports:
- "127.0.0.1:53:53" # only required for Pro (DNS)
- "127.0.0.1:53:53/udp" # only required for Pro (DNS)
- "127.0.0.1:443:443" # only required for Pro (LocalStack HTTPS Edge Proxy)
- "127.0.0.1:4510-4559:4510-4559" # external service port range
- "127.0.0.1:4566:4566" # LocalStack Edge Proxy
environment:
- SERVICES=dynamodb,cloudformation,lambda,s3,sts,apigateway,iam
- DEBUG=1
- HOST_TMP_FOLDER=${TMPDIR:-/tmp/}localstack
- DOCKER_HOST=unix:///var/run/docker.sock
volumes:
- "${TMPDIR:-/tmp}/localstack:/tmp/localstack"
- "/var/run/docker.sock:/var/run/docker.sock"
networks:
net1:
ipv4_address: 10.10.100.3
networks:
net1:
driver: bridge
enable_ipv6: false
ipam:
config:
- subnet: 10.10.100.0/24
gateway: 10.10.100.32
```
template.yaml
```
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: >
test-sam
SAM Template for test
Parameters:
Environment:
Type: String
Default: sandbox
Project:
Type: String
Default: project
Component:
Type: String
Default: component
DynamoDBUrl:
Type: String
Default: LOCALSTACK_HOSTNAME
UseLocalstack:
Type: String
Default: 'true'
AllowedValues: [true, false]
Conditions:
UseLocalStack: !Equals [!Ref UseLocalstack, 'true']
# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
Globals:
Function:
Timeout: 360
Environment:
Variables:
REGION: !Ref "AWS::Region"
ENVIRONMENT: !Ref Environment
PROJECT: !Ref Project
COMPONENT: !Ref Component
DYNAMO_DB_JOB_TABLE: !Ref JobTable
DYNAMO_DB_URL: !Ref DynamoDBUrl
Resources:
TestFunction:
Type: AWS::Serverless::Function
Properties:
FunctionName: !Join [ "-", [ !Ref Environment, !Ref Component, "api" ] ]
CodeUri: api/
Handler: api
Runtime: go1.x
Architectures:
- x86_64
Tracing: Active
Events:
CatchAll:
Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api
Properties:
Path: /{proxy+}
Method: ANY
Environment:
Variables:
API_GATEWAY_V1_ENDPOINT: !Sub "http://localhost:4566/restapis/${ApiGatewayIdV1}/"
Policies:
- DynamoDBCrudPolicy:
TableName: !Ref JobTable
- AmazonAPIGatewayInvokeFullAccess
WorkerFunction:
Type: AWS::Serverless::Function
Properties:
FunctionName: !Join [ "-", [ !Ref Environment, !Ref Component, "worker" ] ]
CodeUri: worker/
Handler: worker
Runtime: go1.x
Architectures:
- x86_64
MemorySize: 512
Timeout: 180
Tracing: Active
Events:
JobTable:
Type: DynamoDB
Properties:
Stream: !GetAtt JobTable.StreamArn
StartingPosition: TRIM_HORIZON
BatchSize: 1
Environment:
Variables:
API_GATEWAY_ENDPOINT: !Sub "http://localhost:4566/restapis/${ApiGatewayIdV2}/"
API_GATEWAY_BULK_ENDPOINT: !Sub "http://localhost:4566/restapis/${ApiGatewayIdV2}/"
Policies:
- DynamoDBCrudPolicy:
TableName: !Ref JobTable
- AmazonSNSFullAccess
- AmazonAPIGatewayInvokeFullAccess
JobTable:
Type: AWS::DynamoDB::Table
Properties:
TableName: !Join [ "-", [ !Ref Environment, !Ref Component, "jobs" ] ]
AttributeDefinitions:
- AttributeName: Id
AttributeType: S
KeySchema:
- AttributeName: Id
KeyType: HASH
BillingMode: PAY_PER_REQUEST
StreamSpecification:
StreamViewType: NEW_IMAGE
Outputs:
# ServerlessRestApi is an implicit API created out of Events key under Serverless::Function
# Find out more about other implicit resources you can reference within SAM
# https://github.com/awslabs/serverless-application-model/blob/master/docs/internals/generated_resources.rst#api
EventsGeneratorAPI:
Description: "API Gateway endpoint URL for Prod environment for First Function"
Value: !Sub "http://localhost:4566/restapis/${ServerlessRestApi}/Prod/api/"
```
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
awslocal s3 mb s3://mybucket
### Environment
```markdown
- OS:
MacOS Monterey 12.4
- LocalStack:
LocalStack version: 0.14.3.1
LocalStack Docker container id: 95b639ae197e
LocalStack build date: 2022-05-25
LocalStack build git hash: 2a564393
```
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/6780 | null | {'base_commit': '31286eb81823ee97e4e4a6b519abab9efcffe091', 'files': [{'path': 'localstack/services/cloudformation/models/cdk.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [1]}, "('CDKMetadata', None, 4)": {'add': [10, 12]}, "('CDKMetadata', 'get_deploy_templates', 12)": {'mod': [14]}}}, {'path': 'localstack/services/cloudformation/models/ec2.py', 'status': 'modified', 'Loc': {"('EC2RouteTable', 'get_deploy_templates', 35)": {'mod': [46]}}}, {'path': 'localstack/services/cloudformation/provider.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [3], 'mod': [74]}, "('Stack', '_set_resource_status_details', 221)": {'add': [238], 'mod': [224]}, "('CloudformationProvider', 'describe_stack_resources', 1180)": {'add': [1198]}, "('CloudformationProvider', 'describe_change_set', 960)": {'mod': [983]}, "('CloudformationProvider', 'list_stack_resources', 1202)": {'mod': [1206]}}}, {'path': 'localstack/utils/cloudformation/template_deployer.py', 'status': 'modified', 'Loc': {"('TemplateDeployer', 'apply_change_set', 1218)": {'add': [1220], 'mod': [1219, 1225]}, "('TemplateDeployer', 'construct_changes', 1474)": {'add': [1477], 'mod': [1489]}, "('TemplateDeployer', 'prepare_should_deploy_change', 1662)": {'add': [1677], 'mod': [1679, 1680, 1681, 1682, 1683]}, "('TemplateDeployer', 'apply_change', 1699)": {'add': [1703], 'mod': [1709]}, "(None, 'execute_resource_action', 861)": {'mod': [888]}, "(None, 'get_action_name_for_resource_change', 1032)": {'mod': [1032]}, "('TemplateDeployer', 'deploy_stack', 1203)": {'mod': [1209]}, "('TemplateDeployer', 'update_stack', 1236)": {'mod': [1239]}, "('TemplateDeployer', 'init_resource_status', 1339)": {'mod': [1343]}, "('TemplateDeployer', 'update_resource_details', 1345)": {'mod': [1360]}, "('TemplateDeployer', None, 1187)": {'mod': [1473, 1555, 1581]}, "('TemplateDeployer', 'apply_changes', 1509)": {'mod': [1513, 1552]}, "('TemplateDeployer', '_run', 1558)": {'mod': [1560]}, "('TemplateDeployer', 'do_apply_changes_in_loop', 1581)": {'mod': [1625]}}}, {'path': 'tests/integration/cloudformation/test_cloudformation_stacks.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 9]}, "(None, 'test_get_template', 251)": {'add': [271]}, "(None, 'test_list_stack_resources_for_removed_resource', 51)": {'mod': [71, 89]}}}, {'path': 'tests/integration/cloudformation/test_cloudformation_stacks.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [183]}}}, {'path': 'tests/integration/templates/template36.yaml', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [41, 54], 'mod': [46, 47]}}}, {'path': 'tests/integration/test_cloudformation.py', 'status': 'modified', 'Loc': {"('TestCloudFormation', None, 501)": {'add': [1783]}, "('TestCloudFormation', 'test_cfn_with_multiple_route_tables', 1784)": {'mod': [1785, 1786, 1787, 1789, 1791, 1792, 1793, 1795, 1796, 1797]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/utils/cloudformation/template_deployer.py",
"localstack/services/cloudformation/models/cdk.py",
"tests/integration/cloudformation/test_cloudformation_stacks.snapshot.json",
"localstack/services/cloudformation/models/ec2.py",
"localstack/services/cloudformation/provider.py"
],
"doc": [],
"test": [
"tests/integration/cloudformation/test_cloudformation_stacks.py",
"tests/integration/test_cloudformation.py"
],
"config": [
"tests/integration/templates/template36.yaml"
],
"asset": []
} | 1 |
localstack | localstack | 0cf839ae1237e9b5aa9479d80e8f3f1eb3b79b5d | https://github.com/localstack/localstack/issues/164 | priority: high
type: feature | Data persistence for all services | We should document our roadmap for extended data persistence. (So far, persistent state is only supported for a few of the services). We'll keep this ticket as a reminder in the meantime. | null | https://github.com/localstack/localstack/pull/2382 | null | {'base_commit': '0cf839ae1237e9b5aa9479d80e8f3f1eb3b79b5d', 'files': [{'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [211]}}}, {'path': 'localstack/constants.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [34]}}}, {'path': 'localstack/plugins.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [4]}, "(None, 'do_register_localstack_plugins', 29)": {'mod': [144]}}}, {'path': 'localstack/services/infra.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [15, 16]}, "(None, 'start_apigateway', 82)": {'mod': [82, 83, 84, 85]}, "(None, 'start_events', 99)": {'mod': [99, 100, 101, 102]}, "(None, 'start_secretsmanager', 151)": {'mod': [151, 152, 153]}}}, {'path': 'localstack/services/s3/s3_listener.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [27], 'mod': [21, 29]}, "('ProxyListenerS3', 'return_response', 995)": {'add': [1003], 'mod': [998, 1001]}, "('ProxyListenerS3', None, 826)": {'mod': [826, 995]}}}, {'path': 'localstack/services/secretsmanager/secretsmanager_starter.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 3]}, "(None, 'start_secretsmanager', 22)": {'add': [23, 30], 'mod': [22, 29]}}}, {'path': 'localstack/services/ssm/ssm_listener.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [4]}, "('ProxyListenerSSM', None, 19)": {'mod': [19]}}}, {'path': 'localstack/utils/persistence.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [1, 6, 9]}, "(None, 'should_record', 29)": {'mod': [29, 31, 32, 33]}, "(None, 'record', 36)": {'mod': [46, 49, 54, 55]}, "(None, 'get_recordable_data', 54)": {'mod': [57, 58, 59, 60, 61]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/constants.py",
"localstack/utils/persistence.py",
"localstack/services/ssm/ssm_listener.py",
"localstack/plugins.py",
"localstack/services/infra.py",
"localstack/services/secretsmanager/secretsmanager_starter.py",
"localstack/services/s3/s3_listener.py"
],
"doc": [
"README.md"
],
"test": [],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 23cd5fba5b3a2012f280a10b0d7266514fc46eb5 | https://github.com/localstack/localstack/issues/451 | area: configuration
type: feature | Unable to use self-signed certs - CN incorrect, and/or missing subject alternative name field | When enabling SSL for the services (required, as the Kinesis producer only supports HTTPS), I would like to add the generated self-signed cert to my Java truststore so that I can interact with the services. In some instances I can disable SSL verification, but in others I cannot (for example, when using the Jest ES client library).
I have tried adding the generated certs to my truststore; however, there is no Subject Alternative Name field, and the CN on the cert doesn't match the host (localhost in this instance), so I'm unable to make use of the cert. If I add a `localstack` entry to my hosts file, it works.
The error essentially looks like this:
```
Caused by: javax.net.ssl.SSLPeerUnverifiedException: Certificate for <localhost> doesn't match any of the subject alternative names: []
```
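For reference, a minimal sketch with the `cryptography` package (hostname and validity period are assumptions) of generating a self-signed cert whose CN and SubjectAlternativeName both cover localhost:
```python
import datetime

from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "localhost")])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.utcnow())
    .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=365))
    # the field the generated certs are missing:
    .add_extension(x509.SubjectAlternativeName([x509.DNSName("localhost")]), critical=False)
    .sign(key, hashes.SHA256())
)
```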
Please consider setting the certificate CN to the configured hostname (defaulting to localhost), and or add the subject alternative name field to the cert, which would include the various DNS entries to enable use of the cert. | null | https://github.com/localstack/localstack/pull/1742 | null | {'base_commit': '23cd5fba5b3a2012f280a10b0d7266514fc46eb5', 'files': [{'path': 'localstack/utils/common.py', 'status': 'modified', 'Loc': {"(None, 'generate_ssl_cert', 779)": {'mod': [806]}}}, {'path': 'tests/integration/test_sqs.py', 'status': 'modified', 'Loc': {"('SQSTest', None, 29)": {'add': [46]}, "('SQSTest', 'test_set_queue_policy', 58)": {'mod': [59, 60]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/utils/common.py"
],
"doc": [],
"test": [
"tests/integration/test_sqs.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | c2c025a96888ce091adc4d9c6c9053af86704c4f | https://github.com/localstack/localstack/issues/3336 | aws:sns
status: resolved/stale | SNS Fifo topic |
# Type of request: This is a bug report
# Detailed description
`aws --endpoint-url=http://localhost:4575 sns create-topic --name command_post_topic.fifo --attributes FifoTopic=true --attributes ContentBasedDeduplication=false --region us-east-1`
When the above command is executed, the following error is thrown:
> _**`An error occurred (InvalidParameterValue) when calling the CreateTopic operation: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.`**_
The error is thrown because of the '.fifo' suffix, which AWS requires for FIFO topic names, so it is currently not possible to create a topic with the '.fifo' suffix.
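The same rejection reproduces via boto3; a minimal sketch (endpoint assumed) of the create call that AWS itself accepts:
```python
import boto3

sns = boto3.client("sns", endpoint_url="http://localhost:4575", region_name="us-east-1")

# AWS mandates the ".fifo" suffix for FIFO topics,
# but the name validation here rejects it
sns.create_topic(
    Name="command_post_topic.fifo",
    Attributes={"FifoTopic": "true", "ContentBasedDeduplication": "false"},
)
```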
| null | https://github.com/getmoto/moto/pull/3533 | null | {'base_commit': 'c2c025a96888ce091adc4d9c6c9053af86704c4f', 'files': [{'path': 'localstack/services/awslambda/lambda_api.py', 'status': 'modified', 'Loc': {"(None, 'forward_to_fallback_url', 878)": {'mod': [900]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/awslambda/lambda_api.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 3a6a3301fca769f2b9c5adbc5c19db442c02e03c | https://github.com/localstack/localstack/issues/8444 | status: resolved/fixed
type: feature
aws:s3 | enhancement request: support for s3:ObjectRestore:* bucket notifications | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Enhancement description
Currently, I'm using LocalStack to locally test a Lambda function that takes S3 `ObjectRestore:Completed` notifications as input, and it would be really great to have support for these events.
I know that right now, as a workaround, I can invoke the Lambda function manually using a payload with the same shape that S3 uses, but it's better to have the process run as closely as possible to how it would run in AWS.
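A minimal sketch of that manual workaround (function name and the payload fields are assumptions modelled on the usual S3 event shape):
```python
import json

import boto3

lam = boto3.client("lambda", endpoint_url="http://localhost:4566", region_name="us-east-1")

event = {
    "Records": [
        {
            "eventSource": "aws:s3",
            "eventName": "ObjectRestore:Completed",
            "s3": {"bucket": {"name": "my-bucket"}, "object": {"key": "archived/file.bin"}},
        }
    ]
}
lam.invoke(FunctionName="my-restore-handler", Payload=json.dumps(event))
```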
Thanks for creating and maintaining localstack, it's really great!
### 🧑💻 Implementation
Not sure, but happy to help if you can give me some pointers.
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/8690 | null | {'base_commit': '3a6a3301fca769f2b9c5adbc5c19db442c02e03c', 'files': [{'path': 'localstack/services/events/provider.py', 'status': 'modified', 'Loc': {"(None, 'events_handler_put_events', 542)": {'add': [567], 'mod': [575]}}}, {'path': 'localstack/services/s3/notifications.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 28, 57], 'mod': [44]}, "('S3EventNotificationContext', None, 87)": {'add': [89, 97]}, "('S3EventNotificationContext', 'from_request_context', 100)": {'add': [136, 139, 143, 149]}, "('BaseNotifier', '_get_event_payload', 303)": {'add': [349], 'mod': [313, 329, 342]}, "('EventBridgeNotifier', '_get_event_payload', 557)": {'add': [564, 612]}}}, {'path': 'localstack/services/s3/provider.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 127]}, "('S3Provider', None, 234)": {'add': [1680]}}}, {'path': 'tests/integration/s3/test_s3_notifications_eventbridge.py', 'status': 'modified', 'Loc': {"('TestS3NotificationsToEventBridge', 'test_object_put_acl', 126)": {'add': [178]}}}, {'path': 'tests/integration/s3/test_s3_notifications_eventbridge.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [172]}}}, {'path': 'tests/integration/s3/test_s3_notifications_sqs.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [11]}, "('TestS3NotificationsToSQS', 'test_object_put_acl', 962)": {'add': [1018]}}}, {'path': 'tests/integration/s3/test_s3_notifications_sqs.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [993]}}}, {'path': 'tests/integration/test_events.py', 'status': 'modified', 'Loc': {"('TestEvents', 'test_test_event_pattern', 1823)": {'add': [1863]}}}, {'path': 'tests/integration/test_events.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [173]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"tests/integration/s3/test_s3_notifications_eventbridge.snapshot.json",
"tests/integration/test_events.snapshot.json",
"localstack/services/events/provider.py",
"tests/integration/s3/test_s3_notifications_sqs.snapshot.json",
"localstack/services/s3/notifications.py",
"localstack/services/s3/provider.py"
],
"doc": [],
"test": [
"tests/integration/s3/test_s3_notifications_eventbridge.py",
"tests/integration/test_events.py",
"tests/integration/s3/test_s3_notifications_sqs.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 784d5c3329b9fd0b77db92ee464c2f5404eab93b | https://github.com/localstack/localstack/issues/187 | type: feature | Pass custom environment variables to lambda functions | Is it possible to pass custom environment variables when invoking lambda functions? Ideally I'd like to send the environment variables defined in the docker-compose.yml file to the docker run command here https://github.com/localstack/localstack/blob/d9b2715ba1776e57fabb9e46864e9c5d14d0933b/localstack/services/awslambda/lambda_api.py#L281 but maybe there's a better way of doing it from your point of view. | null | https://github.com/localstack/localstack/pull/262 | null | {'base_commit': '784d5c3329b9fd0b77db92ee464c2f5404eab93b', 'files': [{'path': 'localstack/services/awslambda/lambda_api.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [15, 62], 'mod': [70, 71]}, "(None, 'run_lambda', 219)": {'add': [230, 252, 257, 269], 'mod': [262, 271]}, "(None, 'do_execute', 286)": {'add': [289], 'mod': [287]}, "(None, 'set_function_code', 362)": {'add': [372], 'mod': [438]}, "(None, 'create_function', 468)": {'add': [486]}, "(None, 'update_function_configuration', 597)": {'add': [610]}, "(None, 'exec_lambda_code', 306)": {'mod': [306, 307, 308, 309, 310, 311, 326, 327, 328, 329]}}}, {'path': 'localstack/utils/testutil.py', 'status': 'modified', 'Loc': {"(None, 'create_lambda_function', 105)": {'mod': [106, 119]}}}, {'path': 'tests/integration/test_lambda.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [16, 21]}, "(None, 'test_lambda_runtimes', 61)": {'add': [106]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/awslambda/lambda_api.py"
],
"doc": [],
"test": [
"tests/integration/test_lambda.py",
"localstack/utils/testutil.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 83ff0cb0a0366db3c8067eef40b7869f15e7d05e | https://github.com/localstack/localstack/issues/1860 | status: resolved/fixed
area: integration/terraform | Route53 Add HostedZone or Add recordSet failing when done via terraform | I am trying to create Route53 hosted zones and record-set additions using Terraform. Though the resources are getting created, Terraform is ultimately failing.
On digging in, I see that Terraform calls the GetChange API after the resource-creation call to check the status of the changes, and it seems that the GetChange API is not implemented in LocalStack.
```
019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4:
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4:
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: -----------------------------------------------------
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 2019/12/11 12:08:02 [DEBUG] [aws-sdk-go] <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN
">
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: <title>404 Not Found</title>
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: <h1>Not Found</h1>
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: <p>The requested URL was not found on the server. If you entered the URL manually please check
your spelling and try again.</p>
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 2019/12/11 12:08:02 [DEBUG] [aws-sdk-go] DEBUG: Validate Response route53/GetChange failed, at
tempt 0/25, error SerializationError: failed to unmarshal error message
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: status code: 404, request id:
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: caused by: UnmarshalError: failed to unmarshal error message
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000000 3c 21 44 4f 43 54 59 50 45 20 48 54 4d 4c 20 50 |<!DOCTYPE HTML P|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000010 55 42 4c 49 43 20 22 2d 2f 2f 57 33 43 2f 2f 44 |UBLIC "-//W3C//D|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000020 54 44 20 48 54 4d 4c 20 33 2e 32 20 46 69 6e 61 |TD HTML 3.2 Fina|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000030 6c 2f 2f 45 4e 22 3e 0a 3c 74 69 74 6c 65 3e 34 |l//EN">.<title>4|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000040 30 34 20 4e 6f 74 20 46 6f 75 6e 64 3c 2f 74 69 |04 Not Found</ti|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000050 74 6c 65 3e 0a 3c 68 31 3e 4e 6f 74 20 46 6f 75 |tle>.<h1>Not Fou|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000060 6e 64 3c 2f 68 31 3e 0a 3c 70 3e 54 68 65 20 72 |nd</h1>.<p>The r|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000070 65 71 75 65 73 74 65 64 20 55 52 4c 20 77 61 73 |equested URL was|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000080 20 6e 6f 74 20 66 6f 75 6e 64 20 6f 6e 20 74 68 | not found on th|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 00000090 65 20 73 65 72 76 65 72 2e 20 49 66 20 79 6f 75 |e server. If you|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 000000a0 20 65 6e 74 65 72 65 64 20 74 68 65 20 55 52 4c | entered the URL|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 000000b0 20 6d 61 6e 75 61 6c 6c 79 20 70 6c 65 61 73 65 | manually please|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 000000c0 20 63 68 65 63 6b 20 79 6f 75 72 20 73 70 65 6c | check your spel|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 000000d0 6c 69 6e 67 20 61 6e 64 20 74 72 79 20 61 67 61 |ling and try aga|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: 000000e0 69 6e 2e 3c 2f 70 3e 0a |in.</p>.|
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4:
2019-12-11T12:08:02.818-0500 [DEBUG] plugin.terraform-provider-aws_v2.41.0_x4: caused by: unknown error response tag, {{ title} []}
```
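For reference, a minimal boto3 sketch (endpoint and change ID are hypothetical) of the GetChange call Terraform issues after the record change, which 404s here:
```python
import boto3

r53 = boto3.client("route53", endpoint_url="http://localhost:4566")

# Terraform polls this until the change status becomes INSYNC
resp = r53.get_change(Id="/change/C2682N5HXP0BZ4")
print(resp["ChangeInfo"]["Status"])
```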
┆Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-141) by [Unito](https://www.unito.io/learn-more)
| null | https://github.com/localstack/localstack/pull/3248 | null | {'base_commit': '83ff0cb0a0366db3c8067eef40b7869f15e7d05e', 'files': [{'path': 'localstack/plugins.py', 'status': 'modified', 'Loc': {"(None, 'do_register_localstack_plugins', 29)": {'add': [39], 'mod': [142]}}}, {'path': 'localstack/services/infra.py', 'status': 'modified', 'Loc': {"(None, 'start_route53', 104)": {'mod': [104, 106]}}}, {'path': 'tests/integration/test_route53.py', 'status': 'modified', 'Loc': {"('TestRoute53', 'test_create_hosted_zone', 7)": {'mod': [13, 14]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/plugins.py",
"localstack/services/infra.py"
],
"doc": [],
"test": [
"tests/integration/test_route53.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 6aafbcdebade24b26705913cbc413dc7d50dad7a | https://github.com/localstack/localstack/issues/11048 | type: bug
aws:ssm
status: backlog | bug: get-parameter and get-parameters on SSM does not work with ARNs (Localstack 3.5.0) | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Queries to Localstack SSM endpoints with the `get-parameter` or `get-parameters` commands do not work if parameter names are provided as ARNs. This appears to be due to the internal LocalStack parameter validation disallowing forward slashes in SSM parameter names. We observe the following:
```
$ awslocal ssm get-parameter --name arn:aws:service:us-east-1:0000000000:parameter/myparam
An error occurred (ValidationException) when calling the GetParameter operation: Parameter name: can't be prefixed with "ssm" (case-insensitive). If formed as a path, it can consist of sub-paths divided by slash symbol; each sub-path can be formed as a mix of letters, numbers and the following 3 symbols .-_
```
Removing the forward slash passes the input validation, but obviously fails to fetch a parameter:
```
$ awslocal ssm get-parameter --name arn:aws:service:us-east-1:0000000000:parametermyparam
An error occurred (ParameterNotFound) when calling the GetParameter operation: Parameter arn:aws:service:us-east-1:0000000000:parametermyparam not found.
```
### Expected Behavior
`get-parameter` and `get-parameters` should allow ARNs in names, following the [official docs](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ssm/get-parameters.html#options)
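A minimal boto3 sketch of the expected behaviour (endpoint assumed; the parameter is assumed to exist as /myparam):
```python
import boto3

ssm = boto3.client("ssm", endpoint_url="http://localhost:4566", region_name="us-east-1")

# per the AWS docs, both the plain name and the full ARN should resolve
for name in ("/myparam", "arn:aws:ssm:us-east-1:000000000000:parameter/myparam"):
    print(ssm.get_parameter(Name=name)["Parameter"]["Value"])
```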
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
localstack start -d
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
awslocal ssm get-parameter --name arn:aws:service:us-east-1:0000000000:parameter/myparam
### Environment
```markdown
- OS: Sonoma 14.5
- LocalStack:
LocalStack version: 3.5.1.dev20240618022512
LocalStack Docker image sha: sha256:5cd0557de2fdfac98d8d26d2f861b8266dcfc07ed09dbdacad7dc21ee2560310
LocalStack build date: 2024-06-18
LocalStack build git hash: 666e239
```
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/11218 | null | {'base_commit': '6aafbcdebade24b26705913cbc413dc7d50dad7a', 'files': [{'path': 'localstack-core/localstack/services/ssm/provider.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [82]}, "('SsmProvider', None, 118)": {'add': [364]}}}, {'path': 'localstack-core/localstack/utils/aws/arns.py', 'status': 'modified', 'Loc': {"(None, 's3_bucket_name', 548)": {'add': [549]}}}, {'path': 'tests/aws/services/ssm/test_ssm.py', 'status': 'modified', 'Loc': {"('TestSSM', None, 26)": {'add': [151]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack-core/localstack/utils/aws/arns.py",
"localstack-core/localstack/services/ssm/provider.py"
],
"doc": [],
"test": [
"tests/aws/services/ssm/test_ssm.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 1aad84d96159f6d12f872357e04f080a39836f5f | https://github.com/localstack/localstack/issues/11253 | type: bug
status: resolved/fixed
aws:apigateway | bug: API Gateway V1 (targeted to Lambda) gives 500 error with // in path | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
With API Gateway V1, I have a `/orders` path that targets a Lambda function. When accessing the URL using the syntax:
`curl -v https://45szzn0od7.execute-api.localhost.localstack.cloud:4566/prod/orders` I see the correct response.
However, if there's an extra `/` in the path after `/prod` I see a 500 response:
`curl -v https://45szzn0od7.execute-api.localhost.localstack.cloud:4566/prod//orders`
I see the response message:
```
"__type": "InternalError", "message": "exception while calling apigateway with unknown operation: 308 Permanent Redirect: http://45szzn0od7.execute-api.localhost.localstack.cloud:4566/prod/orders"
```
I keep on hitting this bug because I have `API_URL` set to the base URL, and then call `$API_URL/orders`. If the `API_URL` contains a trailing `/`, it crashes.
I'm not sure if the Lambda integration is relevant or not (or whether other integrations will also see the problem). I can confirm that my Lambda function is _not_ invoked when `/prod//orders` is used.
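A minimal sketch (API ID from above; TLS verification disabled for the local self-signed cert) showing the two requests side by side:
```python
import requests

base = "https://45szzn0od7.execute-api.localhost.localstack.cloud:4566/prod"

for path in ("/orders", "//orders"):
    r = requests.get(base + path, verify=False)
    print(path, r.status_code)  # AWS treats both the same; here //orders yields a 500
```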
### Expected Behavior
In the AWS service, both of these work correctly, regardless of whether the path is `/prod/orders` or `/prod//orders`.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker-composed up
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
API Gateway V1 created with CDK
```
const api = new apigateway.RestApi(this, 'example-api')
const ordersApi = api.root.addResource('orders')
const ordersLambdaInt = new apigateway.LambdaIntegration(orderLambda, { proxy: true })
ordersApi.addMethod('GET', ordersLambdaInt)
```
I don't have the `awslocal` commands on hand, but I can figure them out if necessary.
### Environment
```markdown
- OS: MacOS Sonoma 4.5
- LocalStack:
LocalStack version: 3.5.1.dev
LocalStack Docker image sha: (built from latest source)
LocalStack build date: 2024-07-23
LocalStack build git hash: a0a1ba090
```
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/11304 | null | {'base_commit': '96f447ffcc6c56821b4f0b1e2c603a3976949307', 'files': [{'path': 'localstack-core/localstack/services/apigateway/context.py', 'status': 'modified', 'Loc': {"('ApiInvocationContext', None, 21)": {'add': [100]}, "('ApiInvocationContext', 'path_with_query_string', 117)": {'add': [118]}, "('ApiInvocationContext', '__init__', 68)": {'mod': [80]}}}, {'path': 'localstack-core/localstack/services/apigateway/helpers.py', 'status': 'modified', 'Loc': {"(None, 'get_event_request_context', 1497)": {'add': [1511], 'mod': [1506, 1507, 1508]}}}, {'path': 'localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py', 'status': 'modified', 'Loc': {"('RestApiAwsProxyIntegration', 'create_lambda_input_event', 494)": {'mod': [517]}}}, {'path': 'tests/aws/services/apigateway/test_apigateway_lambda.py', 'status': 'modified', 'Loc': {"(None, 'test_lambda_aws_proxy_integration', 81)": {'add': [175, 190], 'mod': [85, 132, 135, 136, 137, 138, 139, 140, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 154, 155, 156, 157, 158, 159, 181, 185, 186, 187, 192, 198, 199, 200, 201, 204, 205, 206, 212, 213, 214, 222, 223, 224, 225, 226, 227, 245, 246, 247, 248, 249, 250]}, '(None, None, None)': {'add': [276]}, "(None, 'invoke_api', 161)": {'mod': [164, 173, 174]}}}, {'path': 'tests/aws/services/apigateway/test_apigateway_lambda.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, 1236)': {'add': [1236]}, '(None, None, 3)': {'mod': [3]}, '(None, None, 84)': {'mod': [84]}, '(None, None, 86)': {'mod': [86]}, '(None, None, 111)': {'mod': [111]}, '(None, None, 118)': {'mod': [118]}, '(None, None, 202)': {'mod': [202]}, '(None, None, 204)': {'mod': [204]}, '(None, None, 229)': {'mod': [229]}, '(None, None, 236)': {'mod': [236]}, '(None, None, 320)': {'mod': [320]}, '(None, None, 322)': {'mod': [322]}, '(None, None, 347)': {'mod': [347]}, '(None, None, 354)': {'mod': [354]}, '(None, None, 374)': {'mod': [374]}, '(None, None, 422)': {'mod': [422]}, '(None, None, 438)': {'mod': [438]}, '(None, None, 440)': {'mod': [440]}, '(None, None, 465)': {'mod': [465]}, '(None, None, 472)': {'mod': [472]}, '(None, None, 492)': {'mod': [492]}, '(None, None, 540)': {'mod': [540]}, '(None, None, 556)': {'mod': [556]}, '(None, None, 558)': {'mod': [558]}, '(None, None, 583)': {'mod': [583]}, '(None, None, 590)': {'mod': [590]}, '(None, None, 610)': {'mod': [610]}, '(None, None, 658)': {'mod': [658]}, '(None, None, 678)': {'mod': [678]}, '(None, None, 680)': {'mod': [680]}, '(None, None, 707)': {'mod': [707]}, '(None, None, 714)': {'mod': [714]}, '(None, None, 734)': {'mod': [734]}, '(None, None, 782)': {'mod': [782]}, '(None, None, 802)': {'mod': [802]}, '(None, None, 804)': {'mod': [804]}, '(None, None, 831)': {'mod': [831]}, '(None, None, 838)': {'mod': [838]}, '(None, None, 858)': {'mod': [858]}, '(None, None, 906)': {'mod': [906]}, '(None, None, 922)': {'mod': [922]}, '(None, None, 924)': {'mod': [924]}, '(None, None, 949)': {'mod': [949]}, '(None, None, 956)': {'mod': [956]}, '(None, None, 976)': {'mod': [976]}, '(None, None, 1024)': {'mod': [1024]}, '(None, None, 1059)': {'mod': [1059]}, '(None, None, 1061)': {'mod': [1061]}, '(None, None, 1093)': {'mod': [1093]}, '(None, None, 1100)': {'mod': [1100]}, '(None, None, 1123)': {'mod': [1123]}, '(None, None, 1173)': {'mod': [1173]}, '(None, None, 1196)': {'mod': [1196]}, '(None, None, 1198)': {'mod': [1198]}, '(None, None, 1226)': {'mod': [1226]}, '(None, None, 1233)': {'mod': [1233]}}}, {'path': 'tests/aws/services/apigateway/test_apigateway_lambda.validation.json', 'status': 'modified', 'Loc': {'(None, None, 12)': {'mod': [12]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py",
"localstack-core/localstack/services/apigateway/helpers.py",
"tests/aws/services/apigateway/test_apigateway_lambda.snapshot.json",
"tests/aws/services/apigateway/test_apigateway_lambda.validation.json",
"localstack-core/localstack/services/apigateway/context.py"
],
"doc": [],
"test": [
"tests/aws/services/apigateway/test_apigateway_lambda.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 61535b7d970493d9bb6740a03d698d075dd0a3b9 | https://github.com/localstack/localstack/issues/11905 | type: bug
aws:kms
status: backlog | bug: KMS DeriveSharedSecret does not work symmetrically | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
When creating two keys with the following command:
```
awslocal kms create-key --key-spec ECC_NIST_P256 --key-usage KEY_AGREEMENT --description "ECC NIST P-256 Key Agreement Key <Number>"
```
And then running the `derive-shared-secret` command twice like following:
```
awslocal kms derive-shared-secret \
--key-id $KEY1_ID \
--key-agreement-algorithm ECDH \
--public-key $PUB2
```
```
awslocal kms derive-shared-secret \
--key-id $KEY2_ID \
--key-agreement-algorithm ECDH \
--public-key $PUB1
```
The resulting `SharedSecret` values are different.
### Expected Behavior
Running the following:
```
awslocal kms derive-shared-secret \
--key-id $KEY1_ID \
--key-agreement-algorithm ECDH \
--public-key $PUB2
```
```
awslocal kms derive-shared-secret \
--key-id $KEY2_ID \
--key-agreement-algorithm ECDH \
--public-key $PUB1
```
The resulting `SharedSecret` values should be the same.
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker run localstack/localstack
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
```
// get the value for ID and export it to FIRST_KEY_ID variable
awslocal kms create-key --key-spec ECC_NIST_P256 --key-usage KEY_AGREEMENT --description "ECC NIST P-256 Key Agreement Key" --region us-east-1
// get the value for ID and export it to SECOND_KEY_ID variable
awslocal kms create-key --key-spec ECC_NIST_P256 --key-usage KEY_AGREEMENT --description "ECC NIST P-256 Key Agreement Key 2" --region us-east-1
// get the value for PublicKey and export it to PUB1 variable
awslocal kms get-public-key --key-id $FIRST_KEY_ID
// get the value for PublicKey and export it to PUB2 variable
awslocal kms get-public-key --key-id $SECOND_KEY_ID
// the two values for "SharedSecret" from below commands should be the same
awslocal kms derive-shared-secret --key-id $FIRST_KEY_ID --key-agreement-algorithm ECDH --public-key $PUB2
awslocal kms derive-shared-secret --key-id $SECOND_KEY_ID --key-agreement-algorithm ECDH --public-key $PUB1
```
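For context, ECDH key agreement is symmetric by construction: deriving with key 1 and public key 2 must yield the same bytes as deriving with key 2 and public key 1. A minimal sketch of the expected property using the Python `cryptography` package (independent of LocalStack's internals):
```python
from cryptography.hazmat.primitives.asymmetric import ec

# Two ECC_NIST_P256 (SECP256R1) key pairs, standing in for KEY1 and KEY2
key1 = ec.generate_private_key(ec.SECP256R1())
key2 = ec.generate_private_key(ec.SECP256R1())

# ECDH in both directions must produce identical shared secrets
secret_a = key1.exchange(ec.ECDH(), key2.public_key())
secret_b = key2.exchange(ec.ECDH(), key1.public_key())
assert secret_a == secret_b
```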
### Environment
```markdown
- OS: macOS 14.7.1 (23H222)
- LocalStack:
LocalStack version: 3.8.2.dev155
LocalStack Docker image sha: sha256:00e62cf9abaa00984b7bf835b411271822ddea2f44d209a24e734909db7ea29f
LocalStack build date: 2024-11-21
LocalStack build git hash: 6748e0e07
```
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/12071 | null | {'base_commit': '61535b7d970493d9bb6740a03d698d075dd0a3b9', 'files': [{'path': 'localstack-core/localstack/services/kms/models.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [24], 'mod': [15]}, "('KmsKey', 'derive_shared_secret', 368)": {'add': [381], 'mod': [388]}}}, {'path': 'tests/aws/services/kms/test_kms.py', 'status': 'modified', 'Loc': {"('TestKMS', 'test_derive_shared_secret', 1326)": {'add': [1332], 'mod': [1337, 1338, 1341]}, '(None, None, None)': {'add': [1370]}}}, {'path': 'tests/aws/services/kms/test_kms.snapshot.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [1783], 'mod': [1731]}}}, {'path': 'tests/aws/services/kms/test_kms.validation.json', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [33]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack-core/localstack/services/kms/models.py",
"tests/aws/services/kms/test_kms.validation.json",
"tests/aws/services/kms/test_kms.snapshot.json"
],
"doc": [],
"test": [
"tests/aws/services/kms/test_kms.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | a23e2fc70542af481fb3a0bd7042627ff50f0802 | https://github.com/localstack/localstack/issues/737 | Kinesis events to Lambda do not conform to spec | I am using Kinesis streams to trigger a Lambda function in localstack. The Kinesis records only include the `"kinesis"` block. AWS docs show several other metadata fields with each record:
```
{
"eventID": "shardId-000000000000:49545115243490985018280067714973144582180062593244200961",
"eventVersion": "1.0",
"kinesis": {
"partitionKey": "partitionKey-3",
"data": "SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=",
"kinesisSchemaVersion": "1.0",
"sequenceNumber": "49545115243490985018280067714973144582180062593244200961"
},
"invokeIdentityArn": identityarn,
"eventName": "aws:kinesis:record",
"eventSourceARN": eventsourcearn,
"eventSource": "aws:kinesis",
"awsRegion": "us-east-1"
}
```
My lambda is using the eventSourceARN to determine the source stream name. I can hack it for testing, but would prefer to test against proper live records.
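For reference, a sketch of the envelope the Lambda event should carry, built from the field names in the AWS example above (the helper name, ARNs, and shard id are illustrative, not LocalStack's actual code):
```python
def wrap_kinesis_record(record: dict, shard_id: str, stream_arn: str, region: str = "us-east-1") -> dict:
    """Wrap a raw Kinesis record in the metadata envelope Lambda expects."""
    return {
        "eventID": f"{shard_id}:{record['sequenceNumber']}",
        "eventVersion": "1.0",
        "kinesis": record,
        "invokeIdentityArn": "arn:aws:iam::000000000000:role/lambda-role",  # placeholder
        "eventName": "aws:kinesis:record",
        "eventSourceARN": stream_arn,
        "eventSource": "aws:kinesis",
        "awsRegion": region,
    }
```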
| null | null | https://github.com/localstack/localstack/commit/85e39818ae11e8f35e24b8df88703ede1231b62e | {'base_commit': '85e39818ae11e8f35e24b8df88703ede1231b62e', 'files': [{'path': 'localstack/services/awslambda/lambda_api.py', 'status': 'modified', 'Loc': {"(None, 'process_kinesis_records', 183)": {'add': [194]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "commit",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/awslambda/lambda_api.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
localstack | localstack | 28d3b76087979229f586911423307e6fd8995f19 | https://github.com/localstack/localstack/issues/2231 | [IAM] AmazonIdentityManagement with null message is thrown instead of EntityAlreadyExistsException | # Type of request: This is a ...
[X] bug report
# Detailed description
`EntityAlreadyExistsException` is not thrown correctly when creating IAM objects that are already present. `AmazonIdentityManagementException` with a null message is thrown instead
## Expected behavior
Localstack should throw `EntityAlreadyExistsException` with a populated message (not null)
## Actual behavior
```
com.amazonaws.services.identitymanagement.model.AmazonIdentityManagementException: null (Service: AmazonIdentityManagement; Status Code: 409; Error Code: 409 Conflict; Request ID: null)
```
# Steps to reproduce
- create an IAM role
- try to re-create it, catch `EntityAlreadyExistsException` but `AmazonIdentityManagementException` with null message is thrown instead
## Command used to start LocalStack
docker-compose up with `0.10.9`
## Client code (AWS SDK code snippet, or sequence of "awslocal" commands)
```
try {
localStackIAMClient.createRole(createRoleRequest);
localStackIAMClient.createRole(createRoleRequest);
} catch (EntityAlreadyExistsException e) {
// AmazonIdentityManagementException with null is thrown instead
}
```
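For comparison, the expected behavior is straightforward to reproduce with boto3 (a hedged sketch; the role name, trust policy, dummy credentials, and edge-port endpoint are all illustrative):
```python
import json

import boto3

iam = boto3.client(
    "iam",
    endpoint_url="http://localhost:4566",  # assumes LocalStack's edge port
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)
trust = json.dumps({"Version": "2012-10-17", "Statement": []})

iam.create_role(RoleName="my-role", AssumeRolePolicyDocument=trust)
try:
    iam.create_role(RoleName="my-role", AssumeRolePolicyDocument=trust)
except iam.exceptions.EntityAlreadyExistsException as err:
    # Real AWS populates the message, e.g. "Role with name my-role already exists."
    print(err.response["Error"]["Message"])
```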
| null | https://github.com/localstack/localstack/pull/2316 | null | {'base_commit': '28d3b76087979229f586911423307e6fd8995f19', 'files': [{'path': '.dockerignore', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [6]}}}, {'path': 'localstack/services/iam/iam_listener.py', 'status': 'modified', 'Loc': {"('ProxyListenerIAM', 'return_response', 17)": {'add': [22]}, "('ProxyListenerIAM', None, 9)": {'add': [36]}}}, {'path': 'tests/integration/test_iam.py', 'status': 'modified', 'Loc': {}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/iam/iam_listener.py"
],
"doc": [
".dockerignore"
],
"test": [
"tests/integration/test_iam.py"
],
"config": [],
"asset": []
} | 1 | |
localstack | localstack | 581980f89037694181765dfa400ce9f75c6a01ed | https://github.com/localstack/localstack/issues/4409 | type: feature | feature request: ConfigService | `moto` provides support for several of the AWS ConfigService APIs. Would it be possible to provide that same support with LocalStack? | null | https://github.com/localstack/localstack/pull/4500 | null | {'base_commit': '581980f89037694181765dfa400ce9f75c6a01ed', 'files': [{'path': 'localstack/plugins.py', 'status': 'modified', 'Loc': {"(None, 'do_register_localstack_plugins', 29)": {'add': [35, 85]}}}, {'path': 'localstack/services/support/support_starter.py', 'status': 'modified', 'Loc': {"(None, 'start_support', 4)": {'mod': [5]}}}, {'path': 'requirements.txt', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [52]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/plugins.py",
"localstack/services/support/support_starter.py"
],
"doc": [],
"test": [],
"config": [
"requirements.txt"
],
"asset": []
} | 1 |
localstack | localstack | 95f91f68c16cedbcfbf0a51725f88c113224de27 | https://github.com/localstack/localstack/issues/983 | type: bug
status: triage needed | AWS lambda on localstack not seeing its dependencies | Hi guys. I am running localstack 0.8.7 and I am encountering problems running a lambda function that has external dependencies. The zip file works well in a real AWS environment but fails in localstack because it cannot find the dependencies.
Adding lambda
```
aws --endpoint-url=http://localhost:4574 lambda create-function --function-name=myfunction --runtime=java8 --role=r1 --handler=com.my.UpdateHandler --zip-file fileb://my-lambda-0.1.0-1540476215-64df908.zip
```
Executing lambda
```
aws lambda --endpoint-url=http://localhost:4574 invoke --invocation-type RequestResponse --function-name myfunction --region eu-west-1 --payload {\"store\":\"9722\"\,\"pos\":\"80\"\,\"app\":\"price\"} out.txt
```
This is the stacktrace
```
Exception: Lambda process returned error status code: 1. Output:
Exception in thread "main" java.lang.reflect.InvocationTargetException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at cloud.localstack.LambdaExecutor.getHandler(LambdaExecutor.java:138)
at cloud.localstack.LambdaExecutor.main(LambdaExecutor.java:52)
Caused by: java.lang.NoClassDefFoundError: com/fasterxml/jackson/databind/ObjectMapper
at my.create(Houston.java:56)
at my.UpdateHandler.<init>(UpdateHandler.java:17)
... 6 more
Caused by: java.lang.ClassNotFoundException: com.fasterxml.jackson.databind.ObjectMapper
at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:338)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
... 8 more
```
I'm suspicious that the problem could be with localstack since the zip file with the structure
-lib (dependencies)
-com (lambda)
works fine in AWS but has problems in localstack.
Help.
A guy in need.
| null | https://github.com/localstack/localstack/pull/3704 | null | {'base_commit': '95f91f68c16cedbcfbf0a51725f88c113224de27', 'files': [{'path': 'tests/integration/test_lambda.py', 'status': 'modified', 'Loc': {"('TestJavaRuntimes', 'test_java_runtime_with_lib', 1476)": {'mod': [1489]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [],
"doc": [],
"test": [
"tests/integration/test_lambda.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | debb24a792a7e2a1751ddf1f30d5c79f80b4885f | https://github.com/localstack/localstack/issues/612 | type: bug
status: triage needed | Uploading to S3 presigned URLs doesn't check Content-MD5 or other presigned constraints | If I generate a presigned URL for uploading into a bucket, and I specify a content type or a content MD5 to be encapsulated into the URL, these are not then enforced when I upload to that URL. I can set whatever `Content-MD5` header I like in the HTTP upload, and it's accepted.
Furthermore, the `Content-MD5` header doesn't get checked even against the content being uploaded. I can set the header to `blah` and I don't get any errors.
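For reference, S3's `Content-MD5` is the base64-encoded (not hex) MD5 digest of the request body, and on real AWS a mismatch is rejected with a `BadDigest` error. A minimal sketch of the expected computation (plain Python, not LocalStack's implementation):
```python
import base64
import hashlib

def content_md5(body: bytes) -> str:
    """Compute the Content-MD5 value S3 expects for a request body."""
    return base64.b64encode(hashlib.md5(body).digest()).decode("ascii")

assert content_md5(b"hello") == "XUFAKrxLKna5cZ2REBfFkg=="
```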
Is this expected? | null | https://github.com/localstack/localstack/pull/772 | null | {'base_commit': 'debb24a792a7e2a1751ddf1f30d5c79f80b4885f', 'files': [{'path': 'localstack/services/generic_proxy.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [9, 15], 'mod': [1, 2, 5, 6, 8]}}}, {'path': 'localstack/services/kinesis/kinesis_listener.py', 'status': 'modified', 'Loc': {"('ProxyListenerKinesis', 'forward_request', 20)": {'mod': [24]}}}, {'path': 'localstack/services/s3/s3_listener.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [4, 281]}, "('ProxyListenerS3', 'forward_request', 338)": {'add': [339]}, "('ProxyListenerS3', 'return_response', 438)": {'mod': [442]}}}, {'path': 'requirements.txt', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [20]}}}, {'path': 'tests/integration/test_s3.py', 'status': 'modified', 'Loc': {"(None, 'test_s3_get_response_headers', 171)": {'add': [206]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/kinesis/kinesis_listener.py",
"localstack/services/generic_proxy.py",
"localstack/services/s3/s3_listener.py"
],
"doc": [],
"test": [
"tests/integration/test_s3.py"
],
"config": [
"requirements.txt"
],
"asset": []
} | 1 |
localstack | localstack | 2641d910cc5f1a04f70dd60a7ebfc25cd716bcd6 | https://github.com/localstack/localstack/issues/1902 | status: triage needed | changeMessageVisibility function doesn't work | Hi,
I'm using the changeMessageVisibility function in order to return a message to the queue, by calling
`serviceName.changeMessageVisibility(recipientId, 0);`
but it doesn't work, the message doesn't reappear in the queue. | null | https://github.com/localstack/localstack/pull/1914 | null | {'base_commit': '2641d910cc5f1a04f70dd60a7ebfc25cd716bcd6', 'files': [{'path': 'localstack/services/sqs/sqs_listener.py', 'status': 'modified', 'Loc': {"('ProxyListenerSQS', 'return_response', 81)": {'add': [96], 'mod': [89, 90, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151]}}}, {'path': 'localstack/utils/aws/aws_stack.py', 'status': 'modified', 'Loc': {"(None, 'fix_account_id_in_arns', 279)": {'mod': [281]}}}, {'path': 'tests/integration/test_sqs.py', 'status': 'modified', 'Loc': {"('SQSTest', 'test_publish_get_delete_message', 49)": {'add': [59, 65]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/utils/aws/aws_stack.py",
"localstack/services/sqs/sqs_listener.py"
],
"doc": [],
"test": [
"tests/integration/test_sqs.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | a258338f5c88f49b517f7ecf66be113e481a0afe | https://github.com/localstack/localstack/issues/6551 | type: question
aws:ssm
aws:secretsmanager | bug: Can't get SSM secret parameter using localstack. | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
This bug has been reported before here. https://github.com/localstack/localstack/issues/3128
Currently, I'm encountering the same issues.
The issue is that creating an SSM secret using the awslocal cli and trying to retrieve the secret using awslocal gives me a (ParameterNotFound) error.
### Expected Behavior
The expected behaviour is that I should successfully retrieve a stored secret instead of getting a (ParameterNotFound) error.
### How are you starting LocalStack?
Custom (please describe below)
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
Starting local stack from the local stack cli by running `localstack start`
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
Create secret
`awslocal secretsmanager create-secret --name TestSecret --secret-string "TT"`
Try to get secret
`awslocal ssm get-parameter --name TestSecret`
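Note that on real AWS, Secrets Manager secrets are exposed through SSM under the reserved path `/aws/reference/secretsmanager/<secret-name>` rather than the bare secret name. A hedged boto3 sketch (the endpoint URL assumes LocalStack's edge port; the dummy credentials are illustrative):
```python
import boto3

ssm = boto3.client(
    "ssm",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)
response = ssm.get_parameter(
    Name="/aws/reference/secretsmanager/TestSecret",
    WithDecryption=True,  # secrets are returned as SecureStrings
)
print(response["Parameter"]["Value"])  # "TT"
```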
### Environment
```markdown
- OS:Windows 10 pro 20H2
- Python version - 3.10
- LocalStack version: 1.0.3.dev
- LocalStack: latest
```
### Anything else?
docker logs
```
2022-07-29T13:36:46.005 INFO --- [ asgi_gw_0] localstack.request.aws : AWS secretsmanager.CreateSecret => 200
2022-07-29T13:37:28.515 INFO --- [ asgi_gw_1] localstack.request.aws : AWS ssm.GetParameter => 400 (ParameterNotFound)
``` | null | https://github.com/localstack/localstack/pull/6564 | null | {'base_commit': 'a258338f5c88f49b517f7ecf66be113e481a0afe', 'files': [{'path': 'localstack/services/ssm/provider.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 23, 27], 'mod': [4, 6]}, "('SsmProvider', None, 28)": {'add': [28], 'mod': [37, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 155, 156, 157, 158, 159, 160, 162, 163, 164, 165, 166, 167, 168, 169, 170]}, "('SsmProvider', '_get_secrets_information', 45)": {'add': [53], 'mod': [51, 56]}}}, {'path': 'tests/integration/test_ssm.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [0, 3], 'mod': [19]}, "('TestSSM', None, 20)": {'add': [20, 25, 67, 110], 'mod': [38, 39, 40, 51, 74]}, "(None, '_assert', 6)": {'mod': [6]}, "('TestSSM', 'test_put_parameters', 26)": {'mod': [36]}, "('TestSSM', 'test_hierarchical_parameter', 39)": {'mod': [48, 49]}, "('TestSSM', 'test_get_secret_parameter', 52)": {'mod': [60, 65, 66]}, "('TestSSM', 'test_get_inexistent_secret', 68)": {'mod': [69, 70, 71, 72]}, "('TestSSM', 'test_get_parameters_and_secrets', 75)": {'mod': [78, 100, 108]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/ssm/provider.py"
],
"doc": [],
"test": [
"tests/integration/test_ssm.py"
],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 177fc797678664a0c06b8c6c434330cef44541a1 | https://github.com/localstack/localstack/issues/459 | type: bug | Underscore converted to hyphen while put it as metadata using amazon-sdk | Hi,
When putting an object with metadata whose keys include an underscore, the underscore gets converted to a hyphen.
The same code against real Amazon S3 returns the metadata with the underscore intact - not converted.
For example:
we put a map of strings as metadata - the key "__key1" is converted to "--key1"
```java
public class MyAwsS3Tester {
public static final String IP = "10.0.0.24";
public static final String BUCKET_NAME = "zanavi-test";
public static void main(String[] args) {
AmazonS3 s3 = AmazonS3ClientBuilder.standard()
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://" + IP + ":4572/", "us-east-1"))
.disableChunkedEncoding()
.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("zanavi", "1234")))
.build();
if (s3.doesBucketExistV2(BUCKET_NAME)) {
System.out.println("bucket zanavi exists");
}
else {
System.out.println("bucket " + BUCKET_NAME + " doesn't exists");
s3.createBucket(BUCKET_NAME);
}
String dummyStr = "dummy-str";
Map<String, String> myMap = new HashMap<String, String>();
myMap.put("__key1", "val1");
ObjectMetadata objectMetadata = new ObjectMetadata();
objectMetadata.setUserMetadata(myMap);
InputStream is = new ByteArrayInputStream(dummyStr.getBytes(StandardCharsets.UTF_8));
s3.putObject(new PutObjectRequest(BUCKET_NAME, "my-key1", is, objectMetadata));
S3Object getObj = s3.getObject(new GetObjectRequest(BUCKET_NAME, "my-key1"));
ObjectMetadata objectMetadataResponse = getObj.getObjectMetadata();
Map<String, String> myMap1 = objectMetadataResponse.getUserMetadata();
System.out.println("done " + myMap1);
}
}
``` | null | https://github.com/localstack/localstack/pull/482 | null | {'base_commit': '177fc797678664a0c06b8c6c434330cef44541a1', 'files': [{'path': 'bin/Dockerfile.base', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [30, 32]}}}, {'path': 'localstack/ext/java/src/test/java/cloud/localstack/S3HttpsConnectionTest.java', 'status': 'removed', 'Loc': {}}, {'path': 'localstack/ext/java/src/test/java/cloud/localstack/S3LifecycleTest.java', 'status': 'removed', 'Loc': {}}, {'path': 'localstack/services/generic_proxy.py', 'status': 'modified', 'Loc': {"('GenericProxyHandler', 'forward', 158)": {'mod': [224, 225]}}}, {'path': 'tests/integration/test_dynamodb.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [8]}, "('DynamoDBIntegrationTest', 'test_non_ascii_chars', 14)": {'add': [34]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/generic_proxy.py"
],
"doc": [],
"test": [
"tests/integration/test_dynamodb.py"
],
"config": [
"bin/Dockerfile.base"
],
"asset": [
"localstack/ext/java/src/test/java/cloud/localstack/S3LifecycleTest.java",
"localstack/ext/java/src/test/java/cloud/localstack/S3HttpsConnectionTest.java"
]
} | 1 |
localstack | localstack | b09c4f89481cec43b3d126c15050910cae81e9d1 | https://github.com/localstack/localstack/issues/5357 | type: bug
status: triage needed
aws:firehose
aws:opensearch | bug: `AmazonopensearchserviceDestinationConfiguration` is not supported for Firehose-Streams | ### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
As mentioned [here](https://github.com/localstack/localstack/issues/4834#issuecomment-1021009701) it seems like the `AmazonopensearchserviceDestinationConfiguration` was not added while implementing OpenSearch. I guess it just needs to be added [here](https://github.com/localstack/localstack/blob/53b5c7788bf35b1882b6cb1949e17d27e198cf61/localstack/services/cloudformation/models/kinesisfirehose.py#L23-L27).
### Expected Behavior
I can (and should) use `AmazonopensearchserviceDestinationConfiguration` instead of `ElasticsearchDestinationConfiguration` (which I should only be able to use if I use ElasticSearch-Service).
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
Well, just use `AmazonopensearchserviceDestinationConfiguration` and your stream will never be able to deliver the records to your (external) Cluster. But it works with `ElasticsearchDestinationConfiguration`.
### Environment
```markdown
- OS: Windows mit WSL (Ubuntu 20.04)
- LocalStack: latest
```
### Anything else?
_No response_ | null | https://github.com/localstack/localstack/pull/5379 | null | {'base_commit': '507c42709ce08911153840f8b2e43b74f52ee9a5', 'files': [{'path': '.github/workflows/pro-integration.yml', 'status': 'modified', 'Loc': {'(None, None, 81)': {'mod': [81]}, '(None, None, 92)': {'mod': [92]}}}, {'path': 'localstack/services/firehose/provider.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [85], 'mod': [83]}, "('FirehoseProvider', None, 139)": {'add': [544]}, "('FirehoseProvider', '_put_records', 432)": {'mod': [463, 464, 465, 466, 467, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 490, 491, 492, 493, 494, 495, 496, 498, 499, 500, 501, 502, 504, 505, 506, 507, 508, 509, 510, 511, 513, 514, 515, 516, 517]}}}, {'path': 'localstack/utils/aws/aws_stack.py', 'status': 'modified', 'Loc': {"(None, 'get_elasticsearch_endpoint', 1100)": {'mod': [1100, 1101, 1102, 1103, 1104, 1106]}, "(None, 'connect_elasticsearch', 1112)": {'mod': [1112, 1113, 1116, 1119, 1120, 1134, 1136, 1143]}}}, {'path': 'requirements.txt', 'status': 'modified', 'Loc': {'(None, None, 46)': {'mod': [46]}}}, {'path': 'tests/integration/conftest.py', 'status': 'modified', 'Loc': {"(None, 'pytest_runtestloop', 40)": {'mod': [48, 49, 50, 52]}}}, {'path': 'tests/integration/test_firehose.py', 'status': 'modified', 'Loc': {"('TestFirehoseIntegration', None, 147)": {'add': [248]}, "('TestFirehoseIntegration', 'assert_elasticsearch_contents', 222)": {'mod': [224]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"tests/integration/conftest.py",
"localstack/utils/aws/aws_stack.py",
"localstack/services/firehose/provider.py"
],
"doc": [],
"test": [
"tests/integration/test_firehose.py"
],
"config": [
"requirements.txt",
".github/workflows/pro-integration.yml"
],
"asset": []
} | 1 |
localstack | localstack | ae8db74df81821040e3ac654c62d2118da85255a | https://github.com/localstack/localstack/issues/27 | priority: medium
type: feature | Use lambci/docker-lambda for local lambda execution? | Feel free to close this but might be worth considering using https://github.com/lambci/docker-lambda to execute lambdas. Seems they dumped the filesystem of a live lambda and made a container out of it. Neat. | null | null | https://github.com/localstack/localstack/commit/2de054cf799e79021290e9590000eb6047f93bef | {'base_commit': '2de054cf799e79021290e9590000eb6047f93bef', 'files': [{'path': 'Dockerfile', 'status': 'modified', 'Loc': {'(None, None, 59)': {'add': [59]}, '(None, None, 7)': {'mod': [7]}, '(None, None, 54)': {'mod': [54]}, '(None, None, 61)': {'mod': [61]}, '(None, None, 74)': {'mod': [74, 76, 77]}}}, {'path': 'Makefile', 'status': 'modified', 'Loc': {'(None, None, 5)': {'add': [5]}, '(None, None, 68)': {'add': [68]}, '(None, None, 60)': {'mod': [60]}}}, {'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, 115)': {'add': [115]}, '(None, None, 238)': {'add': [238]}}}, {'path': 'localstack/config.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [1], 'mod': [5, 6, 7, 10, 11, 12, 15, 16, 17]}}}, {'path': 'localstack/constants.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [92]}}}, {'path': 'localstack/mock/apis/lambda_api.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [15, 28, 34, 37, 40, 50, 57], 'mod': [22, 23, 24, 30, 232, 233, 234, 235, 236]}, "(None, 'add_event_source', 84)": {'add': [86], 'mod': [95]}, "(None, 'set_function_code', 233)": {'add': [243], 'mod': [248, 249, 270]}, "(None, 'use_docker', 100)": {'mod': [103]}, "(None, 'in_docker', 113)": {'mod': [117]}, "(None, 'process_kinesis_records', 121)": {'mod': [124, 130, 131, 133]}, "(None, 'get_event_sources', 141)": {'mod': [143]}, "(None, 'run_lambda', 150)": {'mod': [166, 176, 177, 178, 181, 182, 189, 192, 193]}, "(None, 'exec_lambda_code', 195)": {'mod': [208, 217]}, "(None, 'delete_function', 378)": {'mod': [390]}, "(None, 'update_function_code', 404)": {'mod': [411, 412]}}}, {'path': 'localstack/mock/infra.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [57]}, "(None, 'start_elasticsearch', 63)": {'add': [70]}}}, {'path': 'localstack/mock/proxy/dynamodb_listener.py', 'status': 'modified', 'Loc': {"(None, 'update_dynamodb', 14)": {'mod': [82]}}}, {'path': 'localstack/utils/aws/aws_stack.py', 'status': 'modified', 'Loc': {"(None, 'connect_elasticsearch', 346)": {'add': [348]}, '(None, None, None)': {'mod': [3, 8, 12, 13]}}}, {'path': 'localstack/utils/common.py', 'status': 'modified', 'Loc': {"('ShellCommandThread', 'stop', 92)": {'add': [99]}, "(None, 'is_zip_file', 197)": {'add': [199]}, "(None, 'make_http_request', 277)": {'add': [277]}, '(None, None, None)': {'add': [290], 'mod': [8, 9, 10, 11]}}}, {'path': 'localstack/utils/testutil.py', 'status': 'modified', 'Loc': {"(None, 'create_lambda_archive', 51)": {'add': [60], 'mod': [52, 57, 74]}, '(None, None, None)': {'mod': [6, 8, 16]}, "(None, 'create_lambda_function', 77)": {'mod': [81]}}}, {'path': 'setup.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [82]}}}, {'path': 'tests/test_integration.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [11, 24], 'mod': [19]}, "(None, 'test_kinesis_lambda_ddb_streams', 109)": {'mod': [136, 137, 138, 139, 140, 142, 145, 146]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "commit",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/mock/infra.py",
"localstack/constants.py",
"localstack/config.py",
"setup.py",
"localstack/utils/aws/aws_stack.py",
"localstack/mock/apis/lambda_api.py",
"localstack/mock/proxy/dynamodb_listener.py",
"localstack/utils/common.py"
],
"doc": [
"README.md"
],
"test": [
"localstack/utils/testutil.py",
"tests/test_integration.py"
],
"config": [
"Makefile",
"Dockerfile"
],
"asset": []
} | 1 |
localstack | localstack | 651f87eb51c36f7e58b421acf8e9966a8932feb1 | https://github.com/localstack/localstack/issues/2268 | Displaying the version details in the logs | # Type of request: This is a ...
[ ] bug report
[X ] feature request
# Detailed description
I am running LocalStack using docker-compose up, and the logs are printed to the console.
It would be good to have the below features in the logs during start-up (useful for debugging purposes):
1. Display the localstack version number
2. Display the docker container id.
| null | https://github.com/localstack/localstack/pull/2282 | null | {'base_commit': '651f87eb51c36f7e58b421acf8e9966a8932feb1', 'files': [{'path': 'bin/localstack', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [23, 36, 39]}}}, {'path': 'localstack/utils/cli.py', 'status': 'modified', 'Loc': {"(None, 'cmd_infra', 9)": {'add': [20]}, "(None, 'cmd_web', 37)": {'add': [47]}, '(None, None, None)': {'mod': [2]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/utils/cli.py"
],
"doc": [],
"test": [],
"config": [],
"asset": [
"bin/localstack"
]
} | 1 | |
localstack | localstack | 2d3a44fdb977213589ba202a5e495710097ce88b | https://github.com/localstack/localstack/issues/1777 | type: bug | Lambda executor "docker-reuse" errors with "tcp :9001: bind: address already in use" | Hi, I'm using localstack 0.8.5 with the lambda executor in "docker-reuse" mode. This was working all along but suddenly started to give these port bind errors during execution. There don't seem to be any processes using this port, however. If I use "docker" as the lambda executor this issue goes away, but I end up with another problem: a huge number of containers, one for each execution of the lambda. My integration tests essentially send events to a kinesis stream and the lambda reads from this stream, so for each execution I get a new container. This is not ideal, as it hogs up all the memory on the machine and the tests end up timing out.
Has anyone come across this issue recently, or know what changed? I don't see any changes to the 0.8.5 docker image.
```
localstack_1 | 2019-11-20T05:25:59:WARNING:localstack.services.awslambda.lambda_api: Error executing Lambda function: Lambda process returned error status code: 1. Output:
localstack_1 | 2019/11/20 05:25:59 listen tcp :9001: bind: address already in use
localstack_1 | Traceback (most recent call last):
localstack_1 | File "/opt/code/localstack/localstack/services/awslambda/lambda_api.py", line 250, in run_lambda
localstack_1 | event, context=context, version=version, async=async)
localstack_1 | File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 129, in execute
localstack_1 | result, log_output = self.run_lambda_executor(cmd, environment, async)
localstack_1 | File "/opt/code/localstack/localstack/services/awslambda/lambda_executors.py", line 66, in run_lambda_executor
localstack_1 | (return_code, log_output))
localstack_1 | Exception: Lambda process returned error status code: 1. Output:
localstack_1 | 2019/11/20 05:25:59 listen tcp :9001: bind: address already in use
localstack_1 |
```
These errors happen sporadically, but the result of these errors is nondeterministic test failures :(
Docker compose service:
```yaml
localstack:
  image: localstack/localstack:0.8.5
  ports:
    - "4567-4583:4567-4583"
  expose:
    - "4567-4583"
  environment:
    - SERVICES=sqs,kinesis,lambda,dynamodb
    - DEFAULT_REGION=us-east-1
    - LAMBDA_EXECUTOR=docker-reuse
    - DOCKER_HOST=unix:///var/run/docker.sock
  volumes:
    - "/private${TMPDIR}/localstack:/tmp/localstack"
    - "/var/run/docker.sock:/var/run/docker.sock"
```
| null | https://github.com/localstack/localstack/pull/1861 | null | {'base_commit': '2d3a44fdb977213589ba202a5e495710097ce88b', 'files': [{'path': 'localstack/services/awslambda/lambda_executors.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [45]}, "('LambdaExecutorSeparateContainers', None, 571)": {'add': [572]}, "('LambdaExecutorSeparateContainers', 'prepare_execution', 579)": {'add': [589], 'mod': [586, 587, 597, 598, 599, 602]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/awslambda/lambda_executors.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |
localstack | localstack | 8433682f8ad29dc23a5e909cb229d0cb033beeaa | https://github.com/localstack/localstack/issues/2329 | s3.upload returns `Location: http://localhost:4566` | # Bug report
# Detailed description
The `AWS.s3.upload()` (official SDK - https://github.com/aws/aws-sdk-js) returns an object with the `Location` key that points to 4566 instead of 4572 (LocalStack S3 port).
## Expected behavior
The `Location` should point to the file on S3.
Example:
```
Location: http://localhost:4572/path/to/bucket.txt
```
## Actual behavior
The `Location` points to the LocalStack entrypoint.
Example:
```
Location: http://localhost:4566/path/to/bucket.txt
```
# Steps to reproduce
- Upload a file to S3 using the official AWS SDK (https://github.com/aws/aws-sdk-js).
- Check out the `Location` property.
## Client code
```javascript
const AWS = require('aws-sdk');
const s3 = new AWS.S3({
region: 'us-west-1',
endpoint: 'http://localhost:4566',
apiVersion: '2006-03-01',
s3ForcePathStyle: true,
});
(async () => {
await s3
.createBucket({ Bucket: 'my-bucket', ACL: 'private' })
.promise();
const { Location } = await s3
.upload({ Key: 'file.txt', Body: 'test', Bucket: 'my-bucket' })
.promise();
console.assert(Location === 'http://localhost:4572/my-bucket/file.txt');
})();
``` | null | https://github.com/localstack/localstack/pull/2332 | null | {'base_commit': '8433682f8ad29dc23a5e909cb229d0cb033beeaa', 'files': [{'path': 'localstack/services/edge.py', 'status': 'modified', 'Loc': {"('ProxyListenerEdge', 'forward_request', 22)": {'add': [40]}}}, {'path': 'tests/integration/test_lambda.py', 'status': 'modified', 'Loc': {}}, {'path': 'tests/unit/test_sns.py', 'status': 'modified', 'Loc': {"('SNSTests', 'test_unsubscribe_should_remove_listener', 25)": {'mod': [26, 27, 34]}, "('SNSTests', 'test_only_one_subscription_per_topic_per_endpoint', 207)": {'mod': [208, 209, 217]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"localstack/services/edge.py"
],
"doc": [],
"test": [
"tests/integration/test_lambda.py",
"tests/unit/test_sns.py"
],
"config": [],
"asset": []
} | 1 | |
OpenInterpreter | open-interpreter | eded6a2ab6f46cd19caa1559ae23b528a70d1707 | https://github.com/OpenInterpreter/open-interpreter/issues/1495 | Date format string error on Windows when using --os flag | ### Describe the bug
I encountered an error when running `interpreter --os` on Windows. The error occurs due to an incompatible date format string.
## Environment
- OS: Windows
- Python version: 3.11
- open-interpreter version: 0.4.1
## Error Message
ValueError: Invalid format string
## Error Location
The error occurs in `interpreter\computer_use\loop.py`, where the date format string uses `%-d` which is not supported on Windows.
## Current code:
```python
datetime.today().strftime('%A, %B %-d, %Y')
```
## Solution
Changing `%-d` to `%d` fixes the issue. This suggests that the code should handle platform-specific date formatting.
## Suggestion
Consider using one of these approaches to fix this cross-platform issue:
- Use `%d` instead of `%-d`
- Add platform-specific handling for date formatting
- Use a cross-platform date formatting library
This would improve the Windows user experience with open-interpreter.
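For illustration, a minimal platform-aware sketch (assuming the Windows CRT's `%#d` flag, which strips the leading zero the way `%-d` does on POSIX):
```python
import platform
from datetime import datetime

# '%-d' is a glibc/BSD extension; the Windows CRT uses '%#d' instead
day = "%#d" if platform.system() == "Windows" else "%-d"
print(datetime.today().strftime(f"%A, %B {day}, %Y"))
```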
### Reproduce
1. Run `pip install open-interpreter`
2. Run `interpreter --os`
3. The error occurs due to incompatible date format string
4. Error message shows: ValueError: Invalid format string
### Expected behavior
The program should start normally without any date format errors when using the --os flag on Windows.
### Screenshots

### Open Interpreter version
0.4.1
### Python version
3.11
### Operating System name and version
Windows 11
### Additional context
_No response_ | null | https://github.com/OpenInterpreter/open-interpreter/pull/1496 | null | {'base_commit': 'eded6a2ab6f46cd19caa1559ae23b528a70d1707', 'files': [{'path': 'interpreter/computer_use/loop.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [104]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"interpreter/computer_use/loop.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 | |
OpenInterpreter | open-interpreter | 637fedd2dbe2964b09fb7ae9832bdbffed4494ca | https://github.com/OpenInterpreter/open-interpreter/issues/303 | Bug | The code it generated repeated the same function call 20-30 times and used up $5 in a matter of minutes | ### Describe the bug
The same line was written multiple times. I was pressing y and Enter and didn't realise I had executed code with 20-30 calls to a GPT-4 summary of a long text, and it ate $5.
### Reproduce
I am not sure, but I did paste a code snippet along with instructions to guide it, because it was making the same mistake multiple times.
### Expected behavior
i wanted to recreate this https://platform.openai.com/docs/tutorials/meeting-minutes/creating-an-automated-meeting-minutes-generator-with-whisper-and-gpt-4
### Screenshots
_No response_
### Open Interpreter version
0.1.3
### Python version
3.11.4
### Operating System name and version
mac 12.5
### Additional context
_No response_ | null | https://github.com/OpenInterpreter/open-interpreter/pull/316 | null | {'base_commit': '637fedd2dbe2964b09fb7ae9832bdbffed4494ca', 'files': [{'path': 'interpreter/cli.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [30]}, "(None, 'cli', 44)": {'add': [120, 132]}}}, {'path': 'interpreter/interpreter.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [36, 95]}, "('Interpreter', 'chat', 317)": {'add': [391]}, "('Interpreter', 'respond', 581)": {'add': [645], 'mod': [656, 658]}}}, {'path': 'poetry.lock', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [1, 593, 598, 599, 604, 728, 733, 734, 1338]}}}, {'path': 'pyproject.toml', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [13, 28]}}}]} | [] | [] | [] | {
"iss_type": "2",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"interpreter/cli.py",
"interpreter/interpreter.py"
],
"doc": [],
"test": [],
"config": [
"pyproject.toml",
"poetry.lock"
],
"asset": []
} | 1 |
abi | screenshot-to-code | 82c448803f28aa3f5035e5302d78891dfcc661c0 | https://github.com/abi/screenshot-to-code/issues/5 | good first issue | Add a Dockerfile | so it's easier for people to get this up and running, without running a bunch of different commands. | null | https://github.com/abi/screenshot-to-code/pull/7 | null | {'base_commit': '82c448803f28aa3f5035e5302d78891dfcc661c0', 'files': [{'path': '.gitignore', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [4]}}}, {'path': 'README.md', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [45], 'mod': [18, 31]}}}, {'path': 'frontend/src/generateCode.ts', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [4]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"frontend/src/generateCode.ts"
],
"doc": [
"README.md"
],
"test": [],
"config": [
".gitignore"
],
"asset": []
} | 1 |
pytorch | pytorch | 94e52e1d1745003fa3a434ed74c1fe87cf8ef349 | https://github.com/pytorch/pytorch/issues/89 | todo | Containers should allow module assignments | Right now, after you created a Container, you can assign modules at a later time to it like this:
``` python
container.add_module('linear', nn.Linear())
```
Instead, also allow this simpler interface:
``` python
container.linear = nn.Linear()
```
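A minimal sketch of how attribute assignment can be intercepted to register submodules automatically (illustrative only, not PyTorch's actual implementation):
```python
class Module:  # stand-in for nn.Module, just for illustration
    pass

class Container(Module):
    def __init__(self):
        object.__setattr__(self, "_modules", {})

    def __setattr__(self, name, value):
        # Route Module-valued assignments through the registry as well,
        # so `container.linear = nn.Linear()` behaves like add_module()
        if isinstance(value, Module):
            self._modules[name] = value
        object.__setattr__(self, name, value)

container = Container()
container.linear = Module()
assert "linear" in container._modules
```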
| null | https://github.com/pytorch/pytorch/pull/136 | null | {'base_commit': '94e52e1d1745003fa3a434ed74c1fe87cf8ef349', 'files': [{'path': 'test/test_nn.py', 'status': 'modified', 'Loc': {"('TestNN', 'test_add_module', 306)": {'mod': [319]}, "('TestNN', 'test_non_leaf_parameters', 335)": {'mod': [340]}}}, {'path': 'torch/nn/modules/container.py', 'status': 'modified', 'Loc': {"('Container', None, 14)": {'add': [70]}, "('Container', 'add_module', 56)": {'mod': [60]}}}, {'path': 'torch/nn/modules/module.py', 'status': 'modified', 'Loc': {"('Module', '__setattr__', 95)": {'mod': [96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"torch/nn/modules/container.py",
"torch/nn/modules/module.py"
],
"doc": [],
"test": [
"test/test_nn.py"
],
"config": [],
"asset": []
} | 1 |
pytorch | pytorch | e61f5b586bcf42010d42b0c20c0d2b159ce11d11 | https://github.com/pytorch/pytorch/issues/18626 | high priority
module: cuda
triaged
enhancement | [feature request] Set limit on GPU memory use | ## 🚀 Feature
Allow user to easily specify a fraction of the GPU memory to use.
## Motivation
I recently switched from tensorflow to pytorch for what I saw as greater flexibility and user control. However, I have been recently frustrated by the inability to specify a cap on the fraction of GPU memory my pytorch process should be using. I have what I think is a fairly standard use case in performing hyper-parameter search by running multiple independent training processes in parallel (there is a whole ecosystem of packages for this). A modern GPU is large enough to train 4-8+ models of my size, but very rarely a configuration is selected which uses almost my full GPU memory. Instead of that memory-hungry process failing with an OOM by using more than its share (the sensible behavior), instead all the other training processes die, and as my script automatically generates more to take their place, they fail too, leading to a catastrophic global failure.
There are additional use cases like shared servers, training models with small batch sizes for statistical efficiency, and using very large GPUs effectively (like modern Voltas).
In tensorflow, this can be done simply by passing a `ConfigProto` with `gpu_memory_fraction=x`.
There seems to be a pain point for others as well (see Additional context). I know that for me, finding a way to do this will probably make the difference on switching back to TF.
## Pitch
It would be awesome if an option like gpu_memory_fraction could be set somewhere in the pytorch flow. My current recommendation would be to allow an optional arg or kwarg to torch.device, like the index argument, which would specify the fraction of GPU memory to use. If more than `gpu_mem_fraction * total_gpu_mem` is attempted to be allocated, raise OOM.
## Alternatives
It seems like you might be able to do some kind of hacky work around using https://pytorch.org/docs/stable/cuda.html#torch.cuda.max_memory_allocated and monitoring of each process being trained.
## Additional context
https://discuss.pytorch.org/t/how-to-set-a-limit-to-gpu-usage/7271
https://stackoverflow.com/questions/49529372/force-gpu-memory-limit-in-pytorch
https://discuss.pytorch.org/t/limiting-gpu-usage/7662
cc @ezyang @gchanan @zou3519 @bdhirsh @jbschlosser @ngimel | null | null | https://github.com/pytorch/pytorch/commit/47aa2536328afc51876b2e04384c0cfe71ee1f06 | {'base_commit': '47aa2536328afc51876b2e04384c0cfe71ee1f06', 'files': [{'path': 'c10/cuda/CUDACachingAllocator.cpp', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [204, 640, 944]}, '(None, None, 221)': {'add': [243, 247, 270]}, '(None, None, 363)': {'add': [375]}, '(None, None, 630)': {'add': [632], 'mod': [634]}, '(None, None, 649)': {'add': [667]}, '(None, None, 845)': {'add': [848]}}}, {'path': 'c10/cuda/CUDACachingAllocator.h', 'status': 'modified', 'Loc': {"(None, 'StatType', 54)": {'add': [113]}}}, {'path': 'test/test_cuda.py', 'status': 'modified', 'Loc': {"('TestCuda', None, 94)": {'add': [393]}}}, {'path': 'torch/_C/__init__.pyi.in', 'status': 'modified', 'Loc': {'(None, None, 582)': {'add': [582]}}}, {'path': 'torch/csrc/cuda/Module.cpp', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [265]}, '(None, None, 500)': {'add': [500]}}}, {'path': 'torch/cuda/memory.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [74]}}}]} | [] | [] | [] | {
"iss_type": "4",
"iss_reason": "2",
"loc_way": "commit",
"loc_scope": null,
"info_type": null
} | {
"code": [
"torch/cuda/memory.py",
"c10/cuda/CUDACachingAllocator.h",
"c10/cuda/CUDACachingAllocator.cpp",
"torch/csrc/cuda/Module.cpp",
"torch/_C/__init__.pyi.in"
],
"doc": [],
"test": [
"test/test_cuda.py"
],
"config": [],
"asset": []
} | 1 |
pytorch | pytorch | 443fe7ca0e6169b7178df18dbefd7823f1246f50 | https://github.com/pytorch/pytorch/issues/29984 | high priority
triaged
module: cublas | Some cublas functions don't handle inputs with zero strides | ## 🐛 Bug
## To Reproduce
Steps to reproduce the behavior:
```python
import torch
import torch.nn as nn
torch.set_default_tensor_type('torch.cuda.FloatTensor')
x = nn.Parameter(torch.ones(2, 2))
(x @ torch.ones(2)).sum().backward()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-11-c3b66f275e9a> in <module>()
1 x = nn.Parameter(torch.ones(2, 2))
----> 2 (x @ torch.ones(2)).sum().backward()
1 frames
/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
97 Variable._execution_engine.run_backward(
98 tensors, grad_tensors, retain_graph, create_graph,
---> 99 allow_unreachable=True) # allow_unreachable flag
100
101
RuntimeError: cublas runtime error : an invalid numeric value was used as an argument at /pytorch/aten/src/THC/THCBlas.cu:120
```
## Expected behavior
No exception is raised.
## Environment
```
Collecting environment information...
PyTorch version: 1.3.1+cu100
Is debug build: No
CUDA used to build PyTorch: 10.0.130
OS: Ubuntu 18.04.3 LTS
GCC version: (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0
CMake version: version 3.12.0
Python version: 3.6
Is CUDA available: Yes
CUDA runtime version: 10.0.130
GPU models and configuration: GPU 0: Tesla T4
Nvidia driver version: 418.67
cuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.3
Versions of relevant libraries:
[pip3] numpy==1.17.4
[pip3] torch==1.3.1+cu100
[pip3] torchsummary==1.5.1
[pip3] torchtext==0.3.1
[pip3] torchvision==0.4.2+cu100
[conda] Could not collect
```
This code was run on Google Colab. The bug also reproduces on my server (Ubuntu 16.04LTS, 4x GTX1080Ti GPUs).
## Additional context
The bug doesn't occur if:
1. If I replace `sum()` with `mean()`
```python
(x @ torch.ones(2)).mean().backward()
```
2. If I use a different shape of the tensor
```python
(x @ torch.ones(2, 1)).sum().backward()
```
3. If I run the code on CPU
Therefore, I believe that this is not intended behavior.
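For context, zero strides typically come from broadcasting: `expand` creates a view that repeats data without copying by setting the stride of the expanded dimension to 0, and such views can flow into the backward pass. A minimal illustration:
```python
import torch

grad = torch.ones(1).expand(2)  # broadcast view, no copy
print(grad.stride())            # (0,) -- the zero stride the cuBLAS wrappers must handle
```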
cc @ezyang @gchanan @zou3519 @jerryzh168 @SsnL @albanD @gqchen | null | https://github.com/pytorch/pytorch/pull/38321 | null | {'base_commit': '52e9953faffe45d48660fc666db3b520b918c37c', 'files': [{'path': 'aten/src/ATen/native/cuda/Blas.cu', 'status': 'modified', 'Loc': {'(None, None, 10)': {'add': [10]}, '(None, None, 16)': {'mod': [16]}, '(None, None, 22)': {'mod': [22]}, '(None, None, 28)': {'mod': [28]}}}, {'path': 'test/test_autograd.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [6154]}}}, {'path': 'test/test_torch.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'add': [16620]}, "('TestTorchDeviceType', None, 5321)": {'mod': [13015]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"aten/src/ATen/native/cuda/Blas.cu"
],
"doc": [],
"test": [
"test/test_torch.py",
"test/test_autograd.py"
],
"config": [],
"asset": []
} | 1 |
xtekky | gpt4free | ebc10fa465adc32b165afa5f968e4fb6bf26a8ea | https://github.com/xtekky/gpt4free/issues/617 | bug | No module named 'info' | ModuleNotFoundError: No module named 'info'
Traceback:
File "/Users/apple/.pyenv/versions/3.10.11/lib/python3.10/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 565, in _run_script
exec(code, module.__dict__)
File "/Users/apple/Documents/AI/gpt4free/gui/streamlit_app.py", line 7, in <module>
from gpt4free import you
File "/Users/apple/Documents/AI/gpt4free/gui/../gpt4free/__init__.py", line 8, in <module>
from gpt4free import aicolors
File "/Users/apple/Documents/AI/gpt4free/gui/../gpt4free/aicolors/__init__.py", line 4, in <module>
from typings import AiColorsResponse
File "/Users/apple/.pyenv/versions/3.10.11/lib/python3.10/site-packages/typings/__init__.py", line 1, in <module>
from .database import *
File "/Users/apple/.pyenv/versions/3.10.11/lib/python3.10/site-packages/typings/database.py", line 2, in <module>
from info import DATABASE_NAME, DATABASE_URI, IMDB, IMDB_TEMPLATE, MELCOW_NEW_USERS, P_TTI_SHOW_OFF, SINGLE_BUTTON, SPELL_CHECK_REPLY, PROTECT_CONTENT | null | https://github.com/xtekky/gpt4free/pull/620 | null | {'base_commit': 'ebc10fa465adc32b165afa5f968e4fb6bf26a8ea', 'files': [{'path': 'gpt4free/aicolors/__init__.py', 'status': 'modified', 'Loc': {'(None, None, None)': {'mod': [4]}}}]} | [] | [] | [] | {
"iss_type": "1",
"iss_reason": "1",
"loc_way": "pr",
"loc_scope": null,
"info_type": null
} | {
"code": [
"gpt4free/aicolors/__init__.py"
],
"doc": [],
"test": [],
"config": [],
"asset": []
} | 1 |