id
int64 20
338k
| vocab_size
int64 2
671
| ast_levels
int64 4
32
| nloc
int64 1
451
| n_ast_nodes
int64 12
5.6k
| n_identifiers
int64 1
186
| n_ast_errors
int64 0
10
| n_words
int64 2
2.17k
| n_whitespaces
int64 2
13.8k
| fun_name
stringlengths 2
73
| commit_message
stringlengths 51
15.3k
| url
stringlengths 31
59
| code
stringlengths 51
31k
| ast_errors
stringlengths 0
1.46k
| token_counts
int64 6
3.32k
| file_name
stringlengths 5
56
| language
stringclasses 1
value | path
stringlengths 7
134
| commit_id
stringlengths 40
40
| repo
stringlengths 3
28
| complexity
int64 1
153
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
59,848
| 12
| 9
| 2
| 87
| 11
| 0
| 12
| 23
|
set_login_api_ready_event
|
Add login with a browser to `prefect cloud login` (#7334)
|
https://github.com/PrefectHQ/prefect.git
|
def set_login_api_ready_event():
login_api.extra["ready-event"].set()
login_api = FastAPI(on_startup=[set_login_api_ready_event])
login_api.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
| 14
|
cloud.py
|
Python
|
src/prefect/cli/cloud.py
|
1a6dee5e9eb71e6e6d1d3492002e9cd674ab9f9b
|
prefect
| 1
|
|
42,591
| 94
| 22
| 47
| 598
| 25
| 0
| 166
| 1,053
|
parse_tag
|
Support both iso639-3 codes and BCP-47 language tags (#3060)
* Add support for iso639-3 language codes
* Add support for retired language codes
* Move langnames.py to the top-level
* Add langcode() function
* Add iso639retired dictionary
* Improve wrapper functions
* Add module docstring with doctest
* Add 2-letter language codes
* Add regular expression check
* Improve inverse lookup of retired codes
* Support BCP-47
* Avoid deprecated langcodes
* Set stack level for warnings to warn on the langname call
Now it throws e.g.
```
...\nltk_3060.py:9: UserWarning: Shortening 'smo' to 'sm'
print(f"{lang}: {langname(code)}")
```
Rather than
```
...\nltk\langnames.py:64: UserWarning: Shortening zha to za
warn(f"Shortening {code} to {code2}")
```
* Dict key membership is equivalent to dict membership
* Resolve bug: subtag -> tag
* Capitalize BCP47 in CorpusReader name
* Reimplement removed type hint changes from #3081
Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com>
|
https://github.com/nltk/nltk.git
|
def parse_tag(self, tag):
subtags = tag.split("-")
lang = {}
labels = ["language", "extlang", "script", "region", "variant", "variant"]
while subtags and labels:
subtag = subtags.pop(0)
found = False
while labels:
label = labels.pop(0)
subtag = self.casing[label](subtag)
if self.format[label].fullmatch(subtag):
if subtag in self.db[label]:
found = True
valstr = self.val2str(self.db[label][subtag]["Description"])
if label == "variant" and label in lang:
lang[label] += ": " + valstr
else:
lang[label] = valstr
break
elif subtag in self.db["deprecated"][label]:
found = True
note = f"The {subtag!r} {label} code is deprecated"
if "Preferred-Value" in self.db["deprecated"][label][subtag]:
prefer = self.db["deprecated"][label][subtag][
"Preferred-Value"
]
note += f"', prefer '{self.val2str(prefer)}'"
lang[label] = self.val2str(
self.db["deprecated"][label][subtag]["Description"]
)
warn(note)
break
if not found:
if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions
sd = subtags[1]
if sd in self.subdiv:
ext = self.subdiv[sd]
else:
ext = f"<Unknown subdivision: {ext}>"
else: # other extension subtags are not supported yet
ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower()
if not self.format["singleton"].fullmatch(subtag):
ext = f"<Invalid extension: {ext}>"
warn(ext)
lang["extension"] = ext
subtags = []
return lang
| 318
|
bcp47.py
|
Python
|
nltk/corpus/reader/bcp47.py
|
f019fbedb3d2b6a2e6b58ec1b38db612b106568b
|
nltk
| 15
|
|
258,357
| 6
| 10
| 6
| 38
| 7
| 0
| 6
| 20
|
get_prompt_templates
|
feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)
Co-authored-by: ZanSara <sarazanzo94@gmail.com>
|
https://github.com/deepset-ai/haystack.git
|
def get_prompt_templates(cls) -> List[PromptTemplate]:
return list(cls.prompt_templates.values())
| 22
|
prompt_node.py
|
Python
|
haystack/nodes/prompt/prompt_node.py
|
9ebf164cfdfb320503b7161493420c1b0ec577a3
|
haystack
| 1
|
|
203,186
| 9
| 11
| 6
| 49
| 7
| 0
| 9
| 63
|
make_token
|
Fixed #30360 -- Added support for secret key rotation.
Thanks Florian Apolloner for the implementation idea.
Co-authored-by: Andreas Pelme <andreas@pelme.se>
Co-authored-by: Carlton Gibson <carlton.gibson@noumenal.es>
Co-authored-by: Vuyisile Ndlovu <terrameijar@gmail.com>
|
https://github.com/django/django.git
|
def make_token(self, user):
return self._make_token_with_timestamp(
user,
self._num_seconds(self._now()),
self.secret,
)
| 31
|
tokens.py
|
Python
|
django/contrib/auth/tokens.py
|
0dcd549bbe36c060f536ec270d34d9e7d4b8e6c7
|
django
| 1
|
|
97,562
| 22
| 13
| 9
| 112
| 13
| 0
| 26
| 113
|
update_repo_data
|
fix(tests): Fix flaky tests for GitLab updates (#33022)
See API-2585
Prev PR: #33000
In a previous PR I'd suggested we make a change to the naming scheme of repositories coming from GitLab, but as it turns out, we enforce unique constraints on Repositories (with OrganizationId and Name), meaning it doesn't even make sense to be updating the name.
Instead, I've removed the name updates and now we just perform updates on the link and path.
|
https://github.com/getsentry/sentry.git
|
def update_repo_data(self, repo, event):
project = event["project"]
url_from_event = project["web_url"]
path_from_event = project["path_with_namespace"]
if repo.url != url_from_event or repo.config.get("path") != path_from_event:
repo.update(
url=url_from_event,
config=dict(repo.config, path=path_from_event),
)
| 68
|
webhooks.py
|
Python
|
src/sentry/integrations/gitlab/webhooks.py
|
8c2edeae7d3b6d134654cda749050e794b5edc61
|
sentry
| 3
|
|
259,139
| 24
| 10
| 13
| 109
| 17
| 0
| 29
| 140
|
predict
|
MNT Refactor KMeans and MiniBatchKMeans to inherit from a common base class (#22723)
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
|
https://github.com/scikit-learn/scikit-learn.git
|
def predict(self, X, sample_weight=None):
check_is_fitted(self)
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
labels, _ = _labels_inertia_threadpool_limit(
X,
sample_weight,
x_squared_norms,
self.cluster_centers_,
n_threads=self._n_threads,
)
return labels
| 73
|
_kmeans.py
|
Python
|
sklearn/cluster/_kmeans.py
|
6ab950ec081044a1f32c2d082772635bb56144d8
|
scikit-learn
| 1
|
|
134,409
| 39
| 15
| 14
| 187
| 28
| 0
| 48
| 229
|
test_ddppo_compilation
|
[RLlib] AlgorithmConfig: Next steps (volume 01); Algos, RolloutWorker, PolicyMap, WorkerSet use AlgorithmConfig objects under the hood. (#29395)
|
https://github.com/ray-project/ray.git
|
def test_ddppo_compilation(self):
config = ddppo.DDPPOConfig().resources(num_gpus_per_worker=0)
num_iterations = 2
for _ in framework_iterator(config, frameworks="torch"):
algo = config.build(env="CartPole-v0")
for i in range(num_iterations):
results = algo.train()
check_train_results(results)
print(results)
# Make sure, weights on all workers are the same.
weights = algo.workers.foreach_worker(lambda w: w.get_weights())
for w in weights[1:]:
check(w, weights[1])
check_compute_single_action(algo)
algo.stop()
| 112
|
test_ddppo.py
|
Python
|
rllib/algorithms/ddppo/tests/test_ddppo.py
|
182744bbd151c166b8028355eae12a5da63fb3cc
|
ray
| 4
|
|
26,493
| 9
| 9
| 3
| 33
| 4
| 0
| 11
| 16
|
test_validate_subscription_query_invalid
|
Add Webhook payload via graphql subscriptions (#9394)
* Add PoC of webhook subscriptions
* add async webhooks subscription payloads feature
* remove unneeded file
* add translations subscription handling, fixes after review
* remove todo
* add descriptions
* add descriptions, move subsrciption_payloads.py
* refactor
* fix imports, add changelog
* check_document_is_single_subscription refactor
Co-authored-by: Maciej Korycinski <maciej@mirumee.com>
Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>
|
https://github.com/saleor/saleor.git
|
def test_validate_subscription_query_invalid():
result = validate_subscription_query("invalid_query")
assert result is False
TEST_VALID_SUBSCRIPTION_QUERY_WITH_FRAGMENT =
| 14
|
test_create_deliveries_for_subscription.py
|
Python
|
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py
|
aca6418d6c36956bc1ab530e6ef7e146ec9df90c
|
saleor
| 1
|
|
314,946
| 11
| 12
| 4
| 65
| 9
| 0
| 12
| 44
|
rgbw_color
|
Bump blebox_uniapi to 2.0.0 and adapt integration (#73834)
|
https://github.com/home-assistant/core.git
|
def rgbw_color(self):
if (rgbw_hex := self._feature.rgbw_hex) is None:
return None
return tuple(blebox_uniapi.light.Light.rgb_hex_to_rgb_list(rgbw_hex)[0:4])
| 40
|
light.py
|
Python
|
homeassistant/components/blebox/light.py
|
b5af96e4bb201c9bb43515ea11283bdc8c4212b4
|
core
| 2
|
|
122,245
| 57
| 12
| 15
| 193
| 17
| 0
| 76
| 121
|
dtype
|
implement bint arrays (opaque dtypes), add padding rules
Co-authored-by: Sharad Vikram <sharad.vikram@gmail.com>
|
https://github.com/google/jax.git
|
def dtype(x, *, canonicalize=False):
if x is None:
raise ValueError(f"Invalid argument to dtype: {x}.")
elif isinstance(x, type) and x in python_scalar_dtypes:
dt = python_scalar_dtypes[x]
elif type(x) in python_scalar_dtypes:
dt = python_scalar_dtypes[type(x)]
elif jax.core.is_opaque_dtype(getattr(x, 'dtype', None)):
dt = x.dtype
else:
dt = np.result_type(x)
if dt not in _jax_dtype_set:
raise TypeError(f"Value '{x}' with dtype {dt} is not a valid JAX array "
"type. Only arrays of numeric types are supported by JAX.")
return canonicalize_dtype(dt) if canonicalize else dt
| 112
|
dtypes.py
|
Python
|
jax/_src/dtypes.py
|
6d2aaac2454117d54997243714c1a009827707ca
|
jax
| 8
|
|
33,052
| 34
| 13
| 12
| 186
| 19
| 0
| 48
| 148
|
to_numpy_array
|
Update feature extractor methods to enable type cast before normalize (#18499)
* Update methods to optionally rescale
This is necessary to allow for casting our images / videos to numpy arrays within the feature extractors' call. We want to do this to make sure the behaviour is as expected when flags like are False. If some transformations aren't applied, then the output type can't be unexpected e.g. a list of PIL images instead of numpy arrays.
* Cast images to numpy arrays in call to enable consistent behaviour with different configs
* Remove accidental clip changes
* Update tests to reflect the scaling logic
We write a generic function to handle rescaling of our arrays. In order for the API to be intuitive, we take some factor c and rescale the image values by that. This means, the rescaling done in normalize and to_numpy_array are now done with array * (1/255) instead of array / 255. This leads to small differences in the resulting image. When testing, this was in the order of 1e-8, and so deemed OK
|
https://github.com/huggingface/transformers.git
|
def to_numpy_array(self, image, rescale=None, channel_first=True):
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = np.array(image)
if is_torch_tensor(image):
image = image.numpy()
rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale
if rescale:
image = self.rescale(image.astype(np.float32), 1 / 255.0)
if channel_first and image.ndim == 3:
image = image.transpose(2, 0, 1)
return image
| 123
|
image_utils.py
|
Python
|
src/transformers/image_utils.py
|
49e44b216b2559e34e945d5dcdbbe2238859e29b
|
transformers
| 7
|
|
60,149
| 29
| 11
| 21
| 91
| 14
| 0
| 37
| 115
|
_get_cluster_uid
|
Add `PREFECT_KUBERNETES_CLUSTER_UID` to allow bypass of `kube-system` namespace read (#7864)
Co-authored-by: Peyton <44583861+peytonrunyan@users.noreply.github.com>
|
https://github.com/PrefectHQ/prefect.git
|
def _get_cluster_uid(self) -> str:
# Default to an environment variable
env_cluster_uid = os.environ.get("PREFECT_KUBERNETES_CLUSTER_UID")
if env_cluster_uid:
return env_cluster_uid
# Read the UID from the cluster namespace
with self.get_client() as client:
namespace = client.read_namespace("kube-system")
cluster_uid = namespace.metadata.uid
return cluster_uid
| 49
|
kubernetes.py
|
Python
|
src/prefect/infrastructure/kubernetes.py
|
9ab65f6480a31ba022d9846fdfbfca1d17da8164
|
prefect
| 2
|
|
101,231
| 10
| 7
| 4
| 35
| 5
| 0
| 11
| 32
|
affine_matrix
|
lib.align updates:
- alignments.py
- Add typed dicts for imported alignments
- Explicitly check for presence of thumb value in alignments dict
- linting
- detected_face.py
- Typing
- Linting
- Legacy support for pre-aligned face
- Update dependencies to new property names
|
https://github.com/deepfakes/faceswap.git
|
def affine_matrix(self) -> np.ndarray:
assert self._affine_matrix is not None
return self._affine_matrix
| 21
|
detected_face.py
|
Python
|
lib/align/detected_face.py
|
5e73437be47f2410439a3c6716de96354e6a0c94
|
faceswap
| 1
|
|
119,979
| 9
| 9
| 3
| 48
| 8
| 0
| 10
| 38
|
bcoo_dot_general
|
[sparse] Update docstrings for bcoo primitives.
PiperOrigin-RevId: 438685829
|
https://github.com/google/jax.git
|
def bcoo_dot_general(lhs, rhs, *, dimension_numbers):
return _bcoo_dot_general(*lhs._bufs, rhs, dimension_numbers=dimension_numbers,
lhs_spinfo=lhs._info)
| 32
|
bcoo.py
|
Python
|
jax/experimental/sparse/bcoo.py
|
3184dd65a222354bffa2466d9a375162f5649132
|
jax
| 1
|
|
146,202
| 4
| 8
| 2
| 24
| 4
| 0
| 4
| 18
|
__len__
|
[serve] Implement Serve Application object (#22917)
The concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. This change introduces a formal Application data structure and refactors existing code to use it.
|
https://github.com/ray-project/ray.git
|
def __len__(self):
return len(self._deployments)
| 13
|
application.py
|
Python
|
python/ray/serve/application.py
|
1100c982223757f697a410a0d0c3d8bf3ff9c805
|
ray
| 1
|
|
180,540
| 9
| 8
| 20
| 34
| 3
| 0
| 9
| 18
|
update
|
Add gr.update to blocks guide (#1649)
* Add gr.update to guide
* Add to docs page and add step-by-step guide
* Fix documentation tests
* PR reviews
* Use code snippet
* Make section title plural
* Blocks utils in their own section
|
https://github.com/gradio-app/gradio.git
|
def update(**kwargs) -> dict:
kwargs["__type__"] = "generic_update"
return kwargs
| 17
|
blocks.py
|
Python
|
gradio/blocks.py
|
de4458361b359e2333d8d265cb3c57b91bec513b
|
gradio
| 1
|
|
160,534
| 7
| 7
| 34
| 48
| 14
| 2
| 7
| 10
|
traverse
|
ENH: Support character string arrays
TST: added test for issue #18684
ENH: f2py opens files with correct encoding, fixes #635
TST: added test for issue #6308
TST: added test for issue #4519
TST: added test for issue #3425
ENH: Implement user-defined hooks support for post-processing f2py data structure. Implement character BC hook.
ENH: Add support for detecting utf-16 and utf-32 encodings.
|
https://github.com/numpy/numpy.git
|
def traverse(obj, visit, parents=[], result=None, *args, **kwargs):
|
'''Traverse f2py data structurethe following visit
| 238
|
crackfortran.py
|
Python
|
numpy/f2py/crackfortran.py
|
d4e11c7a2eb64861275facb076d47ccd135fa28c
|
numpy
| 11
|
100,575
| 21
| 11
| 14
| 79
| 11
| 0
| 22
| 65
|
_get_free_vram
|
Refactor lib.gpu_stats (#1218)
* inital gpu_stats refactor
* Add dummy CPU Backend
* Update Sphinx documentation
|
https://github.com/deepfakes/faceswap.git
|
def _get_free_vram(self) -> List[float]:
vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)
for handle in self._handles]
self._log("debug", f"GPU VRAM free: {vram}")
return vram
| 46
|
nvidia.py
|
Python
|
lib/gpu_stats/nvidia.py
|
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
|
faceswap
| 2
|
|
147,590
| 8
| 6
| 7
| 31
| 6
| 1
| 8
| 21
|
__getstate__
|
[RLlib] AlphaStar polishing (fix logger.info bug). (#22281)
|
https://github.com/ray-project/ray.git
|
def __getstate__(self) -> Dict[str, Any]:
return {}
@ExperimentalAPI
|
@ExperimentalAPI
| 16
|
league_builder.py
|
Python
|
rllib/agents/alpha_star/league_builder.py
|
0bb82f29b65dca348acf5aa516d21ef3f176a3e1
|
ray
| 1
|
140,529
| 8
| 9
| 3
| 36
| 7
| 0
| 9
| 18
|
create_gloo_context
|
Clean up docstyle in python modules and add LINT rule (#25272)
|
https://github.com/ray-project/ray.git
|
def create_gloo_context(rank, world_size):
context = pygloo.rendezvous.Context(rank, world_size)
return context
| 22
|
gloo_util.py
|
Python
|
python/ray/util/collective/collective_group/gloo_util.py
|
905258dbc19753c81039f993477e7ab027960729
|
ray
| 1
|
|
111,759
| 4
| 6
| 2
| 16
| 2
| 0
| 4
| 18
|
configure_architecture_optimizers
|
Lightning implementation for retiarii oneshot nas (#4479)
|
https://github.com/microsoft/nni.git
|
def configure_architecture_optimizers(self):
return None
| 8
|
base_lightning.py
|
Python
|
nni/retiarii/oneshot/pytorch/base_lightning.py
|
8b2eb425274cdb4537fbce4a315aec12a378d6db
|
nni
| 1
|
|
19,662
| 32
| 15
| 14
| 137
| 13
| 0
| 47
| 196
|
safe_import
|
Issue 4993 Add standard pre commit hooks and apply linting. (#4994)
* Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
|
https://github.com/pypa/pipenv.git
|
def safe_import(self, name):
# type: (str) -> ModuleType
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(
iter(dist for dist in self.base_working_set if dist.project_name == name),
None,
)
if dist:
dist.activate()
module = importlib.import_module(name)
return module
| 86
|
environment.py
|
Python
|
pipenv/environment.py
|
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
|
pipenv
| 6
|
|
266,096
| 39
| 13
| 17
| 149
| 21
| 0
| 51
| 293
|
_instantiate_components
|
#10694: Emit post_save signal when creating/updating device components in bulk (#10900)
* Emit post_save signal when creating/updating device components in bulk
* Fix post_save for bulk_update()
|
https://github.com/netbox-community/netbox.git
|
def _instantiate_components(self, queryset, bulk_create=True):
components = [obj.instantiate(device=self) for obj in queryset]
if components and bulk_create:
model = components[0]._meta.model
model.objects.bulk_create(components)
# Manually send the post_save signal for each of the newly created components
for component in components:
post_save.send(
sender=model,
instance=component,
created=True,
raw=False,
using='default',
update_fields=None
)
elif components:
for component in components:
component.save()
| 97
|
devices.py
|
Python
|
netbox/dcim/models/devices.py
|
a57c937aaa565222c21ae8629103070bd5f43c45
|
netbox
| 7
|
|
104,392
| 4
| 7
| 2
| 22
| 3
| 0
| 4
| 18
|
num_columns
|
Update docs to new frontend/UI (#3690)
* WIP: update docs to new UI
* make style
* Rm unused
* inject_arrow_table_documentation __annotations__
* hasattr(arrow_table_method, "__annotations__")
* Update task_template.rst
* Codeblock PT-TF-SPLIT
* Convert loading scripts
* Convert docs to mdx
* Fix mdx
* Add <Tip>
* Convert mdx tables
* Fix codeblock
* Rm unneded hashlinks
* Update index.mdx
* Redo dev change
* Rm circle ci `build_doc` & `deploy_doc`
* Rm unneeded files
* Update docs reamde
* Standardize to `Example::`
* mdx logging levels doc
* Table properties inject_arrow_table_documentation
* ``` to ```py mdx
* Add Tips mdx
* important,None -> <Tip warning={true}>
* More misc
* Center imgs
* Update instllation page
* `setup.py` docs section
* Rm imgs since they are in hf.co
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>
* Update index mdx
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>
* just `Dataset` obj
* Addedversion just italics
* Update ReadInstruction doc example syntax
* Change docstring for `prepare_for_task`
* Chore
* Remove `code` syntax from headings
* Rm `code` syntax from headings
* Hashlink backward compatability
* S3FileSystem doc
* S3FileSystem doc updates
* index.mdx updates
* Add darkmode gifs
* Index logo img css classes
* Index mdx dataset logo img size
* Docs for DownloadMode class
* Doc DownloadMode table
* format docstrings
* style
* Add doc builder scripts (#3790)
* add doc builder scripts
* fix docker image
* Docs new UI actions no self hosted (#3793)
* No self hosted
* replace doc injection by actual docstrings
* Docstring formatted
Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com>
Co-authored-by: Mishig Davaadorj <dmishig@gmail.com>
Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
Co-authored-by: Mishig Davaadorj <dmishig@gmail.com>
* Rm notebooks from docs actions since they dont exi
* Update tsting branch
* More docstring
* Chore
* bump up node version
* bump up node
* ``` -> ```py for audio_process.mdx
* Update .github/workflows/build_documentation.yml
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Uodate dev doc build
* remove run on PR
* fix action
* Fix gh doc workflow
* forgot this change when merging master
* Update build doc
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>
Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com>
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
|
https://github.com/huggingface/datasets.git
|
def num_columns(self):
return self.table.num_columns
| 12
|
table.py
|
Python
|
src/datasets/table.py
|
e35be138148333078284b942ccc9ed7b1d826f97
|
datasets
| 1
|
|
297,903
| 71
| 19
| 26
| 175
| 21
| 0
| 89
| 657
|
_devices
|
String formatting and max line length - Part 3 (#84394)
|
https://github.com/home-assistant/core.git
|
def _devices(self, device_type):
try:
for structure in self.nest.structures:
if structure.name not in self.local_structure:
_LOGGER.debug(
"Ignoring structure %s, not in %s",
structure.name,
self.local_structure,
)
continue
for device in getattr(structure, device_type, []):
try:
# Do not optimize next statement,
# it is here for verify Nest API permission.
device.name_long
except KeyError:
_LOGGER.warning(
(
"Cannot retrieve device name for [%s]"
", please check your Nest developer "
"account permission settings"
),
device.serial,
)
continue
yield (structure, device)
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
| 107
|
__init__.py
|
Python
|
homeassistant/components/nest/legacy/__init__.py
|
baef267f335b95ec30cf8791f74e199a104e8148
|
core
| 6
|
|
271,500
| 20
| 13
| 10
| 103
| 13
| 0
| 42
| 112
|
clear_previously_created_nodes
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def clear_previously_created_nodes(layer, created_nodes):
for node in layer._inbound_nodes:
prev_layers = node.inbound_layers
for prev_layer in tf.nest.flatten(prev_layers):
prev_layer._outbound_nodes = [
n for n in prev_layer._outbound_nodes if n not in created_nodes
]
layer._inbound_nodes = [
n for n in layer._inbound_nodes if n not in created_nodes
]
| 68
|
sequential.py
|
Python
|
keras/engine/sequential.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 7
|
|
120,508
| 31
| 9
| 6
| 78
| 12
| 1
| 31
| 65
|
polar_unitary
|
Change implementation of jax.scipy.linalg.polar() and jax._src.scipy.eigh to use the QDWH decomposition from jax._src.lax.qdwh.
Remove jax._src.lax.polar.
PiperOrigin-RevId: 448241206
|
https://github.com/google/jax.git
|
def polar_unitary(a, *, method="qdwh", eps=None, max_iterations=None):
# TODO(phawkins): delete this function after 2022/8/11.
warnings.warn("jax.scipy.linalg.polar_unitary is deprecated. Call "
"jax.scipy.linalg.polar instead.",
DeprecationWarning)
unitary, _ = polar(a, method, eps, max_iterations)
return unitary
@jit
|
@jit
| 45
|
linalg.py
|
Python
|
jax/_src/scipy/linalg.py
|
7ba36fc1784a7a286aa13ab7c098f84ff64336f1
|
jax
| 1
|
157,209
| 54
| 15
| 30
| 335
| 15
| 0
| 93
| 371
|
sorted_columns
|
Remove statistics-based set_index logic from read_parquet (#9661)
|
https://github.com/dask/dask.git
|
def sorted_columns(statistics, columns=None):
if not statistics:
return []
out = []
for i, c in enumerate(statistics[0]["columns"]):
if columns and c["name"] not in columns:
continue
if not all(
"min" in s["columns"][i] and "max" in s["columns"][i] for s in statistics
):
continue
divisions = [c["min"]]
max = c["max"]
success = c["min"] is not None
for stats in statistics[1:]:
c = stats["columns"][i]
if c["min"] is None:
success = False
break
if c["min"] >= max:
divisions.append(c["min"])
max = c["max"]
else:
success = False
break
if success:
divisions.append(max)
assert divisions == sorted(divisions)
out.append({"name": c["name"], "divisions": divisions})
return out
| 196
|
core.py
|
Python
|
dask/dataframe/io/parquet/core.py
|
945435bfebc223f9a0ca013fc8163801e789caab
|
dask
| 12
|
|
5,820
| 28
| 18
| 21
| 163
| 15
| 0
| 44
| 239
|
get_number_of_posts
|
PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438)
* Updated getUserData() and find_element*
Signed-off-by: elulcao <elulcao@icloud.com>
Thanks @breuerfelix for reviewing, 🚀
People in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her
|
https://github.com/InstaPy/InstaPy.git
|
def get_number_of_posts(browser):
try:
num_of_posts = getUserData(
"graphql.user.edge_owner_to_timeline_media.count", browser
)
except WebDriverException:
try:
num_of_posts_txt = browser.find_element(
By.XPATH, read_xpath(get_number_of_posts.__name__, "num_of_posts_txt")
).text
except NoSuchElementException:
num_of_posts_txt = browser.find_element(
By.XPATH,
read_xpath(
get_number_of_posts.__name__, "num_of_posts_txt_no_such_element"
),
).text
num_of_posts_txt = num_of_posts_txt.replace(" ", "")
num_of_posts_txt = num_of_posts_txt.replace(",", "")
num_of_posts = int(num_of_posts_txt)
return num_of_posts
| 95
|
util.py
|
Python
|
instapy/util.py
|
2a157d452611d37cf50ccb7d56ff1a06e9790ecb
|
InstaPy
| 3
|
|
131,786
| 78
| 14
| 61
| 521
| 35
| 0
| 130
| 799
|
testRequestResourcesRaceConditionWithResourceDemands
|
[CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
|
https://github.com/ray-project/ray.git
|
def testRequestResourcesRaceConditionWithResourceDemands(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"].update(
{
"empty_node": {
"node_config": {},
"resources": {"CPU": 2, "GPU": 1},
"max_workers": 1,
},
"def_worker": {
"node_config": {},
"resources": {"CPU": 2, "GPU": 1, "WORKER": 1},
"max_workers": 3,
},
}
)
config["idle_timeout_minutes"] = 0
config_path = self.write_config(config)
self.provider = MockProvider()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: "head",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockNodeInfoStub(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
lm.update(
"127.0.0.0",
mock_raylet_id(),
{"CPU": 2, "GPU": 1},
{"CPU": 2},
{},
waiting_bundles=[{"CPU": 2}],
)
autoscaler.load_metrics.set_resource_requests([{"CPU": 2, "GPU": 1}] * 2)
autoscaler.update()
# 1 head, 1 worker.
self.waitForNodes(2)
lm.update(
"127.0.0.0",
mock_raylet_id(),
{"CPU": 2, "GPU": 1},
{"CPU": 2},
{},
waiting_bundles=[{"CPU": 2}],
)
# make sure it stays consistent.
for _ in range(10):
autoscaler.update()
self.waitForNodes(2)
| 310
|
test_resource_demand_scheduler.py
|
Python
|
python/ray/tests/test_resource_demand_scheduler.py
|
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
|
ray
| 3
|
|
181,658
| 19
| 12
| 11
| 92
| 17
| 0
| 20
| 89
|
test_k_fold_cv
|
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068.
|
https://github.com/EpistasisLab/tpot.git
|
def test_k_fold_cv():
boston = load_boston()
clf = make_pipeline(
OneHotEncoder(
categorical_features='auto',
sparse=False,
minimum_fraction=0.05
),
LinearRegression()
)
cross_val_score(clf, boston.data, boston.target, cv=KFold(n_splits=10, shuffle=True))
| 60
|
one_hot_encoder_tests.py
|
Python
|
tests/one_hot_encoder_tests.py
|
388616b6247ca4ea8de4e2f340d6206aee523541
|
tpot
| 1
|
|
151,282
| 11
| 10
| 5
| 43
| 4
| 0
| 13
| 56
|
get_trade_duration
|
improve typing, improve docstrings, ensure global tests pass
|
https://github.com/freqtrade/freqtrade.git
|
def get_trade_duration(self):
if self._last_trade_tick is None:
return 0
else:
return self._current_tick - self._last_trade_tick
| 25
|
BaseEnvironment.py
|
Python
|
freqtrade/freqai/RL/BaseEnvironment.py
|
77c360b264c9dee489081c2761cc3be4ba0b01d1
|
freqtrade
| 2
|
|
31,755
| 14
| 8
| 4
| 45
| 6
| 0
| 19
| 54
|
project_group_token
|
Adding GroupViT Models (#17313)
* add group vit and fixed test (except slow)
* passing slow test
* addressed some comments
* fixed test
* fixed style
* fixed copy
* fixed segmentation output
* fixed test
* fixed relative path
* fixed copy
* add ignore non auto configured
* fixed docstring, add doc
* fixed copies
* Apply suggestions from code review
merge suggestions
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
* resolve comment, renaming model
* delete unused attr
* use fix copies
* resolve comments
* fixed attn
* remove unused vars
* refactor tests
* resolve final comments
* add demo notebook
* fixed inconsitent default
* Apply suggestions from code review
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
* Apply suggestions from code review
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
* rename stage->stages
* Create single GroupViTEncoderLayer class
* Update conversion script
* Simplify conversion script
* Remove cross-attention class in favor of GroupViTAttention
* Convert other model as well, add processor to conversion script
* addressing final comment
* fixed args
* Update src/transformers/models/groupvit/modeling_groupvit.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
|
https://github.com/huggingface/transformers.git
|
def project_group_token(self, group_tokens):
# [B, num_output_groups, C] <- [B, num_group_tokens, C]
projected_group_tokens = self.mlp_inter(group_tokens)
projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
return projected_group_tokens
| 26
|
modeling_groupvit.py
|
Python
|
src/transformers/models/groupvit/modeling_groupvit.py
|
6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29
|
transformers
| 1
|
|
84,160
| 21
| 11
| 12
| 128
| 15
| 0
| 25
| 88
|
test_removed_file_download
|
tests: Refactor away result.json() calls with helpers.
Signed-off-by: Zixuan James Li <p359101898@gmail.com>
|
https://github.com/zulip/zulip.git
|
def test_removed_file_download(self) -> None:
self.login("hamlet")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client_post("/json/user_uploads", {"file": fp})
response_dict = self.assert_json_success(result)
destroy_uploads()
response = self.client_get(response_dict["uri"])
self.assertEqual(response.status_code, 404)
| 71
|
test_upload.py
|
Python
|
zerver/tests/test_upload.py
|
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
|
zulip
| 1
|
|
266,774
| 39
| 17
| 15
| 232
| 31
| 0
| 51
| 217
|
delegate
|
ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix CompositeAction handling of dest argument.
* Use consistent types in expressions/assignments.
* Use custom function to keep linters happy.
* Add missing raise for custom exception.
* Clean up key/value type handling in cloud plugins.
* Use dataclass instead of dict for results.
* Add custom type_guard function to check lists.
* Ignore return type that can't be checked (yet).
* Avoid changing types on local variables.
|
https://github.com/ansible/ansible.git
|
def delegate(args, host_state, exclude, require):  # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None
    """Delegate execution of the current command within a delegation context.

    For test commands, CI metadata is written to a temporary file and its
    relative path is exposed via ``args.metadata_path`` for the delegated run.
    """
    assert isinstance(args, EnvironmentConfig)
    with delegation_context(args, host_state):
        if not isinstance(args, TestConfig):
            delegate_command(args, host_state, exclude, require)
            return
        args.metadata.ci_provider = get_ci_provider().code
        make_dirs(ResultType.TMP.path)
        with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
            args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
            args.metadata.to_file(args.metadata_path)
            try:
                delegate_command(args, host_state, exclude, require)
            finally:
                # Always clear the path so stale metadata is never reused.
                args.metadata_path = None
| 146
|
delegation.py
|
Python
|
test/lib/ansible_test/_internal/delegation.py
|
a06fa496d3f837cca3c437ab6e9858525633d147
|
ansible
| 3
|
|
108,967
| 65
| 16
| 25
| 399
| 31
| 0
| 94
| 508
|
set_aspect
|
Add equalxy, equalyz, equalxz aspect ratios
Update docstrings
|
https://github.com/matplotlib/matplotlib.git
|
def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
    """Set the aspect ratio of the 3D axes, equalizing the selected axes.

    'equal' equalizes all three axes; 'equalxy'/'equalxz'/'equalyz' equalize
    the named pair. The 2D machinery is always given 'auto'; equalization is
    performed here by rescaling the view limits.
    """
    _api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
                       aspect=aspect)
    super().set_aspect(
        aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
    indices_by_aspect = {
        'equal': [0, 1, 2],
        'equalxy': [0, 1],
        'equalxz': [0, 2],
        'equalyz': [1, 2],
    }
    if aspect not in indices_by_aspect:
        return
    axis_indices = indices_by_aspect[aspect]
    view_intervals = np.array([self.xaxis.get_view_interval(),
                               self.yaxis.get_view_interval(),
                               self.zaxis.get_view_interval()])
    mean = np.mean(view_intervals, axis=1)
    # Use the largest span, scaled by the box aspect, for the equalized axes.
    delta = np.max(np.ptp(view_intervals, axis=1))
    deltas = delta * self._box_aspect / min(self._box_aspect)
    setters = (self.set_xlim3d, self.set_ylim3d, self.set_zlim3d)
    for i, set_lim in enumerate(setters):
        if i in axis_indices:
            set_lim(mean[i] - deltas[i] / 2., mean[i] + deltas[i] / 2.)
| 255
|
axes3d.py
|
Python
|
lib/mpl_toolkits/mplot3d/axes3d.py
|
31d13198ecf6969b1b693c28a02b0805f3f20420
|
matplotlib
| 8
|
|
155,478
| 43
| 17
| 23
| 305
| 12
| 0
| 86
| 392
|
slice_shift
|
REFACTOR-#3948: Use `__constructor__` in `DataFrame` and `Series` classes (#5485)
Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
|
https://github.com/modin-project/modin.git
|
def slice_shift(self, periods=1, axis=0):  # noqa: PR01, RT01, D200
    """Shift the data by dropping `periods` slices instead of copying/filling."""
    if periods == 0:
        return self.copy()
    if axis == "index" or axis == 0:
        # Shifting by at least the full length leaves an empty frame.
        if abs(periods) >= len(self.index):
            return self.__constructor__(columns=self.columns)
        shifted = self.iloc[:-periods] if periods > 0 else self.iloc[-periods:]
        shifted.index = (
            self.index[periods:] if periods > 0 else self.index[:periods]
        )
        return shifted
    if abs(periods) >= len(self.columns):
        return self.__constructor__(index=self.index)
    shifted = (
        self.iloc[:, :-periods] if periods > 0 else self.iloc[:, -periods:]
    )
    shifted.columns = (
        self.columns[periods:] if periods > 0 else self.columns[:periods]
    )
    return shifted
| 194
|
dataframe.py
|
Python
|
modin/pandas/dataframe.py
|
b541b6c18e6fb4515e998b9b4f88528490cf69c6
|
modin
| 10
|
|
87,444
| 48
| 12
| 11
| 143
| 25
| 1
| 53
| 113
|
derive_missing_codemappings
|
feat(code-mappings): Add task to derive missing code mappings (#40528)
Add task to derive and save missing code mappings.
Fixes WOR-2236
|
https://github.com/getsentry/sentry.git
|
def derive_missing_codemappings(dry_run=False) -> None:
    """Fan out one code-mapping derivation task per active organization.

    Only organizations with the 'derive-code-mappings' feature enabled are
    scheduled.
    """
    active_orgs = Organization.objects.filter(status=OrganizationStatus.ACTIVE)
    for _, organization in enumerate(
        RangeQuerySetWrapper(active_orgs, step=1000, result_value_getter=lambda item: item.id)
    ):
        if features.has("organizations:derive-code-mappings", organization):
            # Create a celery task per organization
            derive_code_mappings.delay(organization.id)
@instrumented_task( # type: ignore
name="sentry.tasks.derive_code_mappings.derive_code_mappings",
queue="derive_code_mappings",
max_retries=0, # if we don't backfill it this time, we'll get it the next time
)
|
@instrumented_task( # type: ignore
name="sentry.tasks.derive_code_mappings.derive_code_mappings",
queue="derive_code_mappings",
max_retries=0, # if we don't backfill it this time, we'll get it the next time
)
| 70
|
derive_code_mappings.py
|
Python
|
src/sentry/tasks/derive_code_mappings.py
|
c1b7994345096b15efff054341ce569dea57e76b
|
sentry
| 3
|
287,933
| 5
| 6
| 21
| 17
| 2
| 0
| 5
| 12
|
_async_update_data
|
Enable the move firmware effect on multizone lights (#78918)
Co-authored-by: J. Nick Koston <nick@koston.org>
|
https://github.com/home-assistant/core.git
|
async def _async_update_data(self) -> None:
| 152
|
coordinator.py
|
Python
|
homeassistant/components/lifx/coordinator.py
|
691028dfb4947f28c5a30e6e2d135404ac0a0a60
|
core
| 8
|
|
13,866
| 23
| 11
| 7
| 69
| 7
| 0
| 24
| 108
|
close
|
feat: dynamic batching (#5410)
Co-authored-by: Johannes Messner <messnerjo@gmail.com>
Co-authored-by: Alaeddine Abdessalem <alaeddine-13@live.fr>
|
https://github.com/jina-ai/jina.git
|
async def close(self):
    """Flush any pending batch, wait for it, and mark the queue closed."""
    if self._is_closed:
        return
    # Force a final flush of whatever is queued before shutting down.
    self._flush_trigger.set()
    if self._flush_task:
        await self._flush_task
    self._cancel_timer_if_pending()
    self._is_closed = True
| 38
|
batch_queue.py
|
Python
|
jina/serve/runtimes/worker/batch_queue.py
|
46d7973043e2e599149812cc6fc7671b935c13f8
|
jina
| 3
|
|
21,784
| 8
| 7
| 3
| 28
| 6
| 0
| 8
| 22
|
is_bare
|
Update tomlkit==0.9.2
Used:
python -m invoke vendoring.update --package=tomlkit
|
https://github.com/pypa/pipenv.git
|
def is_bare(self) -> bool:
    """Return whether this key is a bare (unquoted) key."""
    bare = self.t == KeyType.Bare
    return bare
| 16
|
items.py
|
Python
|
pipenv/vendor/tomlkit/items.py
|
8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9
|
pipenv
| 1
|
|
291,386
| 10
| 7
| 3
| 38
| 6
| 0
| 10
| 24
|
async_get_triggers
|
Fix homekit controller triggers not attaching when integration is setup after startup (#82717)
fixes https://github.com/home-assistant/core/issues/78852
|
https://github.com/home-assistant/core.git
|
def async_get_triggers(self) -> Generator[tuple[str, str], None, None]:
    """Yield each registered trigger tuple for this device."""
    for trigger in self._triggers:
        yield trigger
| 25
|
device_trigger.py
|
Python
|
homeassistant/components/homekit_controller/device_trigger.py
|
05f89efd2c0f0d954897b2e1d43ec2a8505cb33a
|
core
| 1
|
|
20
| 7
| 6
| 17
| 26
| 4
| 1
| 7
| 20
|
get_protobuf_schema
|
MOVE GetAllRequestsMessage and GetAllRequestsResponseMessage to the proper message file
|
https://github.com/OpenMined/PySyft.git
|
def get_protobuf_schema() -> GeneratedProtocolMessageType:
    """Return the generated protobuf message class backing this message type."""
    return GetAllRequestsMessage_PB
@serializable()
|
@serializable()
| 9
|
object_request_messages.py
|
Python
|
packages/syft/src/syft/core/node/common/node_service/object_request/object_request_messages.py
|
05edf746cf5742b562996cf1a319b404152960e5
|
PySyft
| 1
|
241,753
| 106
| 11
| 72
| 644
| 30
| 1
| 249
| 763
|
test_fx_validator_integration
|
Add `LightningModule.lr_scheduler_step` (#10249)
Co-authored-by: Carlos Mocholi <carlossmocholi@gmail.com>
|
https://github.com/Lightning-AI/lightning.git
|
def test_fx_validator_integration(tmpdir):
    """Verify `self.log` raises/warns with the expected message per hook.

    `not_supported` maps each hook name (None = outside any hook) to the
    message expected when logging is attempted from it; HookedModel /
    HookedCallback perform the per-hook assertions during fit/test/predict.
    """
    # Hooks where logging is unsupported before/while `fit` runs.
    not_supported = {
        None: "`self.trainer` reference is not registered",
        "on_before_accelerator_backend_setup": "You can't",
        "setup": "You can't",
        "configure_sharded_model": "You can't",
        "on_configure_sharded_model": "You can't",
        "configure_optimizers": "You can't",
        "on_fit_start": "You can't",
        "on_pretrain_routine_start": "You can't",
        "on_pretrain_routine_end": "You can't",
        "on_train_dataloader": "You can't",
        "train_dataloader": "You can't",
        "on_val_dataloader": "You can't",
        "val_dataloader": "You can't",
        "on_validation_end": "You can't",
        "on_train_end": "You can't",
        "on_fit_end": "You can't",
        "teardown": "You can't",
        "on_sanity_check_start": "You can't",
        "on_sanity_check_end": "You can't",
        "prepare_data": "You can't",
        "configure_callbacks": "You can't",
        "on_validation_model_eval": "You can't",
        "on_validation_model_train": "You can't",
        "lr_scheduler_step": "You can't",
        "summarize": "not managed by the `Trainer",
    }
    model = HookedModel(not_supported)
    # Logging outside any Trainer call should warn about the missing reference.
    with pytest.warns(UserWarning, match=not_supported[None]):
        model.log("foo", 1)
    callback = HookedCallback(not_supported)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        limit_train_batches=1,
        limit_val_batches=1,
        limit_test_batches=1,
        limit_predict_batches=1,
        callbacks=callback,
    )
    with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"):
        trainer.fit(model)
    not_supported.update(
        {
            # `lightning_module` ref is now present from the `fit` call
            "on_before_accelerator_backend_setup": "You can't",
            "on_test_dataloader": "You can't",
            "test_dataloader": "You can't",
            "on_test_model_eval": "You can't",
            "on_test_model_train": "You can't",
            "on_test_end": "You can't",
        }
    )
    with pytest.deprecated_call(match="on_test_dataloader` is deprecated in v1.5"):
        trainer.test(model, verbose=False)
    # After `test`, every remaining hook reports the torn-down result collection.
    not_supported.update({k: "result collection is not registered yet" for k in not_supported})
    not_supported.update(
        {
            "on_predict_dataloader": "result collection is not registered yet",
            "predict_dataloader": "result collection is not registered yet",
            "on_predict_model_eval": "result collection is not registered yet",
            "on_predict_start": "result collection is not registered yet",
            "on_predict_epoch_start": "result collection is not registered yet",
            "on_predict_batch_start": "result collection is not registered yet",
            "predict_step": "result collection is not registered yet",
            "on_predict_batch_end": "result collection is not registered yet",
            "on_predict_epoch_end": "result collection is not registered yet",
            "on_predict_end": "result collection is not registered yet",
        }
    )
    with pytest.deprecated_call(match="on_predict_dataloader` is deprecated in v1.5"):
        trainer.predict(model)
@RunIf(min_gpus=2)
|
@RunIf(min_gpus=2)
| 322
|
test_logger_connector.py
|
Python
|
tests/trainer/logging_/test_logger_connector.py
|
82c8875f33addb0becd7761c95e9674ccc98c7ee
|
lightning
| 2
|
140,584
| 20
| 17
| 11
| 108
| 13
| 1
| 25
| 105
|
unbatch
|
Clean up docstyle in python modules and add LINT rule (#25272)
|
https://github.com/ray-project/ray.git
|
def unbatch(batches_struct):
    """Convert a struct of batched leaves into a list of per-item structs.

    The input's leaves are each a batch; the output is a list (one entry per
    batch position) of structs shaped like `batches_struct` with scalar
    leaves.
    """
    flat_batches = tree.flatten(batches_struct)
    batch_size = len(flat_batches[0])
    # Re-assemble one struct per position in the batch.
    return [
        tree.unflatten_as(
            batches_struct,
            [flat[pos] for flat in flat_batches],
        )
        for pos in range(batch_size)
    ]
@DeveloperAPI
|
@DeveloperAPI
| 66
|
space_utils.py
|
Python
|
rllib/utils/spaces/space_utils.py
|
905258dbc19753c81039f993477e7ab027960729
|
ray
| 3
|
286,314
| 10
| 10
| 2
| 39
| 6
| 0
| 11
| 20
|
text_adjustment_len
|
[SDK] Allow silencing verbose output in commands that use stocks/load (#3180)
* remove verbose on load
* Revert implementation of the verbosity setting in stocks controller
* Edit docstrings to comply with pydocstyle linting rules
* Fix typos in variable names and help text
* Add verbosity setting to forex load helper as it uses the stocks helper
* Update docstrings to comply with pydocstyle linting rules
* Update tests
* Fix test relying on local sources settings
* Remove old test cassettes
* Add new test data
* WIP: Fix futures tests
* Clean up test file
* Fix futures tests having a time component
* Fix futures model tests
Co-authored-by: James Maslek <jmaslek11@gmail.com>
Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com>
|
https://github.com/OpenBB-finance/OpenBBTerminal.git
|
def text_adjustment_len(self, text):
    """Return the visible length of *text* with ANSI escape codes stripped."""
    stripped = self.ansi_regx.sub("", text)
    return len(stripped)
| 22
|
helper_funcs.py
|
Python
|
openbb_terminal/helper_funcs.py
|
47549cbd9f52a436c06b040fda5b88a7d2bf700a
|
OpenBBTerminal
| 1
|
|
301,831
| 21
| 14
| 10
| 79
| 10
| 0
| 23
| 121
|
async_sync_entities_all
|
Sync entities when enabling/disabling Google Assistant (#72791)
|
https://github.com/home-assistant/core.git
|
async def async_sync_entities_all(self):
    """Sync entities for every agent user; return the worst HTTP status.

    Returns 204 immediately when there are no agent users.
    """
    agent_user_ids = self._store.agent_user_ids
    if not agent_user_ids:
        return 204
    results = await gather(
        *(self.async_sync_entities(agent_user_id) for agent_user_id in agent_user_ids)
    )
    # max() surfaces the most severe status code across all syncs.
    return max(results, default=204)
| 48
|
helpers.py
|
Python
|
homeassistant/components/google_assistant/helpers.py
|
6d74149f22e7211173412682d999b500ccbeff42
|
core
| 3
|
|
46,224
| 60
| 15
| 35
| 361
| 47
| 0
| 91
| 502
|
test_deactivate_stale_dags
|
Reduce DB load incurred by Stale DAG deactivation (#21399)
Deactivating stale DAGs periodically in bulk
By moving this logic into the DagFileProcessorManager and running it across all processed file periodically, we can prevent the use of un-indexed queries.
The basic logic is that we can look at the last processed time of a file (for a given processor) and compare that to the last_parsed_time of an entry in the dag table. If the file has been processed significantly more recently than the DAG has been updated, then its safe to assume that the DAG is missing and can be marked inactive.
|
https://github.com/apache/airflow.git
|
def test_deactivate_stale_dags(self):
    """A DAG parsed before its file's last processing time is deactivated.

    Sets up a DAG whose last_parsed_time predates the recorded file-parsing
    finish time (by one hour), then checks _deactivate_stale_dags flips it
    from active to inactive.
    """
    manager = DagFileProcessorManager(
        dag_directory='directory',
        max_runs=1,
        processor_timeout=timedelta(minutes=10),
        signal_conn=MagicMock(),
        dag_ids=[],
        pickle_dags=False,
        async_mode=True,
    )
    test_dag_path = str(TEST_DAG_FOLDER / 'test_example_bash_operator.py')
    dagbag = DagBag(test_dag_path, read_dags_from_db=False)
    with create_session() as session:
        # Add stale DAG to the DB
        dag = dagbag.get_dag('test_example_bash_operator')
        dag.last_parsed_time = timezone.utcnow()
        dag.sync_to_db()
        # Add DAG to the file_parsing_stats; the finish time is one hour
        # after the DAG's last_parsed_time, which makes the DAG look stale.
        stat = DagFileStat(
            num_dags=1,
            import_errors=0,
            last_finish_time=timezone.utcnow() + timedelta(hours=1),
            last_duration=1,
            run_count=1,
        )
        manager._file_paths = [test_dag_path]
        manager._file_stats[test_dag_path] = stat
        active_dags = (
            session.query(DagModel).filter(DagModel.is_active, DagModel.fileloc == test_dag_path).all()
        )
        assert len(active_dags) == 1
        manager._file_stats[test_dag_path] = stat
        manager._deactivate_stale_dags()
        active_dags = (
            session.query(DagModel).filter(DagModel.is_active, DagModel.fileloc == test_dag_path).all()
        )
        assert len(active_dags) == 0
| 226
|
test_manager.py
|
Python
|
tests/dag_processing/test_manager.py
|
f309ea78f7d8b62383bc41eac217681a0916382b
|
airflow
| 1
|
|
6,278
| 7
| 7
| 2
| 35
| 6
| 0
| 8
| 14
|
sparsemax_loss
|
Removes dependency on entmax from PyPI, adds entmax source to utils (#1778)
* Removes dependency on entmax from PyPi, add entmax source code into utils instead.
* Removes build status and image from README
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Fix python formatting in docs for pre-commit.
* Removes __main__ from test_losses.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Update entmax imports.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
Co-authored-by: Daniel Treiman <daniel@predibase.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
|
https://github.com/ludwig-ai/ludwig.git
|
def sparsemax_loss(X, target, k=None):
    """Compute the sparsemax loss via the custom autograd function.

    Args:
        X: input tensor of scores.
        target: target tensor.
        k: optional parameter forwarded to SparsemaxLossFunction.
    """
    return SparsemaxLossFunction.apply(X, target, k)
| 23
|
losses.py
|
Python
|
ludwig/utils/entmax/losses.py
|
20a8a6fdb516e543d4598c852063ba0fb407f3ba
|
ludwig
| 1
|
|
247,607
| 6
| 7
| 10
| 34
| 4
| 0
| 6
| 27
|
test_can_create_as_private_room_after_rejection
|
Add type hints to some tests/handlers files. (#12224)
|
https://github.com/matrix-org/synapse.git
|
def test_can_create_as_private_room_after_rejection(self) -> None:
    """Re-run the denial scenario, then verify the private-room path still succeeds."""
    self.test_denied_without_publication_permission()
    self.test_allowed_when_creating_private_room()
| 18
|
test_directory.py
|
Python
|
tests/handlers/test_directory.py
|
5dd949bee6158a8b651db9f2ae417a62c8184bfd
|
synapse
| 1
|
|
100,709
| 62
| 14
| 36
| 297
| 24
| 0
| 90
| 372
|
_total_stats
|
Bugfixes:
- Stats graph - Handle NaNs in data
- logger - de-elevate matplotlib font messages
|
https://github.com/deepfakes/faceswap.git
|
def _total_stats(self) -> dict:
    """Compile aggregate statistics across all session summaries.

    Returns
    -------
    dict
        Totals: session label, start/end timestamps, elapsed seconds,
        examples-per-second rate, the set of batch sizes seen (comma
        separated), and total iterations.
    """
    logger.debug("Compiling Totals")
    elapsed = 0
    examples = 0
    iterations = 0
    batchset = set()
    # Fix: initialize so an empty _per_session_stats no longer raises
    # NameError when building the totals dict below.
    starttime = None
    endtime = None
    total_summaries = len(self._per_session_stats)
    for idx, summary in enumerate(self._per_session_stats):
        if idx == 0:
            starttime = summary["start"]
        if idx == total_summaries - 1:
            endtime = summary["end"]
        elapsed += summary["elapsed"]
        # Batch is counted twice per iteration (presumably both sides of the
        # training pair — confirm against the trainer).
        examples += ((summary["batch"] * 2) * summary["iterations"])
        batchset.add(summary["batch"])
        iterations += summary["iterations"]
    batch = ",".join(str(bs) for bs in batchset)
    totals = {"session": "Total",
              "start": starttime,
              "end": endtime,
              "elapsed": elapsed,
              # Guard against division by zero for zero-length sessions.
              "rate": examples / elapsed if elapsed != 0 else 0,
              "batch": batch,
              "iterations": iterations}
    logger.debug(totals)
    return totals
| 172
|
stats.py
|
Python
|
lib/gui/analysis/stats.py
|
afec52309326304f4323029039e49bfcf928ef43
|
faceswap
| 6
|
|
175,317
| 20
| 10
| 9
| 104
| 17
| 0
| 25
| 64
|
global_enum
|
bpo-40066: [Enum] update str() and format() output (GH-30582)
Undo rejected PEP-663 changes:
- restore `repr()` to its 3.10 status
- restore `str()` to its 3.10 status
New changes:
- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result
- zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'`
- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type
- added `_numeric_repr_` to `Flag` to control display of unnamed values
- enums without doc strings have a more comprehensive doc string added
- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`
|
https://github.com/python/cpython.git
|
def global_enum(cls, update_str=False):
    """Export an enum's members into its defining module's namespace.

    Switches repr() to the module-qualified global form (flag or plain
    variant), and replaces str() as well unless the class is a ReprEnum
    (or update_str forces it).
    """
    cls.__repr__ = global_flag_repr if issubclass(cls, Flag) else global_enum_repr
    if update_str or not issubclass(cls, ReprEnum):
        cls.__str__ = global_str
    # Inject every member into the module that defined the enum.
    sys.modules[cls.__module__].__dict__.update(cls.__members__)
    return cls
| 65
|
enum.py
|
Python
|
Lib/enum.py
|
acf7403f9baea3ae1119fc6b4a3298522188bf96
|
cpython
| 4
|
|
292,545
| 8
| 6
| 2
| 26
| 5
| 0
| 8
| 15
|
device_info
|
Expose Samsung wrapper as async (#67042)
Co-authored-by: epenet <epenet@users.noreply.github.com>
|
https://github.com/home-assistant/core.git
|
async def async_device_info(self) -> dict[str, Any] | None:
| 15
|
bridge.py
|
Python
|
homeassistant/components/samsungtv/bridge.py
|
a60c37cdb8cc9d0b9bad1dedb92b6068cd9d1244
|
core
| 1
|
|
39,443
| 12
| 11
| 4
| 64
| 9
| 0
| 12
| 28
|
lexicographers_mutual_information
|
Add new item similarity metrics for SAR (#1754)
* Add mutual information similarity in SAR
* Add lexicographers mutual information similarity for SAR
* Add cosine similarity for SAR
* Add inclusion index for SAR
* Typos
* Change SARSingleNode to SAR
* Convert item similarity matrix to np.array
* Update
* Update SAR tests
* Remove unused imports
* Add explanations for new similarity metrics
|
https://github.com/microsoft/recommenders.git
|
def lexicographers_mutual_information(cooccurrence):
    """Compute lexicographers mutual information: cooccurrence-weighted MI.

    Invalid/divide warnings are suppressed since MI can produce NaN/inf for
    zero counts.
    """
    with np.errstate(invalid="ignore", divide="ignore"):
        lmi = mutual_information(cooccurrence) * cooccurrence
    return np.array(lmi)
| 35
|
python_utils.py
|
Python
|
recommenders/utils/python_utils.py
|
1d7341e93d1f03387699fb3c6ae0b6c0e464296f
|
recommenders
| 1
|
|
156,002
| 84
| 12
| 9
| 184
| 23
| 0
| 127
| 178
|
slice_with_int_dask_array
|
absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889
|
https://github.com/dask/dask.git
|
def slice_with_int_dask_array(x, idx, offset, x_size, axis):
    """Chunk-level kernel: slice one chunk of ``x`` with an integer index chunk.

    ``offset`` is a shape-(1,) array giving the global position of this chunk
    of ``x`` along ``axis``; ``x_size`` is the global axis length, used to
    normalize negative indices. Indices that fall outside this chunk are
    dropped — the caller is responsible for combining per-chunk results.
    """
    from dask.array.utils import asarray_safe, meta_from_array
    idx = asarray_safe(idx, like=meta_from_array(x))
    # Needed when idx is unsigned
    idx = idx.astype(np.int64)
    # Normalize negative indices
    idx = np.where(idx < 0, idx + x_size, idx)
    # A chunk of the offset dask Array is a numpy array with shape (1, ).
    # It indicates the index of the first element along axis of the current
    # chunk of x.
    idx = idx - offset
    # Drop elements of idx that do not fall inside the current chunk of x
    idx_filter = (idx >= 0) & (idx < x.shape[axis])
    idx = idx[idx_filter]
    # np.take does not support slice indices
    # return np.take(x, idx, axis)
    return x[tuple(idx if i == axis else slice(None) for i in range(x.ndim))]
| 118
|
chunk.py
|
Python
|
dask/array/chunk.py
|
cccb9d8d8e33a891396b1275c2448c352ef40c27
|
dask
| 3
|
|
260,773
| 42
| 14
| 15
| 165
| 14
| 0
| 70
| 167
|
_compute_n_patches
|
MAINT Parameter validation for `PatchExtractor` (#24215)
Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr>
|
https://github.com/scikit-learn/scikit-learn.git
|
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, (Integral)) and max_patches < all_patches:
return max_patches
elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
return all_patches
elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
| 106
|
image.py
|
Python
|
sklearn/feature_extraction/image.py
|
5e9fa423011ac793c1e0ec2725486c2a33beae42
|
scikit-learn
| 8
|
|
37,505
| 7
| 10
| 2
| 37
| 5
| 0
| 7
| 13
|
require_librosa
|
Update all require decorators to use skipUnless when possible (#16999)
|
https://github.com/huggingface/transformers.git
|
def require_librosa(test_case):
    """Decorator that skips *test_case* unless librosa is installed."""
    decorator = unittest.skipUnless(is_librosa_available(), "test requires librosa")
    return decorator(test_case)
| 20
|
testing_utils.py
|
Python
|
src/transformers/testing_utils.py
|
57e6464ac9a31156f1c93e59107323e6ec01309e
|
transformers
| 1
|
|
166,197
| 8
| 10
| 4
| 41
| 5
| 0
| 8
| 40
|
__dlpack__
|
ENH: Implement DataFrame interchange protocol (#46141)
|
https://github.com/pandas-dev/pandas.git
|
def __dlpack__(self):
    """Expose the underlying array via the DLPack protocol, when supported."""
    # _NUMPY_HAS_DLPACK is a module-level capability flag (defined elsewhere);
    # older numpy versions lack ndarray.__dlpack__.
    if not _NUMPY_HAS_DLPACK:
        raise NotImplementedError("__dlpack__")
    return self._x.__dlpack__()
| 22
|
buffer.py
|
Python
|
pandas/core/exchange/buffer.py
|
90140f055892a46f473bd26affab88a7f171e394
|
pandas
| 2
|
|
40,643
| 37
| 11
| 14
| 231
| 14
| 0
| 54
| 153
|
_handle_wrapping
|
Refactor figure setup and subplot metadata tracking into Subplots class
Squashed commit of the following:
commit e6f99078d46947eab678b9dd0303657a3129f9fc
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sun Aug 1 17:56:49 2021 -0400
Address a couple TODOs
commit c48ba3af8095973b7dca9554934a695751f58726
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Mon Jul 26 06:42:29 2021 -0400
Add docstrings in Subplots
commit 97e6465b0f998f541b445b189682fbf134869391
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sun Jul 25 17:53:22 2021 -0400
Fix unshared label visibility test
commit e2d93a28313c2cb9170e56b2e4b373987993be7c
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sun Jul 25 17:16:41 2021 -0400
Add more label visibility tests
commit 698ee72b5d5f9f3939c50cde9e2baacdf5487807
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sat Jul 24 11:08:32 2021 -0400
Begin adding label visibility tests
commit 97167b4701532eeccadaa899520d57e38c26dd43
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Mon Jul 19 06:55:48 2021 -0400
Fix interior tick labels with unshared axes
commit 9331d5d91a7861aebfe03fa86ee122902c0d1d8a
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sat Jul 17 17:03:48 2021 -0400
Fix interior labels for wrapped plots
commit 38f2efa7e732958430c006f24827c6ac69640ef3
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sat Jul 17 16:03:34 2021 -0400
Fix non-cartesian interior labels
commit 3c07f981110890d38aee19b38c43080863132122
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sat Jul 17 15:44:48 2021 -0400
Integrate Subplots into Plot
commit 841a3c998eae8f8cc85fd65af7ea8e6f32fc5510
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sat Jul 17 13:00:09 2021 -0400
Complete subplots tests
commit 8ceb7e6c35ea0cbcd014067035d7ea219204f464
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Fri Jul 16 19:45:29 2021 -0400
Continue building out subplot tests
commit b0ce0e7a9e3534fdad04ef9e287e4c6bb19fe684
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Thu Jul 15 21:35:21 2021 -0400
Continue building out subplots tests
commit 5f4b67d4d90cde7d0d899527b1fd8607348a5f5b
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Wed Jul 14 20:57:35 2021 -0400
Add some tests for Subplots functionality
commit 58fbf8e3f349174f4d1d29f71fa867ad4b49d264
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sun Jul 11 20:49:29 2021 -0400
Begin refactoring figure setup into Subplots class
commit 6bb853e20ad3b42b2728d212a51ed8de2ff47bde
Author: Michael Waskom <mwaskom@nyu.edu>
Date: Sun Jul 11 16:02:26 2021 -0400
Fix overlooked lint and test
|
https://github.com/mwaskom/seaborn.git
|
def _handle_wrapping(self) -> None:
    """Update the subplot spec to wrap the facet/pair dimension, if requested.

    Reads ``wrap`` from the facet spec (falling back to the pair spec); when
    set and smaller than the current subplot count along the wrapped
    dimension, reshapes the grid so that dimension is capped at ``wrap`` and
    the excess flows into the other dimension. Records ``n_subplots`` and
    ``wrap_dim`` on the instance.
    """
    self.wrap = wrap = self.facet_spec.get("wrap") or self.pair_spec.get("wrap")
    if not wrap:
        return
    # Wrap rows if the grid is already multi-row, otherwise wrap columns.
    wrap_dim = "row" if self.subplot_spec["nrows"] > 1 else "col"
    flow_dim = {"row": "col", "col": "row"}[wrap_dim]
    n_subplots = self.subplot_spec[f"n{wrap_dim}s"]
    # Subplots needed along the flow dimension after wrapping.
    flow = int(np.ceil(n_subplots / wrap))
    if wrap < self.subplot_spec[f"n{wrap_dim}s"]:
        self.subplot_spec[f"n{wrap_dim}s"] = wrap
        self.subplot_spec[f"n{flow_dim}s"] = flow
    self.n_subplots = n_subplots
    self.wrap_dim = wrap_dim
| 125
|
subplots.py
|
Python
|
seaborn/_core/subplots.py
|
c16180493bd44fd76092fdd9ea0060bac91e47fe
|
seaborn
| 5
|
|
145,083
| 9
| 8
| 3
| 34
| 5
| 0
| 9
| 23
|
_is_read_stage
|
[data] Stage fusion optimizations, off by default (#22373)
This PR adds the following stage fusion optimizations (off by default). In a later PR, I plan to enable this by default for DatasetPipelines.
- Stage fusion: Whether to fuse compatible OneToOne stages.
- Read stage fusion: Whether to fuse read stages into downstream OneToOne stages. This is accomplished by rewriting the read stage (LazyBlockList) into a transformation over a collection of read tasks (BlockList -> MapBatches(do_read)).
- Shuffle stage fusion: Whether to fuse compatible OneToOne stages into shuffle stages that support specifying a map-side block UDF.
Stages are considered compatible if their compute strategy is the same ("tasks" vs "actors"), and they have the same Ray remote args. Currently, the PR is ignoring the remote args of read tasks, but this will be fixed as a followup (I didn't want to change the read tasks default here).
|
https://github.com/ray-project/ray.git
|
def _is_read_stage(self) -> bool:
return self._has_read_stage() and not self._stages
| 19
|
plan.py
|
Python
|
python/ray/data/impl/plan.py
|
786c5759dee02b57c8e10b39f1c1bed07f05eb5a
|
ray
| 2
|
|
150,869
| 49
| 10
| 10
| 169
| 20
| 0
| 67
| 180
|
_handle_analyzed_df_message
|
Refactoring, minor improvements, data provider improvements
|
https://github.com/freqtrade/freqtrade.git
|
def _handle_analyzed_df_message(self, type, data):
    """Decode an analyzed dataframe message and store it in the dataprovider."""
    key, value = data["key"], data["value"]
    pair, timeframe, candle_type = key
    dataframe = json_to_dataframe(value)
    # Optionally strip entry/exit signals before storing, per config.
    signal_config = self._config.get('external_signal', {})
    if signal_config.get('remove_signals_analyzed_df', False):
        dataframe = remove_entry_exit_signals(dataframe)
    logger.debug(f"Handling analyzed dataframe for {pair}")
    logger.debug(dataframe.tail())
    # Hand the dataframe to the dataprovider for downstream consumers.
    self._freqtrade.dataprovider.add_external_df(pair, timeframe, dataframe, candle_type)
| 98
|
rpc.py
|
Python
|
freqtrade/rpc/rpc.py
|
2b5f0678772bea0abaf4abe93efc55de43ea3e0e
|
freqtrade
| 2
|
|
249,634
| 20
| 12
| 8
| 128
| 14
| 0
| 26
| 54
|
make_request
|
Indicate what endpoint came back with a JSON response we were unable to parse (#14097)
**Before:**
```
WARNING - POST-11 - Unable to parse JSON: Expecting value: line 1 column 1 (char 0) (b'')
```
**After:**
```
WARNING - POST-11 - Unable to parse JSON from POST /_matrix/client/v3/join/%21ZlmJtelqFroDRJYZaq:hs1?server_name=hs1 response: Expecting value: line 1 column 1 (char 0) (b'')
```
---
It's possible to figure out which endpoint these warnings were coming from before but you had to follow the request ID `POST-11` to the log line that says `Completed request [...]`. Including this key information next to the JSON parsing error makes it much easier to reason whether it matters or not.
```
2022-09-29T08:23:25.7875506Z synapse_main | 2022-09-29 08:21:10,336 - synapse.http.matrixfederationclient - 299 - INFO - POST-11 - {GET-O-13} [hs1] Completed request: 200 OK in 0.53 secs, got 450 bytes - GET matrix://hs1/_matrix/federation/v1/make_join/%21ohtKoQiXlPePSycXwp%3Ahs1/%40charlie%3Ahs2?ver=1&ver=2&ver=3&ver=4&ver=5&ver=6&ver=org.matrix.msc2176&ver=7&ver=8&ver=9&ver=org.matrix.msc3787&ver=10&ver=org.matrix.msc2716v4
```
---
As a note, having no `body` is normal for the `/join` endpoint and it can handle it.
https://github.com/matrix-org/synapse/blob/0c853e09709d52783efd37060ed9e8f55a4fc704/synapse/rest/client/room.py#L398-L403
Alternatively we could remove these extra logs but they are probably more usually helpful to figure out what went wrong.
|
https://github.com/matrix-org/synapse.git
|
def make_request(content):
    """Build a stub Mock request whose body is *content*.

    A dict body is JSON-encoded to UTF-8 bytes first; bytes pass through.
    """
    if isinstance(content, dict):
        content = json.dumps(content).encode("utf8")
    request = Mock(spec=["method", "uri", "content"])
    request.method = b"STUB_METHOD"
    request.uri = b"/test_stub_uri"
    request.content = BytesIO(content)
    return request
| 71
|
test_servlet.py
|
Python
|
tests/http/test_servlet.py
|
1bf2832714abdfc5e10395e8e76aecc591ad265f
|
synapse
| 2
|
|
20,237
| 6
| 8
| 3
| 30
| 5
| 0
| 6
| 20
|
site_data_path
|
check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
|
https://github.com/pypa/pipenv.git
|
def site_data_path(self) -> Path:
    """Return the first site-data directory as a ``Path``.

    ``site_data_dir`` may be a multi-path string (judging by the helper's
    name); only the first entry is returned.
    """
    return self._first_item_as_path_if_multipath(self.site_data_dir)
| 17
|
unix.py
|
Python
|
pipenv/patched/notpip/_vendor/platformdirs/unix.py
|
f3166e673fe8d40277b804d35d77dcdb760fc3b3
|
pipenv
| 1
|
|
224,014
| 10
| 9
| 2
| 34
| 4
| 0
| 10
| 24
|
static_pages
|
Remove spaces at the ends of docstrings, normalize quotes
|
https://github.com/mkdocs/mkdocs.git
|
def static_pages(self):
    """Return every file in the collection for which ``is_static_page()`` is true."""
    pages = []
    for candidate in self:
        if candidate.is_static_page():
            pages.append(candidate)
    return pages
| 20
|
files.py
|
Python
|
mkdocs/structure/files.py
|
e7f07cc82ab2be920ab426ba07456d8b2592714d
|
mkdocs
| 3
|
|
195,279
| 13
| 10
| 4
| 60
| 10
| 0
| 15
| 43
|
_reshape_tensor
|
Patch 8322 (#4709)
* add dafetymix teacher
* safety_mix teacher
* safety_mix teacher pos and neg teachers
* add tests for teacher
* add license info
* improvement
* add task list
* add task list and lint
* add init.py
* adding some patch to director
* seeker changes
* th
* 3
* jing
* changes
* z and r
* remove .opts
* fix docs
* add contrractions
* lint
Co-authored-by: Dexter Ju <da.ju.fr@gmail.com>
Co-authored-by: Jing Xu <jingxu23@fb.com>
|
https://github.com/facebookresearch/ParlAI.git
|
def _reshape_tensor(self, new_len, tensor, indices):
reshaped_tensor = torch.zeros(new_len, device=tensor.device, dtype=tensor.dtype)
reshaped_tensor[indices] = tensor
return reshaped_tensor
| 40
|
director_bb2.py
|
Python
|
projects/fits/agents/director_bb2.py
|
b1acb681207559da56a787ba96e16f0e23697d92
|
ParlAI
| 1
|
|
268,987
| 17
| 9
| 5
| 98
| 10
| 0
| 21
| 26
|
binary_matches
|
Added util metric method for binary_matches. Decoupled from public metric binarry_acc
|
https://github.com/keras-team/keras.git
|
def binary_matches(y_true, y_pred, threshold=0.5):
    """Elementwise indicator of whether thresholded predictions match labels.

    ``y_pred`` is binarized by comparing against ``threshold``; the result
    is an int8 tensor with 1 where the binarized prediction equals
    ``y_true`` and 0 elsewhere.
    """
    y_pred = tf.convert_to_tensor(y_pred)
    cutoff = tf.cast(threshold, y_pred.dtype)
    binarized = tf.cast(y_pred > cutoff, y_pred.dtype)
    return tf.cast(tf.equal(y_true, binarized), tf.int8)
| 66
|
metrics_utils.py
|
Python
|
keras/utils/metrics_utils.py
|
119cd4655d01570a70c70879dff4461ea46161bf
|
keras
| 1
|
|
265,023
| 34
| 16
| 13
| 159
| 22
| 0
| 39
| 198
|
draw_terminations
|
Update SVG trace rendering to support multiple terminations per cable end
|
https://github.com/netbox-community/netbox.git
|
def draw_terminations(self, terminations):
    """Draw one box per cable termination as a horizontally centered row.

    Each drawn box object is appended to ``self.terminations``.
    """
    # Left edge of the first box, chosen so the whole row is centered on
    # the drawing width.
    x = self.width / 2 - len(terminations) * TERMINATION_WIDTH / 2
    for i, term in enumerate(terminations):
        t = self._draw_box(
            x=x + i * TERMINATION_WIDTH,
            width=TERMINATION_WIDTH,
            color=self._get_color(term),
            url=term.get_absolute_url(),
            labels=self._get_labels(term),
            radius=5,
            # True for every box except the last one in the row;
            # assumed to control _draw_box's cursor advance — TODO confirm.
            reset_cursor=bool(i + 1 != len(terminations))
        )
        self.terminations.append(t)
| 104
|
cables.py
|
Python
|
netbox/dcim/svg/cables.py
|
bab6fb0de24d568371c8a55bcb22768b2d60f515
|
netbox
| 2
|
|
210,756
| 44
| 13
| 23
| 318
| 36
| 0
| 58
| 260
|
predict
|
Develop branch: add fight action for pphuman (#6160)
* add fight for PP-Human
* add short_size and target_size for fight recognition
* add short_size and target_size for fight_infer
* modify code according to the reviews
* add the wrong deleted lines`
* Update pipeline.py
* Update infer_cfg.yml
* visualize fight when fight action occur
* 乱码修改
* delete useless parmas
* delete useless code str2bool
|
https://github.com/PaddlePaddle/PaddleDetection.git
|
def predict(self, input):
    """Run action recognition on a video path or pre-extracted frames.

    Args:
        input: a path string to a video file (routed to
            ``preprocess_video``) or anything else, treated as frames
            (routed to ``preprocess_frames``).

    Returns:
        The ``(classes, scores)`` pair produced by ``self.postprocess``.
    """
    input_names = self.predictor.get_input_names()
    input_tensor = self.predictor.get_input_handle(input_names[0])
    output_names = self.predictor.get_output_names()
    output_tensor = self.predictor.get_output_handle(output_names[0])
    # preprocess (timed via recognize_times)
    self.recognize_times.preprocess_time_s.start()
    if type(input) == str:
        inputs = self.preprocess_video(input)
    else:
        inputs = self.preprocess_frames(input)
    self.recognize_times.preprocess_time_s.end()
    # Tile the single preprocessed clip across the batch dimension.
    inputs = np.expand_dims(
        inputs, axis=0).repeat(
            self.batch_size, axis=0).copy()
    input_tensor.copy_from_cpu(inputs)
    # model prediction (timed)
    self.recognize_times.inference_time_s.start()
    self.predictor.run()
    self.recognize_times.inference_time_s.end()
    output = output_tensor.copy_to_cpu()
    # postprocess (timed)
    self.recognize_times.postprocess_time_s.start()
    classes, scores = self.postprocess(output)
    self.recognize_times.postprocess_time_s.end()
    return classes, scores
| 193
|
video_action_infer.py
|
Python
|
deploy/python/video_action_infer.py
|
67f16ed9cac254612ddb141fcd8a14db3dbfd6d6
|
PaddleDetection
| 2
|
|
248,620
| 19
| 9
| 7
| 82
| 13
| 0
| 20
| 55
|
test_left_room
|
Add more tests for room upgrades (#13074)
Signed-off-by: Sean Quah <seanq@element.io>
|
https://github.com/matrix-org/synapse.git
|
def test_left_room(self) -> None:
    """Upgrading a room after leaving it must be rejected with 403."""
    # Remove the user from the room.
    self.helper.leave(self.room_id, self.creator, tok=self.creator_token)
    channel = self._upgrade_room(self.creator_token)
    self.assertEqual(403, channel.code, channel.result)
| 52
|
test_upgrade_room.py
|
Python
|
tests/rest/client/test_upgrade_room.py
|
99d3931974e65865d1102ee79d7b7e2b017a3180
|
synapse
| 1
|
|
308,789
| 30
| 13
| 14
| 100
| 8
| 0
| 35
| 141
|
test_secure_device_pin_config
|
Enable local fulfillment google assistant (#63218)
Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
|
https://github.com/home-assistant/core.git
|
async def test_secure_device_pin_config(hass):
    """A configured secure_devices_pin is exposed on the GoogleConfig object."""
    secure_pin = "TEST"
    # Minimal valid schema input; the private key below is a throwaway
    # test fixture, not a real credential.
    secure_config = GOOGLE_ASSISTANT_SCHEMA(
        {
            "project_id": "1234",
            "service_account": {
                "private_key": "-----BEGIN PRIVATE KEY-----\nMIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAKYscIlwm7soDsHAz6L6YvUkCvkrX19rS6yeYOmovvhoK5WeYGWUsd8V72zmsyHB7XO94YgJVjvxfzn5K8bLePjFzwoSJjZvhBJ/ZQ05d8VmbvgyWUoPdG9oEa4fZ/lCYrXoaFdTot2xcJvrb/ZuiRl4s4eZpNeFYvVK/Am7UeFPAgMBAAECgYAUetOfzLYUudofvPCaKHu7tKZ5kQPfEa0w6BAPnBF1Mfl1JiDBRDMryFtKs6AOIAVwx00dY/Ex0BCbB3+Cr58H7t4NaPTJxCpmR09pK7o17B7xAdQv8+SynFNud9/5vQ5AEXMOLNwKiU7wpXT6Z7ZIibUBOR7ewsWgsHCDpN1iqQJBAOMODPTPSiQMwRAUHIc6GPleFSJnIz2PAoG3JOG9KFAL6RtIc19lob2ZXdbQdzKtjSkWo+O5W20WDNAl1k32h6MCQQC7W4ZCIY67mPbL6CxXfHjpSGF4Dr9VWJ7ZrKHr6XUoOIcEvsn/pHvWonjMdy93rQMSfOE8BKd/I1+GHRmNVgplAkAnSo4paxmsZVyfeKt7Jy2dMY+8tVZe17maUuQaAE7Sk00SgJYegwrbMYgQnWCTL39HBfj0dmYA2Zj8CCAuu6O7AkEAryFiYjaUAO9+4iNoL27+ZrFtypeeadyov7gKs0ZKaQpNyzW8A+Zwi7TbTeSqzic/E+z/bOa82q7p/6b7141xsQJBANCAcIwMcVb6KVCHlQbOtKspo5Eh4ZQi8bGl+IcwbQ6JSxeTx915IfAldgbuU047wOB04dYCFB2yLDiUGVXTifU=\n-----END PRIVATE KEY-----\n",
                "client_email": "dummy@dummy.iam.gserviceaccount.com",
            },
            "secure_devices_pin": secure_pin,
        }
    )
    config = GoogleConfig(hass, secure_config)
    assert config.secure_devices_pin == secure_pin
| 51
|
test_http.py
|
Python
|
tests/components/google_assistant/test_http.py
|
25fe213f222f8f49a8126130a8e507fa15e63c83
|
core
| 1
|
|
100,406
| 129
| 17
| 38
| 534
| 43
| 0
| 182
| 654
|
conda_installer
|
Update code to support Tensorflow versions up to 2.8 (#1213)
* Update maximum tf version in setup + requirements
* - bump max version of tf version in launcher
- standardise tf version check
* update keras get_custom_objects for tf>2.6
* bugfix: force black text in GUI file dialogs (linux)
* dssim loss - Move to stock tf.ssim function
* Update optimizer imports for compatibility
* fix logging for tf2.8
* Fix GUI graphing for TF2.8
* update tests
* bump requirements.txt versions
* Remove limit on nvidia-ml-py
* Graphing bugfixes
- Prevent live graph from displaying if data not yet available
* bugfix: Live graph. Collect loss labels correctly
* fix: live graph - swallow inconsistent loss errors
* Bugfix: Prevent live graph from clearing during training
* Fix graphing for AMD
|
https://github.com/deepfakes/faceswap.git
|
def conda_installer(self, package, channel=None, verbose=False, conda_only=False):
    """Install ``package`` through conda.

    Args:
        package: requirement string (e.g. ``"foo>=1.0"``). ``tensorflow-gpu``
            is special-cased: only the matching cuda/cudnn toolkits are
            installed from conda-forge, and tensorflow itself is left to pip.
        channel: optional extra conda channel to search.
        verbose: show conda output instead of silencing it.
        conda_only: if True, do not suggest a pip fallback on failure.

    Returns:
        True when the conda install succeeded; False when it failed or
        when the package was deferred to pip (tensorflow-gpu case).
    """
    # Packages with special characters need to be enclosed in double quotes
    success = True
    condaexe = ["conda", "install", "-y"]
    if not verbose or self.env.updater:
        condaexe.append("-q")
    if channel:
        condaexe.extend(["-c", channel])
    if package.startswith("tensorflow-gpu"):
        # Here we will install the cuda/cudnn toolkits, currently only available from
        # conda-forge, but fail tensorflow itself so that it can be handled by pip.
        specs = Requirement.parse(package).specs
        for key, val in TENSORFLOW_REQUIREMENTS.items():
            req_specs = Requirement.parse("foobar" + key).specs
            if all(item in req_specs for item in specs):
                cuda, cudnn = val
                break
        condaexe.extend(["-c", "conda-forge", f"cudatoolkit={cuda}", f"cudnn={cudnn}"])
        package = "Cuda Toolkit"
        success = False
    if package != "Cuda Toolkit":
        if any(char in package for char in (" ", "<", ">", "*", "|")):
            package = f"\"{package}\""
        condaexe.append(package)
        clean_pkg = package.replace("\"", "")
        self.output.info(f"Installing {clean_pkg}")
        # conda must run through a shell on Windows.
        shell = self.env.os_version[0] == "Windows"
        try:
            if verbose:
                run(condaexe, check=True, shell=shell)
            else:
                with open(os.devnull, "w", encoding="utf8") as devnull:
                    run(condaexe, stdout=devnull, stderr=devnull, check=True, shell=shell)
        except CalledProcessError:
            if not conda_only:
                self.output.info(f"{package} not available in Conda. Installing with pip")
            else:
                self.output.warning(f"Couldn't install {package} with Conda. "
                                    "Please install this package manually")
            success = False
    return success
| 298
|
setup.py
|
Python
|
setup.py
|
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
|
faceswap
| 14
|
|
271,587
| 4
| 6
| 3
| 22
| 4
| 0
| 4
| 7
|
reduce_per_replica
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def reduce_per_replica(values, strategy, reduction="first"):
| 25
|
training.py
|
Python
|
keras/engine/training.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 1
|
|
321,496
| 12
| 9
| 3
| 58
| 10
| 0
| 12
| 33
|
test_empty_message
|
Fix missed enum scope changes
For some reason, QtMsgType was not included and missing.
(cherry picked from commit 6afa00c465327a118dbcff46fa85b6df53037263)
For completiondelegate.py, we accessed the enum members via self instead of
properly using the class.
(cherry picked from commit d37cc4ac73545c6a2615456a3487536c2ec00803)
For interceptor.py, line breaks broke our script.
QKeyEvent.KeyPress was used inherited from QEvent.KeyPress, thus not
renamed.
|
https://github.com/qutebrowser/qutebrowser.git
|
def test_empty_message(self, caplog):
    """Sending an empty Qt message to the handler logs a placeholder warning."""
    log.qt_message_handler(QtCore.QtMsgType.QtDebugMsg, self.Context(), "")
    assert caplog.messages == ["Logged empty message!"]
| 34
|
test_log.py
|
Python
|
tests/unit/utils/test_log.py
|
76f9262defc0217289443467927cab7c211aff73
|
qutebrowser
| 1
|
|
118,573
| 65
| 12
| 25
| 189
| 23
| 0
| 87
| 359
|
serialize_final_report_to_files
|
Rename and refactor `Report` machinery (#4141)
This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
|
https://github.com/streamlit/streamlit.git
|
def serialize_final_report_to_files(self):
    """Serialize the finished report's messages plus its DONE manifest.

    Returns a list of ``(path, serialized_bytes)`` tuples: one entry per
    saved protobuf message, followed by the manifest. The manifest is
    intentionally last so clients never observe a manifest whose message
    files have not yet been written.
    """
    LOGGER.debug("Serializing final report")
    saved_msgs = []
    for msg in self._master_queue:
        if _should_save_report_msg(msg):
            saved_msgs.append(copy.deepcopy(msg))
    manifest = self._build_manifest(
        status=StaticManifest.DONE, num_messages=len(saved_msgs)
    )
    # (message_location, serialized_message) per saved message.
    message_tuples = []
    for msg_idx, msg in enumerate(saved_msgs):
        location = "reports/%(id)s/%(idx)s.pb" % {
            "id": self.script_run_id,
            "idx": msg_idx,
        }
        message_tuples.append((location, msg.SerializeToString()))
    manifest_location = "reports/%(id)s/manifest.pb" % {"id": self.script_run_id}
    manifest_tuples = [(manifest_location, manifest.SerializeToString())]
    # Manifest must come after the deltas (see docstring).
    return message_tuples + manifest_tuples
| 113
|
session_data.py
|
Python
|
lib/streamlit/session_data.py
|
704eab3478cf69847825b23dabf15813a8ac9fa2
|
streamlit
| 4
|
|
85,884
| 77
| 12
| 54
| 425
| 42
| 0
| 107
| 667
|
test_sends_issue_notification
|
chore(notification): Pass User ID into notification analytics (#38924)
We pass in the actor_id to notification analytics events but we should
also include a user_id if the recipient is a user
|
https://github.com/getsentry/sentry.git
|
def test_sends_issue_notification(self, record_analytics):
    """An issue alert rule sends email + Slack notifications and records
    an analytics event for each channel, including both user_id and
    actor_id of the recipient.
    """
    # Rule: notify a specific member (self.user) on any match.
    action_data = {
        "id": "sentry.mail.actions.NotifyEmailAction",
        "targetType": "Member",
        "targetIdentifier": str(self.user.id),
    }
    Rule.objects.create(
        project=self.project,
        label="a rule",
        data={
            "match": "all",
            "actions": [action_data],
        },
    )
    min_ago = iso_format(before_now(minutes=1))
    event = self.store_event(
        data={
            "message": "Hello world",
            "timestamp": min_ago,
        },
        project_id=self.project.id,
    )
    cache_key = write_event_to_cache(event)
    # Post-processing of a new group is what triggers the alert rule.
    with self.tasks():
        post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            group_id=event.group_id,
            cache_key=cache_key,
        )
    msg = mail.outbox[0]
    # check the txt version
    assert "Details\n-------\n\n" in msg.body
    # check the html version
    assert "Hello world</pre>" in msg.alternatives[0][0]
    attachment, text = get_attachment()
    assert attachment["title"] == "Hello world"
    assert (
        attachment["footer"]
        == f"{self.project.slug} | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user|Notification Settings>"
    )
    # Both analytics events must carry the recipient's user_id as well as
    # the actor_id (the behavior under test).
    assert analytics_called_with_args(
        record_analytics,
        "integrations.email.notification_sent",
        user_id=self.user.id,
        actor_id=self.user.actor_id,
        organization_id=self.organization.id,
    )
    assert analytics_called_with_args(
        record_analytics,
        "integrations.slack.notification_sent",
        user_id=self.user.id,
        actor_id=self.user.actor_id,
        organization_id=self.organization.id,
    )
| 254
|
test_notifications.py
|
Python
|
tests/sentry/notifications/test_notifications.py
|
afbf9a3334ce9cad1a62fced372d7fcee40a3133
|
sentry
| 1
|
|
313,646
| 29
| 10
| 5
| 91
| 15
| 0
| 30
| 49
|
test_no_recursive_secrets
|
Significantly improve yaml load times when the C loader is available (#73337)
|
https://github.com/home-assistant/core.git
|
def test_no_recursive_secrets(caplog, try_both_loaders):
    """Using !secret inside the secrets file itself raises HomeAssistantError."""
    # The secrets file (SECRET_YAML) contains a nested !secret reference.
    files = {YAML_CONFIG_FILE: "key: !secret a", yaml.SECRET_YAML: "a: 1\nb: !secret a"}
    with patch_yaml_files(files), pytest.raises(HomeAssistantError) as e:
        load_yaml_config_file(YAML_CONFIG_FILE)
    assert e.value.args == ("Secrets not supported in this YAML file",)
| 51
|
test_init.py
|
Python
|
tests/util/yaml/test_init.py
|
dca4d3cd61d7f872621ee4021450cc6a0fbd930e
|
core
| 1
|
|
271,435
| 43
| 16
| 14
| 133
| 12
| 0
| 53
| 207
|
dtype
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def dtype(self):
    """Return the dtype of the wrapped TypeSpec.

    Raises:
        AttributeError: if the wrapped TypeSpec has no ``dtype`` field.
        TypeError: if the ``dtype`` field is not a ``tf.DType``.
    """
    type_spec = self._type_spec
    if not hasattr(type_spec, "dtype"):
        raise AttributeError(
            f"KerasTensor wraps TypeSpec {type(type_spec).__qualname__}, "
            "which does not have a dtype."
        )
    if not isinstance(type_spec.dtype, tf.DType):
        raise TypeError(
            "KerasTensor requires that wrapped TypeSpec's dtype is a DType; got "
            f"TypeSpec {type(type_spec).__qualname__}, whose dtype field has "
            f"unexpected type {type(type_spec.dtype).__qualname__}."
        )
    return type_spec.dtype
| 53
|
keras_tensor.py
|
Python
|
keras/engine/keras_tensor.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 3
|
|
5,880
| 88
| 11
| 46
| 393
| 41
| 0
| 117
| 366
|
test_visualization_compare_classifiers_changing_k_output_pdf
|
Use tempfile to automatically garbage collect data and modeling artifacts in ludwig integration tests. (#1642)
* Use tmpdir to automatically garbage collect data and modeling artifacts in ludwig integration tests.
|
https://github.com/ludwig-ai/ludwig.git
|
def test_visualization_compare_classifiers_changing_k_output_pdf(csv_filename):
    """Run the compare_classifiers_performance_changing_k visualization CLI
    and verify it emits exactly one figure in both pdf and png formats.
    """
    input_features = [category_feature(vocab_size=10)]
    output_features = [category_feature(vocab_size=2, reduce_input="sum")]
    # Generate test data
    rel_path = generate_data(input_features, output_features, csv_filename)
    exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path)
    vis_output_pattern_pdf = os.path.join(exp_dir_name, "*.pdf")
    vis_output_pattern_png = os.path.join(exp_dir_name, "*.png")
    output_feature_name = get_output_feature_name(exp_dir_name)
    probability = os.path.join(exp_dir_name, PREDICTIONS_PARQUET_FILE_NAME)
    experiment_source_data_name = csv_filename.split(".")[0]
    ground_truth = experiment_source_data_name + ".csv"
    split_file = experiment_source_data_name + ".split.csv"
    ground_truth_metadata = exp_dir_name + "/model/training_set_metadata.json"
    # CLI invocation; the same probabilities file is passed twice to
    # compare two "models".
    test_cmd_pdf = [
        "python",
        "-m",
        "ludwig.visualize",
        "--visualization",
        "compare_classifiers_performance_changing_k",
        "--output_feature_name",
        output_feature_name,
        "--split_file",
        split_file,
        "--ground_truth_metadata",
        ground_truth_metadata,
        "--probabilities",
        probability,
        probability,
        "--model_names",
        "Model1",
        "Model2",
        "--ground_truth",
        ground_truth,
        "--top_n_classes",
        "6",
        "-od",
        exp_dir_name,
    ]
    test_cmd_png = test_cmd_pdf.copy() + ["-ff", "png"]
    commands = [test_cmd_pdf, test_cmd_png]
    vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png]
    for command, viz_pattern in zip(commands, vis_patterns):
        result = subprocess.run(command)
        figure_cnt = glob.glob(viz_pattern)
        assert 0 == result.returncode
        assert 1 == len(figure_cnt)
| 238
|
test_visualization.py
|
Python
|
tests/integration_tests/test_visualization.py
|
4fb8f63181f5153b4f6778c6ef8dad61022c4f3f
|
ludwig
| 2
|
|
42,468
| 56
| 16
| 11
| 159
| 18
| 0
| 84
| 174
|
sentence_ribes
|
Update black to 22.3.0
The most recent release of Click (8.1.0) was breaking Black. See psf/black#2964
|
https://github.com/nltk/nltk.git
|
def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10):
    """Return the best sentence-level RIBES score over all references.

    For each reference, the score combines Kendall's tau of the
    word-rank alignment, unigram precision weighted by ``alpha``, and a
    brevity penalty weighted by ``beta``; the maximum across references
    is returned (-1.0 when ``references`` is empty).
    """
    best = -1.0
    for ref in references:
        # Ranked-correlation alignment between reference and hypothesis.
        alignment = word_rank_alignment(ref, hypothesis)
        tau = kendall_tau(alignment)
        # Brevity penalty, capped at 1.0.
        brevity = min(1.0, math.exp(1.0 - len(ref) / len(hypothesis)))
        # Unigram precision.
        precision = len(alignment) / len(hypothesis)
        score = tau * (precision**alpha) * (brevity**beta)
        best = max(best, score)
    return best
| 108
|
ribes_score.py
|
Python
|
nltk/translate/ribes_score.py
|
0fac0c0f8e4618c2bdd3d2137d5fb8a80f581246
|
nltk
| 3
|
|
159,094
| 13
| 9
| 6
| 70
| 9
| 0
| 15
| 33
|
test_cli_missing_log_level_env_var_used
|
Configurable logging for libraries (#10614)
* Make library level logging to be configurable
Fixes https://github.com/RasaHQ/rasa/issues/10203
* Create log level documentation under cheatsheet in Rasa docs
* Add log docs to `rasa shell --debug` (and others)
|
https://github.com/RasaHQ/rasa.git
|
def test_cli_missing_log_level_env_var_used():
    """Without the env var set, library log levels fall back to defaults.

    ``rasa`` should default to WARNING and ``matplotlib`` to INFO.
    """
    configure_logging_and_warnings()
    rasa_logger = logging.getLogger("rasa")
    # Bug fix: these comparisons were bare expressions (no `assert`),
    # so the test could never fail regardless of the configured levels.
    assert rasa_logger.level == logging.WARNING
    matplotlib_logger = logging.getLogger("matplotlib")
    assert matplotlib_logger.level == logging.INFO
| 38
|
test_common.py
|
Python
|
tests/utils/test_common.py
|
f00148b089d326c952880a0e5e6bd4b2dcb98ce5
|
rasa
| 1
|
|
64,092
| 15
| 10
| 6
| 70
| 9
| 0
| 19
| 13
|
get_indexed_packed_items_table
|
fix: Linter and minor code refactor
- Create an indexed map of stale packed items table to avoid loops to check if packed item row exists
- Reset packed items if row deletion takes place
- Renamed functions to self-explain them
- Split long function
- Reduce function calls inside function (makes it harder to follow through)
|
https://github.com/frappe/erpnext.git
|
def get_indexed_packed_items_table(doc):
    """Index the packed-items child table of ``doc`` by a composite key.

    The key ``(parent_item, item_code, parent_detail_docname)`` uniquely
    identifies a packed item row, allowing O(1) existence checks instead
    of scanning the child table repeatedly.
    """
    return {
        (row.parent_item, row.item_code, row.parent_detail_docname): row
        for row in doc.get("packed_items")
    }
| 43
|
packed_item.py
|
Python
|
erpnext/stock/doctype/packed_item/packed_item.py
|
4c677eafe958a448074b3efc859334c9a088be2c
|
erpnext
| 2
|
|
309,832
| 79
| 14
| 39
| 391
| 37
| 0
| 106
| 403
|
async_handle_message
|
Suppress Alexa state reports when not authorized (#64064)
|
https://github.com/home-assistant/core.git
|
async def async_handle_message(hass, config, request, context=None, enabled=True):
    """Handle an incoming Alexa Smart Home v3 directive.

    Dispatches the directive to its registered handler, converts Alexa
    errors to error responses, fires EVENT_ALEXA_SMART_HOME, and returns
    the serialized response payload.
    """
    assert request[API_DIRECTIVE][API_HEADER]["payloadVersion"] == "3"
    if context is None:
        context = ha.Context()
    directive = AlexaDirective(request)
    try:
        # Raising here (rather than returning) routes the "disabled"
        # case through the same AlexaError -> error-response path below.
        if not enabled:
            raise AlexaBridgeUnreachableError(
                "Alexa API not enabled in Home Assistant configuration"
            )
        await config.set_authorized(True)
        if directive.has_endpoint:
            directive.load_entity(hass, config)
        funct_ref = HANDLERS.get((directive.namespace, directive.name))
        if funct_ref:
            response = await funct_ref(hass, config, directive, context)
            if directive.has_endpoint:
                response.merge_context_properties(directive.endpoint)
        else:
            # Unknown namespace/name combination: log and answer with a
            # generic error response.
            _LOGGER.warning(
                "Unsupported API request %s/%s", directive.namespace, directive.name
            )
            response = directive.error()
    except AlexaError as err:
        response = directive.error(
            error_type=err.error_type, error_message=err.error_message
        )
    # Fire a bus event describing the request/response pair for listeners.
    request_info = {"namespace": directive.namespace, "name": directive.name}
    if directive.has_endpoint:
        request_info["entity_id"] = directive.entity_id
    hass.bus.async_fire(
        EVENT_ALEXA_SMART_HOME,
        {
            "request": request_info,
            "response": {"namespace": response.namespace, "name": response.name},
        },
        context=context,
    )
    return response.serialize()
| 241
|
smart_home.py
|
Python
|
homeassistant/components/alexa/smart_home.py
|
e6899416e13214df63ccc5edc035039e318613fe
|
core
| 8
|
|
42,549
| 20
| 11
| 5
| 76
| 11
| 0
| 23
| 62
|
collocations
|
Docstring tests (#3050)
* fixed pytests
* fixed more pytests
* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py
* fixed pytests (mainly multiline or rounding issues)
* fixed treebank pytests, removed test for return_string=True (deprecated)
* fixed destructive.py pytests, removed test for return_string=True (deprecated)
* fixed pytest (rounding issues)
* fixed pytest (initialised missing object)
* fixed pytest (formatting issues)
* fixed pytest (formatting issues)
* fixed pytest (formatting issues)
* added pytest +SKIP for deprecated module stanford
* updated AUTHORS.md
* changed docstring corrections by usage of ELLIPSIS and different roundings
* fixed AUTHORS.md to be consistent
* Fix framenet doctest formatting with pprint
* Change docstring on MultiListBox.__init__
I believe the original typo was misinterpreted and changed to something that was not originally intended.
Co-authored-by: Jan Lennartz <jan.lennartz@ing.com>
Co-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>
Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com>
|
https://github.com/nltk/nltk.git
|
def collocations(self, num=20, window_size=2):
    """Print up to ``num`` collocations found in this text.

    Each collocation pair is joined with a space; the resulting strings
    are printed wrapped, separated by "; ".
    """
    pairs = self.collocation_list(num, window_size)
    joined = [w1 + " " + w2 for w1, w2 in pairs]
    print(tokenwrap(joined, separator="; "))
| 47
|
text.py
|
Python
|
nltk/text.py
|
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
|
nltk
| 2
|
|
309,256
| 50
| 15
| 28
| 151
| 12
| 0
| 57
| 329
|
test_check_loop_async_integration_non_strict
|
Warn on`time.sleep` in event loop (#63766)
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
|
https://github.com/home-assistant/core.git
|
async def test_check_loop_async_integration_non_strict(caplog):
    """In non-strict mode, a blocking call from an integration only warns.

    The fabricated stack frames simulate a call chain:
    core -> hue integration -> third-party library.
    """
    with patch(
        "homeassistant.util.async_.extract_stack",
        return_value=[
            Mock(
                filename="/home/paulus/homeassistant/core.py",
                lineno="23",
                line="do_something()",
            ),
            Mock(
                filename="/home/paulus/homeassistant/components/hue/light.py",
                lineno="23",
                line="self.light.is_on",
            ),
            Mock(
                filename="/home/paulus/aiohue/lights.py",
                lineno="2",
                line="something()",
            ),
        ],
    ):
        hasync.check_loop(strict=False)
    # Non-strict: no exception, but the offending integration frame is logged.
    assert (
        "Detected blocking call inside the event loop. This is causing stability issues. "
        "Please report issue for hue doing blocking calls at "
        "homeassistant/components/hue/light.py, line 23: self.light.is_on"
        in caplog.text
    )
| 84
|
test_async.py
|
Python
|
tests/util/test_async.py
|
dc58bc375ae203e3d394225f9c3a5a14d43cb2f3
|
core
| 1
|
|
310,240
| 19
| 12
| 7
| 90
| 14
| 0
| 20
| 49
|
test_device_diagnostics_error
|
Add zwave_js device diagnostics (#64504)
* Add zwave_js device diagnostics
* Add diagnostics as a dependency in manifest
* Add failure scenario test
* fix device diagnostics helper and remove dependency
* tweak
|
https://github.com/home-assistant/core.git
|
async def test_device_diagnostics_error(hass, integration):
    """Requesting diagnostics for a non-zwave_js device raises ValueError."""
    dev_reg = async_get(hass)
    # Device registered with an unrelated identifier, so it cannot be
    # resolved to a Z-Wave node — presumably what triggers the error.
    device = dev_reg.async_get_or_create(
        config_entry_id=integration.entry_id, identifiers={("test", "test")}
    )
    with pytest.raises(ValueError):
        await async_get_device_diagnostics(hass, integration, device)
| 53
|
test_diagnostics.py
|
Python
|
tests/components/zwave_js/test_diagnostics.py
|
11d0dcf7ac4ddc2638f403ef0ee6b796ac5bbceb
|
core
| 1
|
|
134,330
| 89
| 12
| 25
| 273
| 31
| 1
| 135
| 320
|
disconnect
|
Try fixing the issue. (#29657)
Looks like we are using PyOpenSSL < 22.0 which can cause issues with the newer version of cryptography module that causes AttributeError: module 'lib' has no attribute 'X509_V_FLAG_CB_ISSUER_CHECK' . Check boto/botocore#2744
urllib3/urllib3#2680
for more details.
The problem was that when we call install-dependencies, it downloads requirements.txt and then requirement_test.txt. When we download requirement_test.txt, we don't have -U flag, and then this means some of dependencies are not upgraded as stated inside requirement_test.txt. I tried adding -U flag to the PR
Signed-off-by: SangBin Cho <rkooo567@gmail.com>
|
https://github.com/ray-project/ray.git
|
def disconnect(exiting_interpreter=False):
    """Disconnect the global worker from its Ray node and stop its threads.

    Safe to call when not connected. After this, connect() can be called
    again and remote functions/actors will be re-exported.
    """
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        worker.gcs_function_key_subscriber.close()
        worker.gcs_error_subscriber.close()
        worker.gcs_log_subscriber.close()
        # These threads are only present in some worker configurations.
        if hasattr(worker, "import_thread"):
            worker.import_thread.join_import_thread()
        if hasattr(worker, "listener_thread"):
            worker.listener_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        # Clear the stop flag so a future connect() can start fresh threads.
        worker.threads_stopped.clear()
        worker._session_index += 1
        global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
    worker.node = None  # Disconnect the worker from the node.
    worker.cached_functions_to_run = []
    worker.serialization_context_map.clear()
    try:
        ray_actor = ray.actor
    except AttributeError:
        ray_actor = None  # This can occur during program termination
    if ray_actor is not None:
        ray_actor._ActorClassMethodMetadata.reset_cache()
@contextmanager
|
@contextmanager
| 151
|
worker.py
|
Python
|
python/ray/_private/worker.py
|
bf22325eb518c4a42242a88031909869da003850
|
ray
| 7
|
266,043
| 11
| 9
| 5
| 56
| 8
| 0
| 13
| 48
|
save_object
|
4347 Add JSON/YAML import support for all objects (#10367)
* 4347 initial code for json import
* 4347 initial code for json import
* Clean up form processing logic
* Consolidate import forms
* Consolidate object import/update logic
* Clean up bulk import view
Co-authored-by: jeremystretch <jstretch@ns1.com>
|
https://github.com/netbox-community/netbox.git
|
def save_object(self, obj_form, request):
    """Save ``obj_form``, stamping the requesting user on the instance.

    The form is saved without committing, the user is attached, and the
    instance is then persisted and returned.
    """
    obj = obj_form.save(commit=False)
    obj.user = request.user
    obj.save()
    return obj
| 34
|
views.py
|
Python
|
netbox/dcim/views.py
|
93e7457e0d84ad24cba22cc5c0811777ddebf94e
|
netbox
| 1
|
|
259,101
| 107
| 16
| 31
| 421
| 34
| 0
| 161
| 434
|
compute_class_weight
|
FIX Support extra class_weights in compute_class_weight (#22595)
|
https://github.com/scikit-learn/scikit-learn.git
|
def compute_class_weight(class_weight, *, classes, y):
    """Compute one weight per entry of ``classes`` for sample weighting.

    Parameters
    ----------
    class_weight : dict, "balanced" or None
        Dict maps class label -> weight (classes missing from the dict get
        weight 1). "balanced" makes weights inversely proportional to
        class frequencies in ``y``. None or an empty dict gives uniform
        weights.
    classes : ndarray
        Classes occurring in the data; must cover every label in ``y``.
    y : array-like
        Original class labels per sample.

    Returns
    -------
    weight : ndarray of shape (n_classes,)
        Weight per class, in the order of ``classes``.
    """
    # Import error caused by circular imports.
    from ..preprocessing import LabelEncoder

    if set(y) - set(classes):
        raise ValueError("classes should include all valid labels that can be in y")
    if class_weight is None or len(class_weight) == 0:
        # uniform class weights
        weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
    elif class_weight == "balanced":
        # Find the weight of each class as present in y.
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        if not all(np.in1d(classes, le.classes_)):
            raise ValueError("classes should have valid labels that are in y")
        # weight_c = n_samples / (n_classes * count_c)
        recip_freq = len(y) / (len(le.classes_) * np.bincount(y_ind).astype(np.float64))
        weight = recip_freq[le.transform(classes)]
    else:
        # user-defined dictionary
        weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
        if not isinstance(class_weight, dict):
            raise ValueError(
                "class_weight must be dict, 'balanced', or None, got: %r" % class_weight
            )
        unweighted_classes = []
        for i, c in enumerate(classes):
            if c in class_weight:
                weight[i] = class_weight[c]
            else:
                unweighted_classes.append(c)
        n_weighted_classes = len(classes) - len(unweighted_classes)
        # Extra dict keys are tolerated only when every class got a weight;
        # otherwise the unmatched classes are reported.
        if unweighted_classes and n_weighted_classes != len(class_weight):
            raise ValueError(
                f"The classes, {unweighted_classes}, are not in class_weight"
            )
    return weight
| 254
|
class_weight.py
|
Python
|
sklearn/utils/class_weight.py
|
3605c140af992b6ac52f04f1689c58509cc0b5b2
|
scikit-learn
| 11
|
|
259,898
| 39
| 9
| 18
| 167
| 20
| 1
| 47
| 129
|
test_fetch_openml_equivalence_array_dataframe
|
ENH improve ARFF parser using pandas (#21938)
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Olivier Grisel <olivier.grisel@gmail.com>
Co-authored-by: Adrin Jalali <adrin.jalali@gmail.com>
|
https://github.com/scikit-learn/scikit-learn.git
|
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):
    """fetch_openml must return the same data/target values whether the
    result is requested as a DataFrame (as_frame=True) or as arrays.
    """
    pytest.importorskip("pandas")
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch_as_frame_true = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )
    bunch_as_frame_false = fetch_openml(
        data_id=data_id,
        as_frame=False,
        cache=False,
        parser=parser,
    )
    assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)
    assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
|
@fails_if_pypy
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
| 89
|
test_openml.py
|
Python
|
sklearn/datasets/tests/test_openml.py
|
a47d569e670fd4102af37c3165c9b1ddf6fd3005
|
scikit-learn
| 1
|
258,993
| 12
| 10
| 7
| 66
| 8
| 0
| 15
| 48
|
tosequence
|
DOC Ensure that tosequence passes numpydoc validation (#22494)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
|
https://github.com/scikit-learn/scikit-learn.git
|
def tosequence(x):
    """Cast iterable ``x`` into a sequence, avoiding a copy when possible.

    NumPy arrays pass through ``np.asarray``; objects that already are a
    :class:`Sequence` are returned unchanged; anything else is
    materialized into a list.
    """
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    if isinstance(x, Sequence):
        return x
    return list(x)
| 40
|
__init__.py
|
Python
|
sklearn/utils/__init__.py
|
8abc6d890e8bb4be7abe2984b3f373585f8f3c57
|
scikit-learn
| 3
|
|
259,648
| 93
| 17
| 35
| 371
| 19
| 0
| 141
| 422
|
_check_reg_targets
|
ENH add D2 pinbal score and D2 absolute error score (#22118)
|
https://github.com/scikit-learn/scikit-learn.git
|
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
    """Validate and normalize regression targets for metric computation.

    1-d inputs are reshaped to column vectors; shape agreement between
    ``y_true`` and ``y_pred`` is enforced; ``multioutput`` is validated
    (allowed strings, None, or an array of per-output weights).

    Returns
    -------
    (y_type, y_true, y_pred, multioutput) where ``y_type`` is
    "continuous" for a single output and "continuous-multioutput"
    otherwise.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
    # Normalize 1-d targets to shape (n_samples, 1).
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))
    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError(
            "y_true and y_pred have different number of output ({0}!={1})".format(
                y_true.shape[1], y_pred.shape[1]
            )
        )
    n_outputs = y_true.shape[1]
    allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError(
                "Allowed 'multioutput' string values are {}. "
                "You provided multioutput={!r}".format(
                    allowed_multioutput_str, multioutput
                )
            )
    elif multioutput is not None:
        # Array-like custom weights: must match the number of outputs.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(
                "There must be equally many custom weights (%d) as outputs (%d)."
                % (len(multioutput), n_outputs)
            )
    y_type = "continuous" if n_outputs == 1 else "continuous-multioutput"
    return y_type, y_true, y_pred, multioutput
| 234
|
_regression.py
|
Python
|
sklearn/metrics/_regression.py
|
aeeac1c1d634dc80abc93fb30b3fe48e1d709b64
|
scikit-learn
| 10
|
|
107,012
| 47
| 10
| 12
| 110
| 8
| 0
| 87
| 232
|
_gci
|
Rewrite AxesStack independently of cbook.Stack.
AxesStack is fairly independent from cbook.Stack: cbook.Stack handles
the forward/back/home buttons of the navbar, and therefore additionally
maintains a movable "cursor" in the stack; AxesStack, on the other hand,
needs to keep track both of "original" order and of "gca" order.
Rewriting it from scratch, and using "original" order as main storage
order (the "gca" stack being tracked using indices) shortens the
implementation and simplifies it (as there's no more need to figure out
what the super()calls do).
|
https://github.com/matplotlib/matplotlib.git
|
def _gci(self):
    """Return the most recent "current image" for this figure, if any.

    Helper for `~matplotlib.pyplot.gci`; do not use elsewhere. The
    current Axes is consulted first; if the figure has no current Axes,
    None is returned without searching further. Otherwise previously
    created Axes are searched newest-first (documented legacy behavior).
    """
    current_ax = self._axstack.current()
    if current_ax is None:
        return None
    found = current_ax._gci()
    if found is not None:
        return found
    # Fall back to earlier Axes, newest first.
    for candidate in reversed(self.axes):
        found = candidate._gci()
        if found is not None:
            return found
    return None
| 64
|
figure.py
|
Python
|
lib/matplotlib/figure.py
|
8669c4636ce3b6ac6f4905c365ab41685186da56
|
matplotlib
| 5
|
|
105,266
| 81
| 21
| 13
| 176
| 17
| 0
| 109
| 295
|
_scrub_json
|
Support streaming cfq dataset (#4579)
* Support streaming cfq dataset
* Fix style
* Fix remaining code
* Fix tags and documentation card
* Fix task tags
* Fix task tag
* Refactor parsing to reduce RAM usage
* Add license
* Update metadata JSON
* Update dummy data
* Use less RAM by loading only samples needed
* Yield immediately each sample or buffer it
* Update dummy data to have dataset.json as last archive member
* Rename license tag
|
https://github.com/huggingface/datasets.git
|
def _scrub_json(self, content):
    """Yield minimal ``{question, query}`` records from a huge JSON stream.

    A full ``json.load`` of the 4GB dataset file needs more than 40GB of
    RAM and takes ~3min; instead we scan line by line with regexps and
    reassemble only the two fields we actually need.
    """
    question_pattern = re.compile(r'("%s":\s*"[^"]*")' % _QUESTION_FIELD)
    query_pattern = re.compile(r'("%s":\s*"[^"]*")' % _QUERY_FIELD)
    pending_question = None
    for raw_line in content:
        text = raw_line.decode("utf-8")
        if pending_question is None:
            # Still looking for the question half of the next record.
            pending_question = question_pattern.match(text)
        else:
            # Question found earlier; now wait for its matching query.
            query = query_pattern.match(text)
            if query:
                yield json.loads(
                    "{" + pending_question.group(1) + "," + query.group(1) + "}"
                )
                pending_question = None
| 99
|
cfq.py
|
Python
|
datasets/cfq/cfq.py
|
de2f6ef2bc14022d0e9212f293b8e7b200aa7e75
|
datasets
| 4
|
|
83,834
| 34
| 10
| 20
| 159
| 17
| 0
| 36
| 191
|
test_stream_admin_remove_multiple_users_from_stream
|
message_flags: Short-circuit if no messages changed.
Omit sending an event, and updating the database, if there are no
matching messages.
|
https://github.com/zulip/zulip.git
|
def test_stream_admin_remove_multiple_users_from_stream(self) -> None:
    """A stream admin (not realm admin) can unsubscribe several users at once."""
    names = ["cordelia", "prospero", "othello", "hamlet", "ZOE"]
    target_users = [self.example_user(name) for name in names]
    result = self.attempt_unsubscribe_of_principal(
        query_count=26,
        cache_count=9,
        target_users=target_users,
        is_realm_admin=False,
        is_stream_admin=True,
        is_subbed=True,
        invite_only=False,
        target_users_subbed=True,
    )
    response = self.assert_json_success(result)
    self.assert_length(response["removed"], 5)
    self.assert_length(response["not_removed"], 0)
| 101
|
test_subs.py
|
Python
|
zerver/tests/test_subs.py
|
803982e87254e3b1ebcb16ed795e224afceea3a3
|
zulip
| 2
|
|
140,569
| 110
| 17
| 18
| 279
| 33
| 0
| 149
| 384
|
custom_loss
|
Clean up docstyle in python modules and add LINT rule (#25272)
|
https://github.com/ray-project/ray.git
|
def custom_loss(self, policy_loss, loss_inputs):
    """Augment each policy loss term with an imitation-learning loss.

    Reads the next batch from the offline input reader, computes a
    categorical imitation (behavior-cloning) loss on it, records both
    loss metrics on `self`, and returns the policy losses each shifted
    by 10x the imitation loss.
    """
    device = policy_loss[0].device
    # Pull the next batch from our input files.
    batch = self.reader.next()
    # Build a graph copy with weight sharing to define a secondary loss.
    obs = restore_original_dimensions(
        torch.from_numpy(batch["obs"]).float().to(device),
        self.obs_space,
        tensorlib="torch",
    )
    logits, _ = self.forward({"obs": obs}, [], None)
    # Self-supervised losses can also be added by referencing tensors
    # created during _build_layers_v2(), e.g. an autoencoder-style loss:
    # ae_loss = squared_diff(
    #     loss_inputs["obs"], Decoder(self.fcnet.last_layer))
    print("FYI: You can also use these tensors: {}, ".format(loss_inputs))
    # Compute the IL loss.
    action_dist = TorchCategorical(logits, self.model_config)
    actions = torch.from_numpy(batch["actions"]).to(device)
    imitation_loss = torch.mean(-action_dist.logp(actions))
    self.imitation_loss_metric = imitation_loss.item()
    self.policy_loss_metric = np.mean([term.item() for term in policy_loss])
    # Add the imitation loss to each already calculated policy loss term.
    # Alternatively (if custom loss has its own optimizer):
    # return policy_loss + [10 * self.imitation_loss]
    return [term + 10 * imitation_loss for term in policy_loss]
| 167
|
custom_loss_model.py
|
Python
|
rllib/examples/models/custom_loss_model.py
|
905258dbc19753c81039f993477e7ab027960729
|
ray
| 3
|
|
106,353
| 27
| 12
| 9
| 130
| 19
| 0
| 30
| 101
|
_call_downloader
|
[utils, etc] Kill child processes when yt-dl is killed
* derived from PR #26592, closes #26592
Authored by: Unrud
|
https://github.com/ytdl-org/youtube-dl.git
|
def _call_downloader(self, tmpfilename, info_dict):
    """Run the external downloader process and return its exit status.

    On a non-zero exit, the child's stderr is forwarded to our stderr.
    """
    args = [encodeArgument(part) for part in self._make_cmd(tmpfilename, info_dict)]
    self._debug_cmd(args)
    proc = subprocess.Popen(args, stderr=subprocess.PIPE)
    # Communicate via the kill-aware wrapper so the child dies with us.
    _, stderr_output = process_communicate_or_kill(proc)
    if proc.returncode != 0:
        self.to_stderr(stderr_output.decode('utf-8', 'replace'))
    return proc.returncode
| 81
|
external.py
|
Python
|
youtube_dl/downloader/external.py
|
0700fde6403aa9eec1ff02bff7323696a205900c
|
youtube-dl
| 3
|
|
39,802
| 68
| 16
| 12
| 152
| 17
| 0
| 91
| 196
|
load_components
|
Adding prop reorder exceptions (#1866)
* Adding prop reorder exceptions
* Reworking prop order flag
* Removing unnecessary variable
* Reverting build:backends script changes
* Adding operator
* Adding default positional arg
* Updating docstring function
* Updated radioitems prop order
* Prop order changes for dcc
* Updated DataTable prop order
* Update Checklist prop order
* Updated DataTable prop order
* Update DatePickerSingle, DatePickerRange, Input, Link prop orders
* Re-running tests
* Re-running tests
|
https://github.com/plotly/dash.git
|
def load_components(metadata_path, namespace="default_namespace"):
    """Generate component classes from a react-docgen metadata file.

    Registers *namespace* for index include, then builds one class per
    component entry and returns them as a list.
    """
    # Register the component lib for index include.
    ComponentRegistry.registry.add(namespace)
    metadata = _get_metadata(metadata_path)
    loaded = []
    # Each key is a path to the component source file.
    for component_path, component_data in metadata.items():
        # Derive the component name from the path,
        # e.g. src/components/MyControl.react.js -> "MyControl".
        # TODO Make more robust - some folks will write .jsx and others
        # will be on windows. Unfortunately react-docgen doesn't include
        # the name of the component atm.
        component_name = component_path.split("/")[-1].split(".")[0]
        loaded.append(
            generate_class(
                component_name,
                component_data["props"],
                component_data["description"],
                namespace,
                None,
            )
        )
    return loaded
| 87
|
component_loader.py
|
Python
|
dash/development/component_loader.py
|
0f1b299dce356dbec6c669731663ba7ce6ef057d
|
dash
| 2
|
|
208,214
| 31
| 13
| 11
| 189
| 15
| 0
| 39
| 128
|
stamp
|
Fixed bug in group, chord, chain stamp() method, where the visitor overrides the
previously stamps in tasks of these objects (e.g. The tasks of the group had their
previous stamps overridden partially)
|
https://github.com/celery/celery.git
|
def stamp(self, visitor=None, **headers):
    """Stamp this signature with *headers*, merging with existing stamps.

    With a visitor, its ``on_signature`` result extends the headers;
    without one, every header not already in ``self.options`` is recorded
    as a stamped header. Previously stamped headers are preserved.
    """
    headers = headers.copy()
    if visitor is None:
        # No visitor: stamp every header that isn't already an option.
        headers["stamped_headers"] = [
            name for name in headers.keys() if name not in self.options
        ]
    else:
        headers.update(visitor.on_signature(self, **headers))
    _merge_dictionaries(headers, self.options)
    # Union the new stamped headers with any previously recorded ones.
    merged_stamps = set(self.options.get("stamped_headers", []))
    merged_stamps.update(headers["stamped_headers"])
    headers["stamped_headers"] = list(merged_stamps)
    return self.set(**headers)
| 115
|
canvas.py
|
Python
|
celery/canvas.py
|
7d4fe22d03dabe1de2cf5009cc6ea1064b46edcb
|
celery
| 4
|
|
153,802
| 163
| 16
| 68
| 694
| 48
| 0
| 304
| 1,145
|
_copartition
|
PERF-#4493: Use partition size caches more in Modin dataframe. (#4495)
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com>
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: mvashishtha <mahesh@ponder.io>
|
https://github.com/modin-project/modin.git
|
def _copartition(self, axis, other, how, sort, force_repartition=False):
    """Align the partitioning of ``self`` and ``other`` frame(s) along *axis*.

    Joins the indices of all frames along *axis* (per *how*/*sort*), then
    reindexes/repartitions every frame whose index or partition lengths
    differ from the chosen base frame, so all results share one layout.

    Parameters
    ----------
    axis : int
        0 to align rows, 1 to align columns.
    other : frame or list of frames
        Frame(s) of the same type as ``self`` to copartition with.
    how : str
        Join type passed to ``_join_index_objects`` (e.g. "outer").
    sort : bool
        Whether to sort the joined index.
    force_repartition : bool, default False
        Repartition every frame even when its index already matches.

    Returns
    -------
    tuple
        ``(self_partitions, list_of_other_partitions, joined_index,
        partition_lengths_along_axis)``.
    """
    if isinstance(other, type(self)):
        other = [other]
    self_index = self.axes[axis]
    others_index = [o.axes[axis] for o in other]
    joined_index, make_reindexer = self._join_index_objects(
        axis, [self_index] + others_index, how, sort
    )
    frames = [self] + other
    non_empty_frames_idx = [
        i for i, o in enumerate(frames) if o._partitions.size != 0
    ]
    # If all frames are empty
    if len(non_empty_frames_idx) == 0:
        return (
            self._partitions,
            [o._partitions for o in other],
            joined_index,
            # There are no partition sizes because the resulting dataframe
            # has no partitions.
            [],
        )
    base_frame_idx = non_empty_frames_idx[0]
    other_frames = frames[base_frame_idx + 1 :]
    # Picking first non-empty frame
    base_frame = frames[non_empty_frames_idx[0]]
    base_index = base_frame.axes[axis]
    # define conditions for reindexing and repartitioning `self` frame
    do_reindex_base = not base_index.equals(joined_index)
    do_repartition_base = force_repartition or do_reindex_base
    # Perform repartitioning and reindexing for `base_frame` if needed.
    # Also define length of base and frames. We will need to know the
    # lengths for alignment.
    if do_repartition_base:
        reindexed_base = base_frame._partition_mgr_cls.map_axis_partitions(
            axis,
            base_frame._partitions,
            make_reindexer(do_reindex_base, base_frame_idx),
        )
        if axis:
            base_lengths = [obj.width() for obj in reindexed_base[0]]
        else:
            base_lengths = [obj.length() for obj in reindexed_base.T[0]]
    else:
        # Base already aligned: reuse cached lengths instead of recomputing.
        reindexed_base = base_frame._partitions
        base_lengths = self._column_widths if axis else self._row_lengths
    others_lengths = [o._axes_lengths[axis] for o in other_frames]
    # define conditions for reindexing and repartitioning `other` frames
    do_reindex_others = [
        not o.axes[axis].equals(joined_index) for o in other_frames
    ]
    do_repartition_others = [None] * len(other_frames)
    for i in range(len(other_frames)):
        # A frame needs repartitioning when forced, when its index differs,
        # or when its partition lengths don't match the base layout.
        do_repartition_others[i] = (
            force_repartition
            or do_reindex_others[i]
            or others_lengths[i] != base_lengths
        )
    # perform repartitioning and reindexing for `other_frames` if needed
    reindexed_other_list = [None] * len(other_frames)
    for i in range(len(other_frames)):
        if do_repartition_others[i]:
            # indices of others frame start from `base_frame_idx` + 1
            reindexed_other_list[i] = other_frames[
                i
            ]._partition_mgr_cls.map_axis_partitions(
                axis,
                other_frames[i]._partitions,
                make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),
                lengths=base_lengths,
            )
        else:
            reindexed_other_list[i] = other_frames[i]._partitions
    # Reassemble in original order: empty leading frames, base, then others.
    reindexed_frames = (
        [frames[i]._partitions for i in range(base_frame_idx)]
        + [reindexed_base]
        + reindexed_other_list
    )
    return (reindexed_frames[0], reindexed_frames[1:], joined_index, base_lengths)
| 462
|
dataframe.py
|
Python
|
modin/core/dataframe/pandas/dataframe/dataframe.py
|
cca9468648521e9317de1cb69cf8e6b1d5292d21
|
modin
| 21
|
|
167,406
| 12
| 8
| 25
| 54
| 8
| 0
| 15
| 27
|
validate_kwargs
|
TYP: Missing return annotations in util/tseries/plotting (#47510)
* TYP: Missing return annotations in util/tseries/plotting
* the more tricky parts
|
https://github.com/pandas-dev/pandas.git
|
def validate_kwargs(fname, kwargs, compat_args) -> None:
    """Check that *kwargs* contains only valid keys with valid values.

    Raises if *kwargs* has keys absent from *compat_args* or values that
    differ from the allowed defaults.
    """
    keyword_copy = kwargs.copy()
    _check_for_invalid_keys(fname, kwargs, compat_args)
    _check_for_default_values(fname, keyword_copy, compat_args)
| 35
|
_validators.py
|
Python
|
pandas/util/_validators.py
|
4bb1fd50a63badd38b5d96d9c4323dae7bc36d8d
|
pandas
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.