id
int64 20
338k
| vocab_size
int64 2
671
| ast_levels
int64 4
32
| nloc
int64 1
451
| n_ast_nodes
int64 12
5.6k
| n_identifiers
int64 1
186
| n_ast_errors
int64 0
10
| n_words
int64 2
2.17k
| n_whitespaces
int64 2
13.8k
| fun_name
stringlengths 2
73
| commit_message
stringlengths 51
15.3k
| url
stringlengths 31
59
| code
stringlengths 51
31k
| ast_errors
stringlengths 0
1.46k
| token_counts
int64 6
3.32k
| file_name
stringlengths 5
56
| language
stringclasses 1
value | path
stringlengths 7
134
| commit_id
stringlengths 40
40
| repo
stringlengths 3
28
| complexity
int64 1
153
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
153,784
| 10
| 9
| 35
| 51
| 9
| 0
| 11
| 32
|
test_pipeline_simple
|
FEAT-#4412: Add Batch Pipeline API to Modin (#4452)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com>
Signed-off-by: Rehan Durrani <rehan@ponder.io>
|
https://github.com/modin-project/modin.git
|
def test_pipeline_simple(self):
arr = np.random.randint(0, 1000, (1000, 1000))
df = pd.DataFrame(arr)
| 257
|
test_pipeline.py
|
Python
|
modin/experimental/batch/test/test_pipeline.py
|
3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8
|
modin
| 6
|
|
269,633
| 5
| 7
| 2
| 26
| 3
| 1
| 5
| 10
|
epsilon
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def epsilon():
return _EPSILON
@keras_export("keras.backend.set_epsilon")
|
@keras_export("keras.backend.set_epsilon")
| 7
|
backend_config.py
|
Python
|
keras/backend_config.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 1
|
294,617
| 13
| 8
| 7
| 60
| 9
| 1
| 14
| 42
|
mock_av_open
|
Generic IP Camera configflow 2 (#52360)
Co-authored-by: J. Nick Koston <nick@koston.org>
|
https://github.com/home-assistant/core.git
|
def mock_av_open():
fake = Mock()
fake.streams.video = ["fakevid"]
return patch(
"homeassistant.components.generic.config_flow.av.open",
return_value=fake,
)
@pytest.fixture
|
@pytest.fixture
| 29
|
conftest.py
|
Python
|
tests/components/generic/conftest.py
|
c1a2be72fc8b76b55cfde1823c5688100e397369
|
core
| 1
|
315,483
| 22
| 9
| 8
| 95
| 10
| 0
| 31
| 58
|
test_hub_not_support_wireless
|
Add type hints and code cleanup for mikrotik (#74296)
* Add type hints and code cleanup for mikrotik
* update test and increase coverage
* move setup_mikrotik_entry to __init__.py
|
https://github.com/home-assistant/core.git
|
async def test_hub_not_support_wireless(hass, mock_device_registry_devices):
await setup_mikrotik_entry(hass, support_wireless=False)
device_1 = hass.states.get("device_tracker.device_1")
assert device_1
assert device_1.state == "home"
# device_2 is added from DHCP
device_2 = hass.states.get("device_tracker.device_2")
assert device_2
assert device_2.state == "home"
| 53
|
test_device_tracker.py
|
Python
|
tests/components/mikrotik/test_device_tracker.py
|
b09aaba421d6d6178d582bef9ea363017e55639d
|
core
| 1
|
|
211,512
| 56
| 11
| 13
| 218
| 20
| 0
| 124
| 191
|
check_points_in_polys
|
add fcosr model (#6765)
* add fcosr
* fix some problem
* add docs for fcosr
* modify code
* modify focsr reader
* finish tensorrt deployment with dynamic shape
* modify according to review comment
Co-authored-by: wangxinxin08 <>
|
https://github.com/PaddlePaddle/PaddleDetection.git
|
def check_points_in_polys(points, polys):
# [1, L, 2] -> [1, 1, L, 2]
points = points.unsqueeze(0)
# [B, N, 4, 2] -> [B, N, 1, 2]
a, b, c, d = polys.split(4, axis=2)
ab = b - a
ad = d - a
# [B, N, L, 2]
ap = points - a
# [B, N, 1]
norm_ab = paddle.sum(ab * ab, axis=-1)
# [B, N, 1]
norm_ad = paddle.sum(ad * ad, axis=-1)
# [B, N, L] dot product
ap_dot_ab = paddle.sum(ap * ab, axis=-1)
# [B, N, L] dot product
ap_dot_ad = paddle.sum(ap * ad, axis=-1)
# [B, N, L] <A, B> = |A|*|B|*cos(theta)
is_in_polys = (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (
ap_dot_ad >= 0) & (ap_dot_ad <= norm_ad)
return is_in_polys
| 136
|
rbox_utils.py
|
Python
|
ppdet/modeling/rbox_utils.py
|
92078713cced4f0d9450a6fc80a449fa75fd8c10
|
PaddleDetection
| 1
|
|
269,927
| 73
| 14
| 19
| 240
| 31
| 1
| 90
| 219
|
keras_model_summary
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def keras_model_summary(name, data, step=None):
summary_metadata = tf.compat.v1.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_keras_model"
# version number = 1
summary_metadata.plugin_data.content = b"1"
try:
json_string = data.to_json()
except Exception as exc: # pylint: disable=broad-except
# An exception should not break a model code.
logging.warning(
"Model failed to serialize as JSON. Ignoring... %s", exc
)
return False
with tf.summary.experimental.summary_scope(
name, "graph_keras_model", [data, step]
) as (tag, _):
with tf.device("cpu:0"):
tensor = tf.constant(json_string, dtype=tf.string)
return tf.summary.write(
tag=tag, tensor=tensor, step=step, metadata=summary_metadata
)
@keras_export("keras.callbacks.TensorBoard", v1=[])
|
@keras_export("keras.callbacks.TensorBoard", v1=[])
| 133
|
callbacks.py
|
Python
|
keras/callbacks.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 2
|
307,913
| 6
| 7
| 3
| 28
| 4
| 0
| 6
| 20
|
is_on
|
Code Quality Improvements for Advantage Air (#77695)
Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
|
https://github.com/home-assistant/core.git
|
def is_on(self) -> bool:
return self._ac["filterCleanStatus"]
| 15
|
binary_sensor.py
|
Python
|
homeassistant/components/advantage_air/binary_sensor.py
|
fa7f04c34ba2927151af0a9b42c044677b1c5d1a
|
core
| 1
|
|
247,371
| 31
| 10
| 22
| 97
| 6
| 0
| 38
| 303
|
test_content_type_validation
|
Add type hints to `tests/rest` (#12146)
* Add type hints to `tests/rest`
* newsfile
* change import from `SigningKey`
|
https://github.com/matrix-org/synapse.git
|
def test_content_type_validation(self) -> None:
self._test_path_validation(
[
"local_media_thumbnail_rel",
"local_media_thumbnail",
"remote_media_thumbnail_rel",
"remote_media_thumbnail",
"remote_media_thumbnail_rel_legacy",
"url_cache_thumbnail_rel",
"url_cache_thumbnail",
],
parameter="content_type",
valid_values=[
"image/jpeg",
],
invalid_values=[
"", # ValueError: not enough values to unpack
"image/jpeg/abc", # ValueError: too many values to unpack
"image/jpeg\x00",
],
)
| 52
|
test_filepath.py
|
Python
|
tests/rest/media/v1/test_filepath.py
|
7e91107be1a4287873266e588a3c5b415279f4c8
|
synapse
| 1
|
|
278,829
| 108
| 15
| 46
| 614
| 48
| 0
| 196
| 664
|
call
|
Remove pylint comments.
PiperOrigin-RevId: 452353044
|
https://github.com/keras-team/keras.git
|
def call(self, inputs, state):
_check_rnn_cell_input_dtypes([inputs, state])
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = tf.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])
input_size = inputs.get_shape().with_rank(2).dims[1].value
if input_size is None:
raise ValueError(
"Could not infer input size from inputs.get_shape()[-1]."
f"Received input shape: {inputs.get_shape()}"
)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = tf.matmul(tf.concat([inputs, m_prev], 1), self._kernel)
lstm_matrix = tf.nn.bias_add(lstm_matrix, self._bias)
i, j, f, o = tf.split(value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
c = sigmoid(
f + self._forget_bias + self._w_f_diag * c_prev
) * c_prev + sigmoid(
i + self._w_i_diag * c_prev
) * self._activation(
j
)
else:
c = sigmoid(f + self._forget_bias) * c_prev + sigmoid(
i
) * self._activation(j)
if self._cell_clip is not None:
c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
m = tf.matmul(m, self._proj_kernel)
if self._proj_clip is not None:
m = tf.clip_by_value(m, -self._proj_clip, self._proj_clip)
new_state = (
LSTMStateTuple(c, m)
if self._state_is_tuple
else tf.concat([c, m], 1)
)
return m, new_state
| 397
|
legacy_cells.py
|
Python
|
keras/layers/rnn/legacy_cells.py
|
3613c3defc39c236fb1592c4f7ba1a9cc887343a
|
keras
| 10
|
|
126,245
| 12
| 11
| 8
| 115
| 10
| 0
| 19
| 87
|
testReporterDetection
|
[air] Add annotation for Tune module. (#27060)
Co-authored-by: Kai Fricke <kai@anyscale.com>
|
https://github.com/ray-project/ray.git
|
def testReporterDetection(self):
reporter = _detect_reporter()
self.assertTrue(isinstance(reporter, CLIReporter))
self.assertFalse(isinstance(reporter, JupyterNotebookReporter))
with patch("ray.tune.progress_reporter.IS_NOTEBOOK", True):
reporter = _detect_reporter()
self.assertFalse(isinstance(reporter, CLIReporter))
self.assertTrue(isinstance(reporter, JupyterNotebookReporter))
| 68
|
test_progress_reporter.py
|
Python
|
python/ray/tune/tests/test_progress_reporter.py
|
eb69c1ca286a2eec594f02ddaf546657a8127afd
|
ray
| 1
|
|
32,803
| 49
| 16
| 19
| 169
| 20
| 0
| 57
| 238
|
prepare_video_inputs
|
Add VideoMAE (#17821)
* First draft
* Add VideoMAEForVideoClassification
* Improve conversion script
* Add VideoMAEForPreTraining
* Add VideoMAEFeatureExtractor
* Improve VideoMAEFeatureExtractor
* Improve docs
* Add first draft of model tests
* Improve VideoMAEForPreTraining
* Fix base_model_prefix
* Make model take pixel_values of shape (B, T, C, H, W)
* Add loss computation of VideoMAEForPreTraining
* Improve tests
* Improve model testsé
* Make all tests pass
* Add VideoMAE to main README
* Add tests for VideoMAEFeatureExtractor
* Add integration test
* Improve conversion script
* Rename patch embedding class
* Remove VideoMAELayer from init
* Update design of patch embeddings
* Improve comments
* Improve conversion script
* Improve conversion script
* Add conversion of pretrained model
* Add loss verification of pretrained model
* Add loss verification of unnormalized targets
* Add integration test for pretraining model
* Apply suggestions from code review
* Fix bug to make feature extractor resize only shorter edge
* Address more comments
* Improve normalization of videos
* Add doc examples
* Move constants to dedicated script
* Remove scripts
* Transfer checkpoints, fix docs
* Update script
* Update image mean and std
* Fix doc tests
* Set return_tensors to NumPy by default
* Revert the previous change
Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
|
https://github.com/huggingface/transformers.git
|
def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
video_inputs = []
for i in range(feature_extract_tester.batch_size):
if equal_resolution:
width = height = feature_extract_tester.max_resolution
else:
width, height = np.random.choice(
np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2
)
video = prepare_video(
feature_extract_tester=feature_extract_tester,
width=width,
height=height,
numpify=numpify,
torchify=torchify,
)
video_inputs.append(video)
return video_inputs
| 111
|
test_feature_extraction_common.py
|
Python
|
tests/test_feature_extraction_common.py
|
f9a0008d2d3082a665f711b24f5314e4a8205fab
|
transformers
| 4
|
|
300,665
| 7
| 10
| 3
| 38
| 6
| 0
| 7
| 21
|
async_press
|
Add QNAP QSW Button platform (#70980)
Co-authored-by: J. Nick Koston <nick@koston.org>
|
https://github.com/home-assistant/core.git
|
async def async_press(self) -> None:
await self.entity_description.press_action(self.coordinator.qsw)
| 21
|
button.py
|
Python
|
homeassistant/components/qnap_qsw/button.py
|
abe78b1212602d8b19562d6acc0adf9361302327
|
core
| 1
|
|
298,113
| 11
| 12
| 7
| 54
| 5
| 0
| 12
| 45
|
reolink_connect_fixture
|
Add reolink IP NVR/Camera integration (#84081)
Co-authored-by: J. Nick Koston <nick@koston.org>
|
https://github.com/home-assistant/core.git
|
def reolink_connect_fixture(mock_get_source_ip):
with patch(
"homeassistant.components.reolink.async_setup_entry", return_value=True
), patch(
"homeassistant.components.reolink.host.Host", return_value=get_mock_info()
):
yield
| 28
|
test_config_flow.py
|
Python
|
tests/components/reolink/test_config_flow.py
|
a06b1eaf69ce333222c572cf8cb9bceafa7db211
|
core
| 1
|
|
297,471
| 12
| 10
| 6
| 54
| 7
| 0
| 12
| 55
|
as_dict
|
Update intent response (#83858)
* Add language to conversation and intent response
* Move language to intent response instead of speech
* Extend intent response for voice MVP
* Add tests for error conditions in conversation/process
* Move intent response type data into "data" field
* Move intent response error message back to speech
* Remove "success" from intent response
* Add id to target in intent response
* target defaults to None
* Update homeassistant/helpers/intent.py
* Fix test
* Return conversation_id and multiple targets
* Clean up git mess
* Fix linting errors
* Fix more async_handle signatures
* Separate conversation_id and IntentResponse
* Add unknown error code
* Add ConversationResult
* Don't set domain on single entity
* Language is required for intent response
* Add partial_action_done
* Default language in almond agent
Co-authored-by: Paulus Schoutsen <balloob@gmail.com>
|
https://github.com/home-assistant/core.git
|
def as_dict(self) -> dict[str, Any]:
return {
"response": self.response.as_dict(),
"conversation_id": self.conversation_id,
}
| 32
|
agent.py
|
Python
|
homeassistant/components/conversation/agent.py
|
961c8cc167bfbd4d18e1644a9044af2210a2e9f1
|
core
| 1
|
|
130,485
| 25
| 14
| 14
| 122
| 12
| 0
| 30
| 116
|
resources_avail_summary
|
[CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
|
https://github.com/ray-project/ray.git
|
def resources_avail_summary(self) -> str:
total_resources = (
reduce(add_resources, self.static_resources_by_ip.values())
if self.static_resources_by_ip
else {}
)
out = "{} CPUs".format(int(total_resources.get("CPU", 0)))
if "GPU" in total_resources:
out += ", {} GPUs".format(int(total_resources["GPU"]))
return out
| 70
|
load_metrics.py
|
Python
|
python/ray/autoscaler/_private/load_metrics.py
|
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
|
ray
| 3
|
|
142,798
| 6
| 9
| 2
| 30
| 5
| 0
| 6
| 20
|
can_stage
|
[tune/structure] Introduce execution package (#26015)
Execution-specific packages are moved to tune.execution.
Co-authored-by: Xiaowei Jiang <xwjiang2010@gmail.com>
|
https://github.com/ray-project/ray.git
|
def can_stage(self):
return len(self._staging_futures) < self._max_staging
| 17
|
placement_groups.py
|
Python
|
python/ray/tune/execution/placement_groups.py
|
0959f44b6fc217a4f2766ed46a721eb79b067b2c
|
ray
| 1
|
|
154,552
| 16
| 11
| 6
| 64
| 9
| 0
| 17
| 63
|
_add_projection
|
FEAT-#4946: Replace OmniSci with HDK (#4947)
Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
|
https://github.com/modin-project/modin.git
|
def _add_projection(self, frame):
proj = CalciteProjectionNode(
frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]
)
self._push(proj)
return proj
| 41
|
calcite_builder.py
|
Python
|
modin/experimental/core/execution/native/implementations/hdk_on_native/calcite_builder.py
|
e5b1888cd932909e49194d58035da34b210b91c4
|
modin
| 2
|
|
297,153
| 13
| 8
| 15
| 50
| 10
| 0
| 13
| 25
|
test_thermo_off
|
Blebox add thermoBox to climate (#81090)
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
|
https://github.com/home-assistant/core.git
|
async def test_thermo_off(thermobox, hass, caplog):
caplog.set_level(logging.ERROR)
feature_mock, entity_id = thermobox
await async_setup_entity(hass, entity_id)
| 108
|
test_climate.py
|
Python
|
tests/components/blebox/test_climate.py
|
923fa473e171fcdf396556ea200612e378f9b0a5
|
core
| 1
|
|
259,209
| 41
| 13
| 13
| 143
| 14
| 0
| 68
| 217
|
_compute_n_features_outs
|
ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment regarding drop
* BUG Fixes method name
* DOC Clearer docstring
* TST Adds more tests
* FIX Fixes mege
* CLN More pythonic
* CLN Address comments
* STY Flake8
* CLN Address comments
* DOC Fix
* MRG
* WIP
* ENH Address comments
* STY Fix
* ENH Use functiion call instead of property
* ENH Adds counts feature
* CLN Rename variables
* DOC More details
* CLN Remove unneeded line
* CLN Less lines is less complicated
* CLN Less diffs
* CLN Improves readiabilty
* BUG Fix
* CLN Address comments
* TST Fix
* CLN Address comments
* CLN Address comments
* CLN Move docstring to userguide
* DOC Better wrapping
* TST Adds test to handle_unknown='error'
* ENH Spelling error in docstring
* BUG Fixes counter with nan values
* BUG Removes unneeded test
* BUG Fixes issue
* ENH Sync with main
* DOC Correct settings
* DOC Adds docstring
* DOC Immprove user guide
* DOC Move to 1.0
* DOC Update docs
* TST Remove test
* DOC Update docstring
* STY Linting
* DOC Address comments
* ENH Neater code
* DOC Update explaination for auto
* Update sklearn/preprocessing/_encoders.py
Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com>
* TST Uses docstring instead of comments
* TST Remove call to fit
* TST Spelling error
* ENH Adds support for drop + infrequent categories
* ENH Adds infrequent_if_exist option
* DOC Address comments for user guide
* DOC Address comments for whats_new
* DOC Update docstring based on comments
* CLN Update test with suggestions
* ENH Adds computed property infrequent_categories_
* DOC Adds where the infrequent column is located
* TST Adds more test for infrequent_categories_
* DOC Adds docstring for _compute_drop_idx
* CLN Moves _convert_to_infrequent_idx into its own method
* TST Increases test coverage
* TST Adds failing test
* CLN Careful consideration of dropped and inverse_transform
* STY Linting
* DOC Adds docstrinb about dropping infrequent
* DOC Uses only
* DOC Numpydoc
* TST Includes test for get_feature_names_out
* DOC Move whats new
* DOC Address docstring comments
* DOC Docstring changes
* TST Better comments
* TST Adds check for handle_unknown='ignore' for infrequent
* CLN Make _infrequent_indices private
* CLN Change min_frequency default to None
* DOC Adds comments
* ENH adds support for max_categories=1
* ENH Describe lexicon ordering for ties
* DOC Better docstring
* STY Fix
* CLN Error when explicity dropping an infrequent category
* STY Grammar
Co-authored-by: Joel Nothman <joel.nothman@gmail.com>
Co-authored-by: Roman Yurchak <rth.yurchak@gmail.com>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
|
https://github.com/scikit-learn/scikit-learn.git
|
def _compute_n_features_outs(self):
output = [len(cats) for cats in self.categories_]
if self.drop_idx_ is not None:
for i, drop_idx in enumerate(self.drop_idx_):
if drop_idx is not None:
output[i] -= 1
if not self._infrequent_enabled:
return output
# infrequent is enabled, the number of features out are reduced
# because the infrequent categories are grouped together
for i, infreq_idx in enumerate(self._infrequent_indices):
if infreq_idx is None:
continue
output[i] -= infreq_idx.size - 1
return output
| 90
|
_encoders.py
|
Python
|
sklearn/preprocessing/_encoders.py
|
7f0006c8aad1a09621ad19c3db19c3ff0555a183
|
scikit-learn
| 8
|
|
101,230
| 14
| 9
| 4
| 56
| 11
| 0
| 19
| 54
|
_func_mapping
|
lib.align updates:
- alignments.py
- Add typed dicts for imported alignments
- Explicitly check for presence of thumb value in alignments dict
- linting
- detected_face.py
- Typing
- Linting
- Legacy support for pre-aligned face
- Update dependencies to new property names
|
https://github.com/deepfakes/faceswap.git
|
def _func_mapping(self) -> Dict[Literal["gaussian", "normalized"], Callable]:
return dict(gaussian=cv2.GaussianBlur, # pylint: disable = no-member
normalized=cv2.blur) # pylint: disable = no-member
| 33
|
detected_face.py
|
Python
|
lib/align/detected_face.py
|
5e73437be47f2410439a3c6716de96354e6a0c94
|
faceswap
| 1
|
|
85,890
| 25
| 10
| 35
| 118
| 14
| 0
| 27
| 87
|
create_counter_function
|
fix(hybrid): Add silo mode to "model exists" conditions (#38836)
In several places where the existence of a model is checked via
`app_config.get_model`, replace with a new utility function that also
checks whether the model is available in the current silo mode.
Promote the `__silo_limit` meta-attributes to publicly visible
attributes, dropping the leading underscores.
|
https://github.com/getsentry/sentry.git
|
def create_counter_function(app_config, using, **kwargs):
if app_config and app_config.name != "sentry":
return
if not get_model_if_available(app_config, "Counter"):
return
cursor = connections[using].cursor()
try:
cursor.execute(
)
finally:
cursor.close()
post_migrate.connect(create_counter_function, dispatch_uid="create_counter_function", weak=False)
| 55
|
counter.py
|
Python
|
src/sentry/models/counter.py
|
729b8112ebd7becdcecb503ba62bd69d97163efa
|
sentry
| 5
|
|
311,812
| 7
| 8
| 5
| 55
| 8
| 0
| 7
| 35
|
add_entities
|
Add missing type hints to homekit_controller (#65368)
|
https://github.com/home-assistant/core.git
|
def add_entities(self) -> None:
self._add_new_entities(self.listeners)
self._add_new_entities_for_accessory(self.accessory_factories)
self._add_new_entities_for_char(self.char_factories)
| 32
|
connection.py
|
Python
|
homeassistant/components/homekit_controller/connection.py
|
9f5d77e0df957c20a2af574d706140786f0a551a
|
core
| 1
|
|
309,595
| 10
| 9
| 5
| 49
| 5
| 0
| 10
| 38
|
shutdown
|
Add LG webOS Smart TV config flow support (#64117)
* Add webOS Smart TV config flow support (#53256)
* Add Webostv config flow
* Fix tests mocks and apply review comments
* Apply review comments
* Change config flow to use ssdp UDN as unique_id
* Fix device info
* More review comments
* Fix _async_check_configured_entry
* Remove turn on script
* Add webOS Smart TV device triggers (#53752)
* Add webOS Smart TV config flow support (#53256)
* Add Webostv config flow
* Fix tests mocks and apply review comments
* Apply review comments
* Change config flow to use ssdp UDN as unique_id
* Fix device info
* More review comments
* Fix _async_check_configured_entry
* Remove turn on script
* Add webOS Smart TV device triggers (#53752)
* Fix webOS Smart TV mypy and pylint errors (#62620)
* Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv (#62633)
* Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv
* Update bscpylgtv to 0.2.8 (revised websockets requirment)
* Change webOS Smart TV PyPi package to aiowebostv (#63759)
* Change webOS Smart TV PyPi package to aiowebostv
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* webOS TV check UUID for user added device (#63817)
* webOS TV check uuid when for user added device
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Add test for form abort and host update
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Rework webOS Smart TV device trigger to custom trigger platform (#63950)
* Rework webOS Smart TV device trigger to custom trigger platform
* Review comments and add tests
* Fix webOS TV import from YAML (#63996)
* Fix webOS TV import from YAML
* Fix requirements
* Migrate YAML entities unique id to UUID
* Add backoff to migration task delay
* Assert result data and unique_id
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Add codeowner
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
|
https://github.com/home-assistant/core.git
|
async def shutdown(self) -> None:
assert self.client
self.client.clear_state_update_callbacks()
await self.client.disconnect()
| 27
|
__init__.py
|
Python
|
homeassistant/components/webostv/__init__.py
|
dee843bf6e5ca84a94f336a239f6a6138c4c28e6
|
core
| 1
|
|
190,121
| 21
| 14
| 11
| 104
| 15
| 0
| 25
| 146
|
get_mobject_family_members
|
Replaced renderer strings with :class:`.RendererType` enum entries (#3017)
* remove unused constants
* remove deprecated --use_opengl_renderer flag
* remove unnecessary workaround with class initialization
* add OpenGLMobject.name to get rid of one renderer check
* add VMobject.n_points_per_curve property to get rid of more renderer checks
* replace renderer string checks with enum check
* added mobject.utils module with renderer-dependent class getters
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* ensure that capitalization of passed renderer type is irrelevant
* remove unused entries from mobject.utils.__all__
* fixed isort ignore in manim.__init__
* fixed lower-case casting of passed renderer
* fixed doctests
* more documentation + doctests for mobject.utils
* removed incorrect paragraph about ConverToOpenGL metaclass
* added docstring for RendererType enum
* renderer compatibility section in plugin dev documentation
* added mobject.utils to reference manual
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Remove actual doctest (it ran the compatibility code)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Naveen M K <naveen521kk@gmail.com>
|
https://github.com/ManimCommunity/manim.git
|
def get_mobject_family_members(self):
if config.renderer == RendererType.OPENGL:
family_members = []
for mob in self.mobjects:
family_members.extend(mob.get_family())
return family_members
elif config.renderer == RendererType.CAIRO:
return extract_mobject_family_members(
self.mobjects,
use_z_index=self.renderer.camera.use_z_index,
)
| 65
|
scene.py
|
Python
|
manim/scene/scene.py
|
bd844f46d804c8cad50d06ad20ab5bebaee9987b
|
manim
| 4
|
|
34,704
| 29
| 13
| 10
| 189
| 19
| 0
| 42
| 116
|
test_create_position_ids_respects_padding_index
|
Add support for XLM-R XL and XXL models by modeling_xlm_roberta_xl.py (#13727)
* add xlm roberta xl
* add convert xlm xl fairseq checkpoint to pytorch
* fix init and documents for xlm-roberta-xl
* fix indention
* add test for XLM-R xl,xxl
* fix model hub name
* fix some stuff
* up
* correct init
* fix more
* fix as suggestions
* add torch_device
* fix default values of doc strings
* fix leftovers
* merge to master
* up
* correct hub names
* fix docs
* fix model
* up
* finalize
* last fix
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
* add copied from
* make style
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
|
https://github.com/huggingface/transformers.git
|
def test_create_position_ids_respects_padding_index(self):
config = self.model_tester.prepare_config_and_inputs()[0]
model = XLMRobertaXLEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
| 124
|
test_modeling_xlm_roberta_xl.py
|
Python
|
tests/test_modeling_xlm_roberta_xl.py
|
e09473a817c5e5871e11cc81004355ef30250502
|
transformers
| 1
|
|
276,285
| 17
| 10
| 6
| 68
| 9
| 0
| 21
| 71
|
_prefix_output_keys
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def _prefix_output_keys(self, output_dict, output_name):
new_outputs = {}
for key, val in output_dict.items():
key = self._prefix_key(key, output_name)
new_outputs[key] = val
return new_outputs
| 43
|
export_output.py
|
Python
|
keras/saving/utils_v1/export_output.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 2
|
|
278,793
| 31
| 12
| 5
| 53
| 8
| 0
| 35
| 60
|
_delegate_property
|
Remove pylint comments.
PiperOrigin-RevId: 452353044
|
https://github.com/keras-team/keras.git
|
def _delegate_property(keras_tensor_cls, property_name):
# We use a lambda because we can't create a Keras layer at import time
# due to dynamic layer class versioning.
property_access = property(
lambda self: InstanceProperty(property_name)(self)
)
setattr(keras_tensor_cls, property_name, property_access)
| 31
|
tf_op_layer.py
|
Python
|
keras/layers/core/tf_op_layer.py
|
3613c3defc39c236fb1592c4f7ba1a9cc887343a
|
keras
| 1
|
|
255,399
| 9
| 9
| 19
| 40
| 4
| 0
| 10
| 32
|
test_case_name_collision_prefix
|
Use Python type annotations rather than comments (#3962)
* These have been supported since Python 3.5.
ONNX doesn't support Python < 3.6, so we can use the annotations.
Diffs generated by https://pypi.org/project/com2ann/.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Remove MYPY conditional logic in gen_proto.py
It breaks the type annotations and shouldn't be needed.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Get rid of MYPY bool from more scripts
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* move Descriptors class above where its referenced in type annotation
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fixes
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* remove extra blank line
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix type annotations
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix type annotation in gen_docs
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix Operators.md
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix TestCoverage.md
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix protoc-gen-mypy.py
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
|
https://github.com/onnx/onnx.git
|
def test_case_name_collision_prefix(self) -> None:
m1_def =
io_map = [("C", "A")]
| 42
|
compose_test.py
|
Python
|
onnx/test/compose_test.py
|
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
|
onnx
| 1
|
|
292,466
| 5
| 8
| 2
| 34
| 6
| 1
| 5
| 10
|
set_tz
|
Improve Vallox filter remaining time sensor (#66763)
|
https://github.com/home-assistant/core.git
|
def set_tz(request):
return request.getfixturevalue(request.param)
@pytest.fixture
|
@pytest.fixture
| 15
|
test_sensor.py
|
Python
|
tests/components/vallox/test_sensor.py
|
744a2013cd4a9bf98935397a3262f15f35047b7e
|
core
| 1
|
142,397
| 6
| 9
| 3
| 38
| 6
| 0
| 6
| 15
|
global_gc
|
[api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695)
Enable checking of the ray core module, excluding serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. This required moving many files to ray._private and associated fixes.
|
https://github.com/ray-project/ray.git
|
def global_gc():
worker = ray._private.worker.global_worker
worker.core_worker.global_gc()
| 21
|
internal_api.py
|
Python
|
python/ray/_private/internal_api.py
|
43aa2299e6623c8f8c7c4a1b80133459d0aa68b0
|
ray
| 1
|
|
176,681
| 85
| 14
| 20
| 495
| 37
| 0
| 128
| 287
|
betweenness_centrality_parallel
|
Remove redundant py2 numeric conversions (#5661)
* Remove redundant float conversion
* Remove redundant int conversion
* Use integer division
Co-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>
|
https://github.com/networkx/networkx.git
|
def betweenness_centrality_parallel(G, processes=None):
    """Compute betweenness centrality of ``G`` using a process pool.

    The node set is split into chunks, each chunk is scored with
    ``nx.betweenness_centrality_subset`` in a worker process, and the
    partial per-node scores are summed.

    Parameters
    ----------
    G : graph
    processes : int or None
        Pool size; ``None`` lets ``multiprocessing.Pool`` choose.

    Returns
    -------
    dict
        Mapping of node -> betweenness centrality score.
    """
    p = Pool(processes=processes)
    # Use several chunks per worker so scheduling stays balanced even if
    # some chunks are slower than others.
    node_divisor = len(p._pool) * 4
    node_chunks = list(chunks(G.nodes(), G.order() // node_divisor))
    num_chunks = len(node_chunks)
    # Each worker scores paths sourced from its chunk against all nodes.
    bt_sc = p.starmap(
        nx.betweenness_centrality_subset,
        zip(
            [G] * num_chunks,
            node_chunks,
            [list(G)] * num_chunks,
            [True] * num_chunks,
            [None] * num_chunks,
        ),
    )
    # Reduce the partial solutions
    bt_c = bt_sc[0]
    for bt in bt_sc[1:]:
        for n in bt:
            bt_c[n] += bt[n]
    return bt_c
# Demo: compare the parallel and serial betweenness-centrality timings on
# three 1000-node random graphs, then draw the Barabasi-Albert graph.
G_ba = nx.barabasi_albert_graph(1000, 3)
G_er = nx.gnp_random_graph(1000, 0.01)
G_ws = nx.connected_watts_strogatz_graph(1000, 4, 0.1)
for G in [G_ba, G_er, G_ws]:
    print("")
    print("Computing betweenness centrality for:")
    print(nx.info(G))
    print("\tParallel version")
    start = time.time()
    bt = betweenness_centrality_parallel(G)
    print(f"\t\tTime: {(time.time() - start):.4F} seconds")
    print(f"\t\tBetweenness centrality for node 0: {bt[0]:.5f}")
    print("\tNon-Parallel version")
    start = time.time()
    bt = nx.betweenness_centrality(G)
    print(f"\t\tTime: {(time.time() - start):.4F} seconds")
    print(f"\t\tBetweenness centrality for node 0: {bt[0]:.5f}")
print("")
nx.draw(G_ba, node_size=100)
plt.show()
| 127
|
plot_parallel_betweenness.py
|
Python
|
examples/algorithms/plot_parallel_betweenness.py
|
2a05ccdb07cff88e56661dee8a9271859354027f
|
networkx
| 3
|
|
132,279
| 48
| 13
| 11
| 153
| 13
| 0
| 65
| 201
|
__call__
|
[CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
|
https://github.com/ray-project/ray.git
|
def __call__(self, trial_id, result):
    """Record ``result``'s tracked metric and decide whether to stop.

    Keeps only the best ``self._top`` metric values seen so far, counts
    consecutive plateaued iterations, and delegates the final stop
    decision to ``self.stop_all()``.
    """
    self._top_values.append(result[self._metric])
    if self._mode == "min":
        # Keep the `_top` smallest values seen so far.
        self._top_values = sorted(self._top_values)[: self._top]
    else:
        # Keep the `_top` largest values seen so far.
        self._top_values = sorted(self._top_values)[-self._top :]
    # If the current iteration has to stop
    if self.has_plateaued():
        # we increment the total counter of iterations
        self._iterations += 1
    else:
        # otherwise we reset the counter
        self._iterations = 0
    # and then call the method that re-executes
    # the checks, including the iterations.
    return self.stop_all()
| 90
|
stopper.py
|
Python
|
python/ray/tune/stopper.py
|
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
|
ray
| 3
|
|
291,317
| 14
| 12
| 9
| 102
| 10
| 0
| 21
| 80
|
test_text_value_outside_bounds
|
Add `text` platform (#79454)
Co-authored-by: Franck Nijhof <frenck@frenck.nl>
Co-authored-by: Franck Nijhof <git@frenck.dev>
|
https://github.com/home-assistant/core.git
|
async def test_text_value_outside_bounds(hass):
    """Reading ``state`` must raise when the value violates min/max length."""
    # "hello world" is 11 chars: above native_max=5 here...
    with pytest.raises(ValueError):
        MockTextEntity(
            "hello world", native_min=2, native_max=5, pattern=r"[a-z]"
        ).state
    # ...and below native_min=15 here.
    with pytest.raises(ValueError):
        MockTextEntity(
            "hello world", native_min=15, native_max=20, pattern=r"[a-z]"
        ).state
| 60
|
test_init.py
|
Python
|
tests/components/text/test_init.py
|
003e4224c89a6da381960dc5347750d1521d85c9
|
core
| 1
|
|
154,064
| 6
| 7
| 2
| 39
| 7
| 1
| 6
| 11
|
_nullcontext
|
FEAT-#4147: Add partial compatibility with Python 3.6 and pandas 1.1 (#4301)
Signed-off-by: Devin Petersohn <devin.petersohn@gmail.com>
Signed-off-by: Vasily Litvinov <fam1ly.n4me@yandex.ru>
Co-authored-by: Alexey Prutskov <lehaprutskov@gmail.com>
Co-authored-by: Rehan Durrani <rehan@ponder.io>
Co-authored-by: Igoshev, Yaroslav <Poolliver868@mail.ru>
Co-authored-by: Myachev, Anatoly <anatoly.myachev@intel.com>
|
https://github.com/modin-project/modin.git
|
def _nullcontext():
    """Yield once with no setup or teardown (a no-op context body).

    NOTE(review): looks intended for use with ``contextlib.contextmanager``
    (the decorator is not visible in this chunk) — confirm at call sites.
    """
    yield
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
|
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
| 6
|
test_general.py
|
Python
|
modin/pandas/test/test_general.py
|
6ce9cf4daec7f9996038205289bce2186be87611
|
modin
| 1
|
107,021
| 7
| 6
| 2
| 32
| 6
| 0
| 7
| 21
|
get_window_extent
|
Deprecate accepting arbitrary parameters in some get_window_extent() methods
|
https://github.com/matplotlib/matplotlib.git
|
def get_window_extent(self, renderer=None, *args, **kwargs):
    """Return the axes' bounding box (``self.bbox``).

    ``renderer`` and any extra positional/keyword arguments are accepted
    for API compatibility but are not used.
    """
    return self.bbox
| 20
|
_base.py
|
Python
|
lib/matplotlib/axes/_base.py
|
56b1ccbf7ab2eb177ad1ab2f957d58a378ff9b24
|
matplotlib
| 1
|
|
248,138
| 7
| 8
| 16
| 34
| 5
| 0
| 7
| 21
|
is_out_of_band_membership
|
Exclude OOB memberships from the federation sender (#12570)
As the comment says, there is no need to process such events, and indeed we
need to avoid doing so.
Fixes #12509.
|
https://github.com/matrix-org/synapse.git
|
def is_out_of_band_membership(self) -> bool:
    """Report whether this event carries the out-of-band-membership flag."""
    event_dict = self._dict
    return event_dict.get("out_of_band_membership", False)
| 19
|
__init__.py
|
Python
|
synapse/events/__init__.py
|
db2edf5a65c5bcac565e052b2dbd74253755a717
|
synapse
| 1
|
|
122,210
| 48
| 15
| 16
| 269
| 28
| 0
| 75
| 115
|
sample_product_testcases
|
Add an internal jtu.sample_product test decorator.
This decorator samples from a cartesian product of parameterized tests
without materializing the full product explicitly.
Update lax_test.py to use the new decorator.
On my desktop machine, this improves the timing for `pytest
--collect-only tests/lax_test.py` from 6.8s to 1.9s.
|
https://github.com/google/jax.git
|
def sample_product_testcases(*args, **kw):
    """Sample testcases from the cartesian product of the given parameters.

    Positional args are iterables of dicts (partial testcases); keyword
    args map a parameter name to an iterable of values. At most
    ``FLAGS.num_generated_cases`` cases are drawn, without materializing
    the full product.

    Returns a list of assembled testcase dicts.
    """
    args = [list(arg) for arg in args]
    kw = [(k, list(v)) for k, v in kw.items()]
    # Total size of the cartesian product across all axes.
    n = prod(len(a) for a in args) * prod(len(v) for _, v in kw)
    # Fixed seed so the sampled subset is deterministic across runs.
    rng = np.random.RandomState(42)
    testcases = []
    for i in rng.choice(n, size=min(n, FLAGS.num_generated_cases), replace=False):
        testcase = {}
        # Decode index i into one element per axis (mixed-radix decoding).
        for a in args:
            testcase.update(a[i % len(a)])
            i //= len(a)
        for k, v in kw:
            testcase[k] = v[i % len(v)]
            i //= len(v)
        testcases.append(testcase)
    return testcases
| 166
|
test_util.py
|
Python
|
jax/_src/test_util.py
|
c7e5d3dc9576790b76a9f0f222a9bcc280f033cc
|
jax
| 8
|
|
268,718
| 6
| 7
| 3
| 28
| 4
| 0
| 6
| 20
|
pid
|
ansible-test - Improve container management. (#78550)
See changelogs/fragments/ansible-test-container-management.yml for details.
|
https://github.com/ansible/ansible.git
|
def pid(self) -> int:
    """Return the process ID recorded under ``'Pid'`` in ``self.state``."""
    container_state = self.state
    return container_state['Pid']
| 15
|
docker_util.py
|
Python
|
test/lib/ansible_test/_internal/docker_util.py
|
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
|
ansible
| 1
|
|
154,347
| 11
| 7
| 2
| 30
| 5
| 0
| 11
| 26
|
_iloc
|
PERF-#4866: `iloc` function that used in `partition.mask` should be serialized only once (#4901)
Co-authored-by: Vasily Litvinov <fam1ly.n4me@yandex.ru>
Signed-off-by: Myachev <anatoly.myachev@intel.com>
|
https://github.com/modin-project/modin.git
|
def _iloc(df, row_labels, col_labels): # noqa: RT01, PR01
return df.iloc[row_labels, col_labels]
| 19
|
partition.py
|
Python
|
modin/core/dataframe/pandas/partitioning/partition.py
|
5ff947b9d1237164753e8ba81998933f13f1e243
|
modin
| 1
|
|
190,069
| 13
| 11
| 4
| 73
| 13
| 0
| 13
| 41
|
write_subcaption_file
|
Migrate more `os.path` to `pathlib` (#2980)
* Migrate more `os.path` to `pathlib`
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix type errors with recent pathlib code
* pathlib fixes
* more pathlib fixes
* remove unused imports introduced by pathlib migration
* convert `open()` calls to pathlib
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Migrate tex_file_writing to pathlib
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* converted more old code to pathlib, and fixed a bug in module_ops
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix test failures
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix test failures
* Apply suggestions from code review
Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
|
https://github.com/ManimCommunity/manim.git
|
def write_subcaption_file(self):
    """Write the scene's collected subcaptions as an SRT file."""
    # Same base name as the rendered output file, with an .srt extension.
    subcaption_file = Path(config.output_file).with_suffix(".srt")
    subcaption_file.write_text(srt.compose(self.subcaptions))
    logger.info(f"Subcaption file has been written as {subcaption_file}")
| 39
|
scene_file_writer.py
|
Python
|
manim/scene/scene_file_writer.py
|
9d1f066d637cb15baea10e6907ab85efff8fb36f
|
manim
| 1
|
|
141,550
| 59
| 14
| 20
| 259
| 31
| 0
| 86
| 324
|
get_q_value_distributions
|
[RLlib] Issue 25503: Replace torch.range with torch.arange. (#25640)
|
https://github.com/ray-project/ray.git
|
def get_q_value_distributions(self, model_out):
    """Compute (distributional) Q-values from the model's output features.

    When ``num_atoms > 1`` (distributional Q-learning) returns
    ``(action_scores, z, support_logits_per_action, logits, probs)``;
    otherwise returns ``(action_scores, logits, logits)`` where logits is
    a dummy all-ones tensor.
    """
    action_scores = self.advantage_module(model_out)
    if self.num_atoms > 1:
        # Distributional Q-learning uses a discrete support z
        # to represent the action value distribution
        z = torch.arange(0.0, self.num_atoms, dtype=torch.float32).to(
            action_scores.device
        )
        # Rescale the support from [0, num_atoms-1] onto [v_min, v_max].
        z = self.v_min + z * (self.v_max - self.v_min) / float(self.num_atoms - 1)
        support_logits_per_action = torch.reshape(
            action_scores, shape=(-1, self.action_space.n, self.num_atoms)
        )
        support_prob_per_action = nn.functional.softmax(
            support_logits_per_action, dim=-1
        )
        # Expected Q-value per action: sum over atoms of z * p(z).
        action_scores = torch.sum(z * support_prob_per_action, dim=-1)
        logits = support_logits_per_action
        probs = support_prob_per_action
        return action_scores, z, support_logits_per_action, logits, probs
    else:
        logits = torch.unsqueeze(torch.ones_like(action_scores), -1)
        return action_scores, logits, logits
| 171
|
dqn_torch_model.py
|
Python
|
rllib/algorithms/dqn/dqn_torch_model.py
|
730df436569646be54db5330e1fdb6be8f31b8c0
|
ray
| 2
|
|
211,407
| 5
| 7
| 2
| 25
| 4
| 0
| 5
| 19
|
_get_imganno
|
pose3d metro datasets part (#6611)
* pose3d metro datasets
* delete extra comment lines
|
https://github.com/PaddlePaddle/PaddleDetection.git
|
def _get_imganno(self, idx):
return self.annos[idx]
| 15
|
pose3d_cmb.py
|
Python
|
ppdet/data/source/pose3d_cmb.py
|
c98230948356f43c03576b16ecbde77a816bb11e
|
PaddleDetection
| 1
|
|
248,735
| 13
| 10
| 10
| 60
| 9
| 0
| 13
| 78
|
get_appservice_last_pos
|
Federation Sender & Appservice Pusher Stream Optimisations (#13251)
* Replace `get_new_events_for_appservice` with `get_all_new_events_stream`
The functions were near identical and this brings the AS worker closer
to the way federation senders work which can allow for multiple workers
to handle AS traffic.
* Pull received TS alongside events when processing the stream
This avoids an extra query -per event- when both federation sender
and appservice pusher process events.
|
https://github.com/matrix-org/synapse.git
|
async def get_appservice_last_pos(self) -> int:
    """Return the stream ordering the appservice sender has processed up to.

    Reads the single ``stream_ordering`` value from the
    ``appservice_stream_position`` table (no key filters, so presumably a
    one-row table — confirm against the schema).
    """
    return await self.db_pool.simple_select_one_onecol(
        table="appservice_stream_position",
        retcol="stream_ordering",
        keyvalues={},
        desc="get_appservice_last_pos",
    )
| 34
|
appservice.py
|
Python
|
synapse/storage/databases/main/appservice.py
|
21eeacc99551febcddcef21db96a2bd82166fc7e
|
synapse
| 1
|
|
288,474
| 32
| 11
| 9
| 92
| 8
| 0
| 38
| 128
|
target_temperature_high
|
Set zwave_js climate entity target temp attributes based on current mode (#79575)
* Report temperature correctly
* DRY
* Add test assertions
* Don't catch TypeError (revert)
|
https://github.com/home-assistant/core.git
|
def target_temperature_high(self) -> float | None:
    """Return the upper target temperature, or None when not applicable.

    Only meaningful when the current mode exposes at least two setpoints
    (a low/high range); returns None for single-setpoint modes or when
    the mode value is missing.
    """
    if (
        self._current_mode and self._current_mode.value is None
    ) or not self._current_mode_setpoint_enums:
        # guard missing value
        return None
    if len(self._current_mode_setpoint_enums) < 2:
        # current mode has a single temperature
        return None
    # Second setpoint enum is the upper bound of the range.
    return self._setpoint_temperature(self._current_mode_setpoint_enums[1])
| 56
|
climate.py
|
Python
|
homeassistant/components/zwave_js/climate.py
|
c040a7a15254794c45b23f788c034f5b30de2a25
|
core
| 5
|
|
303,498
| 14
| 9
| 9
| 57
| 6
| 0
| 18
| 57
|
pause_time
|
Add switch to wilight (#62873)
* Created switch.py and support
* updated support.py
* test for wilight switch
* Update for Test
* Updated test_switch.py
* Trigger service with index
* Updated support.py and switch.py
* Updated support.py
* Updated switch.py as PR#63614
* Updated switch.py
* add type hints
* Updated support.py
* Updated switch.py
* Updated switch.py and services.yaml
* Updated pywilight
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update ci.yaml
* Update ci.yaml
* Updated as pywilight
Renamed Device as PyWiLightDevice in pywilight.
* Updated as pywilight
Renamed Device as PyWiLightDevice in pywilight.
* Updated as pywilight
Renamed Device as PyWiLightDevice in pywilight.
* Updated as pywilight
Renamed Device as PyWiLightDevice in pywilight.
* Update switch.py
* Update homeassistant/components/wilight/support.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update support.py
* Update switch.py
* Update support.py
* Update support.py
* Update switch.py
* Update switch.py
* Update services.yaml
* Update switch.py
* Update services.yaml
* Update switch.py
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update homeassistant/components/wilight/switch.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update switch.py
* Update switch.py
* Update switch.py
* Update test_switch.py
* Update test_switch.py
* Update test_switch.py
* Decrease exception scope
* Clean up
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
|
https://github.com/home-assistant/core.git
|
def pause_time(self) -> int | None:
    """Return the device's pause time converted via ``wilight_to_hass_pause_time``.

    Returns None when the device status has no ``timer_target`` value.
    """
    pause_time = self._status.get("timer_target")
    if pause_time is not None:
        # Device reports its own encoding; convert for Home Assistant.
        return wilight_to_hass_pause_time(pause_time)
    return pause_time
| 33
|
switch.py
|
Python
|
homeassistant/components/wilight/switch.py
|
34984a8af8efc5ef6d1d204404c517e7f7c2d1bb
|
core
| 2
|
|
69,552
| 38
| 13
| 30
| 169
| 15
| 0
| 49
| 32
|
get_rfq_containing_supplier
|
refactor: search queries (#33004)
- guard clauses for readability
- use values or format
|
https://github.com/frappe/erpnext.git
|
def get_rfq_containing_supplier(doctype, txt, searchfield, start, page_len, filters):
    """Search Requests for Quotation matching ``txt`` and the given filters.

    NOTE(review): ``txt`` and ``transaction_date`` are interpolated directly
    into the SQL condition string — this is vulnerable to SQL injection and
    should use bound parameters instead.
    NOTE(review): the query passed to ``frappe.db.sql`` is the bare name
    ``f`` — the query string appears truncated/missing in this chunk; verify
    against the original source.
    """
    conditions = ""
    if txt:
        conditions += "and rfq.name like '%%" + txt + "%%' "
    if filters.get("transaction_date"):
        conditions += "and rfq.transaction_date = '{0}'".format(filters.get("transaction_date"))
    rfq_data = frappe.db.sql(
        f,
        {
            "page_len": page_len,
            "start": start,
            "company": filters.get("company"),
            "supplier": filters.get("supplier"),
        },
        as_dict=1,
    )
    return rfq_data
| 96
|
request_for_quotation.py
|
Python
|
erpnext/buying/doctype/request_for_quotation/request_for_quotation.py
|
34e4903ed7936c35176d6031a16d1a27654dcb40
|
erpnext
| 3
|
|
289,844
| 6
| 8
| 3
| 29
| 4
| 0
| 6
| 20
|
is_opening
|
Add Velbus cover opening/closing (#79851)
* Velbus cover/blind: indicate opening/closing
* Add docstrings because flake8 requirement
Co-authored-by: Niels Laukens <niels@dest-unreach.be>
|
https://github.com/home-assistant/core.git
|
def is_opening(self) -> bool:
    """Whether the underlying channel reports the cover as currently opening."""
    channel = self._channel
    return channel.is_opening()
| 16
|
cover.py
|
Python
|
homeassistant/components/velbus/cover.py
|
8e196fbe0619f854ba916599cc18992ba9d9cdf4
|
core
| 1
|
|
22,022
| 16
| 12
| 6
| 79
| 11
| 0
| 19
| 53
|
get_all_lexers
|
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
|
https://github.com/pypa/pipenv.git
|
def get_all_lexers(plugins=True):
    """Yield ``(name, aliases, filenames, mimetypes)`` for every known lexer.

    Built-in lexers come from the ``LEXERS`` registry; plugin-provided
    lexers are included unless ``plugins`` is False.
    """
    for item in LEXERS.values():
        # Drop the first registry field (presumably the module path —
        # confirm against the LEXERS table definition).
        yield item[1:]
    if plugins:
        for lexer in find_plugin_lexers():
            yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
| 49
|
__init__.py
|
Python
|
pipenv/patched/pip/_vendor/pygments/lexers/__init__.py
|
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
|
pipenv
| 4
|
|
275,000
| 9
| 12
| 5
| 76
| 6
| 0
| 10
| 33
|
create_mirrored_strategy
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def create_mirrored_strategy():
    """Build a MirroredStrategy spanning CPU+GPU if a GPU is present, else CPU only."""
    if tf.config.list_logical_devices("GPU"):
        return tf.distribute.MirroredStrategy(["cpu:0", "gpu:0"])
    else:
        return tf.distribute.MirroredStrategy(["cpu:0"])
| 41
|
layer_test.py
|
Python
|
keras/mixed_precision/layer_test.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 2
|
|
248,296
| 19
| 11
| 9
| 65
| 9
| 0
| 19
| 62
|
refresh_stats
|
Add config flags to allow for cache auto-tuning (#12701)
|
https://github.com/matrix-org/synapse.git
|
def refresh_stats(self) -> None:
    """Ask jemalloc to refresh its cached statistics.

    Writing to the ``epoch`` mallctl makes jemalloc snapshot fresh stats.
    Failures are logged rather than raised, since stats are best-effort.
    """
    try:
        self._mallctl("epoch", read=False, write=1)
    except Exception as e:
        logger.warning("Failed to reload jemalloc stats: %s", e)
| 37
|
jemalloc.py
|
Python
|
synapse/metrics/jemalloc.py
|
cde8af9a495cbc7f3d0207e3f17c37eddaee34e1
|
synapse
| 2
|
|
8,302
| 10
| 8
| 4
| 40
| 7
| 0
| 12
| 40
|
__reduce__
|
Fixed hyperopt trial syncing to remote filesystems for Ray 2.0 (#2617)
|
https://github.com/ludwig-ai/ludwig.git
|
def __reduce__(self):
    """Pickle support: rebuild as ``RemoteSyncer(sync_period, creds)``."""
    return RemoteSyncer, (self.sync_period, self.creds)
| 24
|
syncer.py
|
Python
|
ludwig/hyperopt/syncer.py
|
d8a0d8f1ace6a546d6d1875aa604b84e386c6ee1
|
ludwig
| 1
|
|
160,877
| 24
| 9
| 6
| 60
| 10
| 0
| 24
| 85
|
mini
|
ENH: Adding __array_ufunc__ capability to MaskedArrays.
This enables any ufunc numpy operations that are called on a
MaskedArray to use the masked version of that function automatically
without needing to resort to np.ma.func() calls.
|
https://github.com/numpy/numpy.git
|
def mini(self, axis=None):
    """Deprecated alias for the minimum along ``axis``.

    .. deprecated:: 1.13.0
        Use the ``min`` method or ``np.ma.minimum.reduce`` instead.
    """
    # 2016-04-13, 1.13.0, gh-8764
    warnings.warn(
        "`mini` is deprecated; use the `min` method or "
        "`np.ma.minimum.reduce instead.",
        DeprecationWarning, stacklevel=2)
    return MaskedArray(np.min(self, axis))
| 35
|
core.py
|
Python
|
numpy/ma/core.py
|
6d77c591c59b5678f14ae5af2127eebb7d2415bc
|
numpy
| 1
|
|
160,140
| 31
| 16
| 9
| 155
| 21
| 1
| 33
| 95
|
test_debugcapi
|
TST: Initialize f2py2e tests of the F2PY CLI (#20668)
Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.
More importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.
|
https://github.com/numpy/numpy.git
|
def test_debugcapi(capfd, hello_world_f90, monkeypatch):
    """``--debug-capi`` must add the DEBUGCFUNCS define to the generated C module."""
    ipath = Path(hello_world_f90)
    mname = "blah"
    # Simulate the CLI invocation: f2py -m blah <file> --debug-capi
    monkeypatch.setattr(sys, "argv",
                        f'f2py -m {mname} {ipath} --debug-capi'.split())
    with util.switchdir(ipath.parent):
        f2pycli()
        with Path(f"./{mname}module.c").open() as ocmod:
            assert r"#define DEBUGCFUNCS" in ocmod.read()
@pytest.mark.xfail(reason="Consistently fails on CI.")
|
@pytest.mark.xfail(reason="Consistently fails on CI.")
| 69
|
test_f2py2e.py
|
Python
|
numpy/f2py/tests/test_f2py2e.py
|
729ad4f92420231e2a7009b3223c6c7620b8b808
|
numpy
| 1
|
285,631
| 5
| 6
| 64
| 20
| 2
| 0
| 5
| 12
|
tab_clickable_and_save_evt
|
Allow reports comments to be saved in a new HTML (#2507)
* allow reports comments to be saved in a new HTML
* clear output of equity report
Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>
|
https://github.com/OpenBB-finance/OpenBBTerminal.git
|
def tab_clickable_and_save_evt() -> str:
    """Return the snippet enabling clickable tabs / save events in report HTML.

    NOTE(review): the body is a bare ``return`` (yields None) despite the
    ``-> str`` annotation — the returned string literal appears truncated in
    this chunk; verify against the original source.
    """
    return
| 9
|
widget_helpers.py
|
Python
|
openbb_terminal/reports/widget_helpers.py
|
3381a9a907fe6f99ba6a190475a55e1325d9a4a9
|
OpenBBTerminal
| 1
|
|
289,805
| 55
| 14
| 33
| 319
| 14
| 0
| 72
| 355
|
test_load_values_when_added_to_hass
|
Bayesian - support `unique_id:` (#79879)
* support unique_id
* adds test for unique_ids
|
https://github.com/home-assistant/core.git
|
async def test_load_values_when_added_to_hass(hass):
    """A configured Bayesian sensor exposes its unique_id and observation attrs."""
    config = {
        "binary_sensor": {
            "name": "Test_Binary",
            "platform": "bayesian",
            "unique_id": "3b4c9563-5e84-4167-8fe7-8f507e796d72",
            "device_class": "connectivity",
            "observations": [
                {
                    "platform": "state",
                    "entity_id": "sensor.test_monitored",
                    "to_state": "off",
                    "prob_given_true": 0.8,
                    "prob_given_false": 0.4,
                }
            ],
            "prior": 0.2,
            "probability_threshold": 0.32,
        }
    }
    # Seed the monitored entity state before the sensor is set up.
    hass.states.async_set("sensor.test_monitored", "off")
    await hass.async_block_till_done()
    assert await async_setup_component(hass, "binary_sensor", config)
    await hass.async_block_till_done()
    entity_registry = async_get_entities(hass)
    # The configured unique_id is namespaced with the "bayesian-" prefix.
    assert (
        entity_registry.entities["binary_sensor.test_binary"].unique_id
        == "bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72"
    )
    state = hass.states.get("binary_sensor.test_binary")
    assert state.attributes.get("device_class") == "connectivity"
    assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
    assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
| 183
|
test_binary_sensor.py
|
Python
|
tests/components/bayesian/test_binary_sensor.py
|
fe7402375d2f899a7edd6ac326d2c1998b4c43da
|
core
| 1
|
|
312,694
| 10
| 8
| 4
| 45
| 7
| 0
| 11
| 39
|
get_new_data
|
Add Z-Wave.Me integration (#65473)
* Add support of Z-Wave.Me Z-Way and RaZberry server (#61182)
Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
Co-authored-by: LawfulChaos <kerbalspacema@gmail.com>
* Add switch platform to Z-Wave.Me integration (#64957)
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
* Add button platform to Z-Wave.Me integration (#65109)
Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Fix button controller access (#65117)
* Add lock platform to Z-Wave.Me integration #65109 (#65114)
Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Add sensor platform to Z-Wave.Me integration (#65132)
* Sensor Entity
* Sensor fixes
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Inline descriotion according to review proposal
* State Classes for sensor
* Generic sensor
* Generic sensor
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Add binary sensor platform to Z-Wave.Me integration (#65306)
* Binary Sensor Entity
* Update docstring
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Add Light Entity platform to Z-Wave.Me integration (#65331)
* Light Entity
* mypy fix
* Fixes, ZWaveMePlatforms enum
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Fixes
* Fixes
* Fixes
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Add Thermostat platform to Z-Wave.Me integration #65331 (#65371)
* Climate entity
* Climate entity
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Climate entity fix
* Clean up
* cleanup
* Import order fix
* Correct naming
Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Correct zwave_me .coveragerc (#65491)
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
Co-authored-by: LawfulChaos <kerbalspacema@gmail.com>
Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
|
https://github.com/home-assistant/core.git
|
def get_new_data(self, new_data):
    """Replace the cached device object and push its state to Home Assistant."""
    # Device is considered unavailable when it reports its failure flag.
    available = not new_data.isFailed
    self.device = new_data
    self._attr_available = available
    self.async_write_ha_state()
| 26
|
__init__.py
|
Python
|
homeassistant/components/zwave_me/__init__.py
|
3c5a667d9784bb5f2fab426b133b5582706c6e68
|
core
| 1
|
|
191,826
| 59
| 7
| 57
| 227
| 40
| 0
| 78
| 414
|
get_html_renderable_mapping
|
feat: added filter to locate columns (#1115)
* feat: added filter to locate columns
* Update .pre-commit-config.yaml
* Apply suggestions from code review
Co-authored-by: Aarni Koskela <akx@iki.fi>
* feat: added support for variable search in ipywidgets
* fix: variables not being filtered according to dropdown
* fix: variables not being filtered according to dropdown
* fix: fixed order of sections
Co-authored-by: Aarni Koskela <akx@iki.fi>
|
https://github.com/ydataai/ydata-profiling.git
|
def get_html_renderable_mapping() -> Dict[Type[Renderable], Type[Renderable]]:
    """Map each core presentation Renderable class to its HTML-flavoured subclass.

    Imports are done locally to avoid import cycles between the core and
    flavour packages.
    """
    from pandas_profiling.report.presentation.core import (
        HTML,
        Alerts,
        Collapse,
        Container,
        Dropdown,
        Duplicate,
        FrequencyTable,
        FrequencyTableSmall,
        Image,
        Root,
        Sample,
        Table,
        ToggleButton,
        Variable,
        VariableInfo,
    )
    from pandas_profiling.report.presentation.flavours.html import (
        HTMLHTML,
        HTMLAlerts,
        HTMLCollapse,
        HTMLContainer,
        HTMLDropdown,
        HTMLDuplicate,
        HTMLFrequencyTable,
        HTMLFrequencyTableSmall,
        HTMLImage,
        HTMLRoot,
        HTMLSample,
        HTMLTable,
        HTMLToggleButton,
        HTMLVariable,
        HTMLVariableInfo,
    )
    return {
        Container: HTMLContainer,
        Variable: HTMLVariable,
        VariableInfo: HTMLVariableInfo,
        Table: HTMLTable,
        HTML: HTMLHTML,
        Root: HTMLRoot,
        Image: HTMLImage,
        FrequencyTable: HTMLFrequencyTable,
        FrequencyTableSmall: HTMLFrequencyTableSmall,
        Alerts: HTMLAlerts,
        Duplicate: HTMLDuplicate,
        Dropdown: HTMLDropdown,
        Sample: HTMLSample,
        ToggleButton: HTMLToggleButton,
        Collapse: HTMLCollapse,
    }
| 165
|
flavours.py
|
Python
|
src/pandas_profiling/report/presentation/flavours/flavours.py
|
c2f817d09a38094dcf83b0e49d86e3c87d822c7b
|
ydata-profiling
| 1
|
|
146,198
| 7
| 9
| 18
| 35
| 5
| 0
| 7
| 13
|
get_deployment_statuses
|
[serve] Implement Serve Application object (#22917)
The concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. This change introduces a formal Application data structure and refactors existing code to use it.
|
https://github.com/ray-project/ray.git
|
def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:
    """Return status info for all Serve deployments, keyed by deployment name."""
    return internal_get_global_client().get_deployment_statuses()
| 20
|
api.py
|
Python
|
python/ray/serve/api.py
|
1100c982223757f697a410a0d0c3d8bf3ff9c805
|
ray
| 1
|
|
337,287
| 7
| 9
| 3
| 44
| 8
| 0
| 7
| 28
|
clip_grad_value_
|
Convert documentation to the new front (#271)
* Main conversion
* Doc styling
* Style
* New front deploy
* Fixes
* Fixes
* Fix new docstrings
* Style
|
https://github.com/huggingface/accelerate.git
|
def clip_grad_value_(self, parameters, clip_value):
    """Unscale gradients (for mixed precision) then clip them in place.

    Gradients of ``parameters`` are clamped to [-clip_value, clip_value]
    via ``torch.nn.utils.clip_grad_value_``.
    """
    self.unscale_gradients()
    clip_fn = torch.nn.utils.clip_grad_value_
    clip_fn(parameters, clip_value)
| 27
|
accelerator.py
|
Python
|
src/accelerate/accelerator.py
|
fb5ed62c102c0323486b89805e1888495de3db15
|
accelerate
| 1
|
|
3,759
| 20
| 12
| 12
| 121
| 10
| 0
| 31
| 148
|
state
|
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactoring of async jobs, support random order of slices
* update source _read_incremental to hook new state logic
* fix issues with timeout
* remove debugging and clean up, improve retry logic
* merge changes from #8234
* fix call super _read_increment
* generalize batch execution, add use_batch flag
* improve coverage, do some refactoring of spec
* update test, remove overrides of source
* add split by AdSet
* add smaller insights
* fix end_date < start_date case
* add account_id to PK
* add notes
* fix new streams
* fix reversed incremental stream
* update spec.json for SAT
* upgrade CDK and bump version
Co-authored-by: Dmytro Rezchykov <dmitry.rezchykov@zazmic.com>
Co-authored-by: Eugene Kulak <kulak.eugene@gmail.com>
|
https://github.com/airbytehq/airbyte.git
|
def state(self) -> MutableMapping[str, Any]:
    """Serialize stream state: the cursor value (if any) plus completed slices.

    Returns an empty dict when nothing has been processed yet.
    """
    if self._cursor_value:
        return {
            self.cursor_field: self._cursor_value.isoformat(),
            "slices": [d.isoformat() for d in self._completed_slices],
        }
    # No cursor yet, but some slices may already be done.
    if self._completed_slices:
        return {
            "slices": [d.isoformat() for d in self._completed_slices],
        }
    return {}
| 76
|
base_insight_streams.py
|
Python
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_insight_streams.py
|
a3aae8017a0a40ff2006e2567f71dccb04c997a5
|
airbyte
| 5
|
|
257,543
| 88
| 13
| 27
| 273
| 15
| 0
| 122
| 304
|
add_metadata_summerizer
|
Passing the meta-data in the summerizer response (#2179)
* Passing the all the meta-data in the summerizer
* Disable metadata forwarding if `generate_single_summary` is `True`
* Update Documentation & Code Style
* simplify tests
* Update Documentation & Code Style
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
|
https://github.com/deepset-ai/haystack.git
|
def add_metadata_summerizer():
    """Check that the summarizer forwards document metadata in its output.

    NOTE(review): both ``content=`` arguments and the expected-summary
    string are empty here (``content=,`` is not valid Python) — the
    string literals appear truncated in this chunk; verify against the
    original source.
    """
    docs = [
        Document(
            content=,
            meta={
                "sub_content": "Pegasus Example",
                "topic": "California's Electricity",
                "context": "Dummy - PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires.",
            },
        ),
        Document(
            content=,
            meta={"sub_content": "Paris best tour best tour", "topic": "Eiffel tower"},
        ),
    ]
    # Original input is overwrote after the "predict". So adding the same input as check_output to assess the output
    check_output = deepcopy(docs)
    summarizer = TransformersSummarizer(model_name_or_path="google/pegasus-xsum")
    summary = summarizer.predict(documents=docs)
    assert len(summary[0].meta) == len(check_output[0].meta)
    assert len(summary[1].meta) - 1 == len(check_output[1].meta)
    assert (
        summary[0].meta["context"]
        ==
    )
    summary = summarizer.predict(documents=docs, generate_single_summary=True)
    assert len(summary) == 1
    assert not summary[0].meta  # Metadata is not returned in case of a single summary
| 162
|
test_summarizer.py
|
Python
|
test/nodes/test_summarizer.py
|
4d8f40425bc4e7346359b7609720a50ac10b8af9
|
haystack
| 1
|
|
182,034
| 6
| 7
| 6
| 25
| 4
| 0
| 6
| 20
|
layout
|
Ensuring we get and set Layout as set in view.styles everywhere
|
https://github.com/Textualize/textual.git
|
def layout(self) -> Layout:
    """The layout object configured on this view's styles."""
    view_styles = self.styles
    return view_styles.layout
| 14
|
view.py
|
Python
|
src/textual/view.py
|
9c2a125c2412c5d011307a80f4552cf9824cc022
|
textual
| 1
|
|
189,674
| 16
| 10
| 6
| 59
| 6
| 0
| 16
| 66
|
get_tip
|
Improved structure of the :mod:`.mobject` module (#2476)
* group graphing and update its references
* group text and update its references
* group opengl and update its references
* group three_d and update its references
* group geometry and update (most) references
* move some chaning.py + updater files into animation
* refactor arc.py
* refactor line.py
* refactor polygram.py
* refactor tips.py
* black + isort
* import new files in __init__.py
* refactor places where geometry was used
* black + isort again
* remove unused imports
* update reference.rst
* add descriptions to files
* fix circular imports
* forgot ArrowTip
* fix tests
* fix doctests
* satisfy mypy?
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix ALL merge conflicts
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* one VMobject import slipped through
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* re-add imports to `manim/opengl/__init__.py`
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix reference manual
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* ignore unknown directive type
* fix arrow tip imports in docstrings
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
|
https://github.com/ManimCommunity/manim.git
|
def get_tip(self):
    """Return the first tip attached to this mobject.

    Raises
    ------
    Exception
        If the mobject has no tips attached.
    """
    all_tips = self.get_tips()
    if not all_tips:
        raise Exception("tip not found")
    return all_tips[0]
| 33
|
arc.py
|
Python
|
manim/mobject/geometry/arc.py
|
e040bcacd38378386749db18aeba575b93f4ebca
|
manim
| 2
|
|
244,527
| 130
| 13
| 44
| 513
| 40
| 0
| 228
| 763
|
test_nasfcos_head_loss
|
Refactor interface of base dense free head and fcos head
|
https://github.com/open-mmlab/mmdetection.git
|
def test_nasfcos_head_loss(self):
    """Check NASFCOSHead.loss on empty and non-empty ground truth.

    With no ground-truth boxes only the classification loss should be
    non-zero; with one box all three losses should be non-zero.
    """
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
    }]
    nasfcos_head = NASFCOSHead(
        num_classes=4,
        in_channels=2,  # the same as `deform_groups` in dconv3x3_config
        feat_channels=2,
        norm_cfg=None)
    # Nasfcos head expects a multiple levels of features per image
    feats = (
        torch.rand(1, 2, s // stride[1], s // stride[0]).float()
        for stride in nasfcos_head.prior_generator.strides)
    cls_scores, bbox_preds, centernesses = nasfcos_head.forward(feats)
    # Test that empty ground truth encourages the network to
    # predict background
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.empty((0, 4))
    gt_instances.labels = torch.LongTensor([])
    empty_gt_losses = nasfcos_head.loss(cls_scores, bbox_preds,
                                        centernesses, [gt_instances],
                                        img_metas)
    # When there is no truth, the cls loss should be nonzero but
    # box loss and centerness loss should be zero
    empty_cls_loss = empty_gt_losses['loss_cls'].item()
    empty_box_loss = empty_gt_losses['loss_bbox'].item()
    empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
    self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
    self.assertEqual(
        empty_box_loss, 0,
        'there should be no box loss when there are no true boxes')
    self.assertEqual(
        empty_ctr_loss, 0,
        'there should be no centerness loss when there are no true boxes')
    # When truth is non-empty then all cls, box loss and centerness loss
    # should be nonzero for random inputs
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.Tensor(
        [[23.6667, 23.8757, 238.6326, 151.8874]])
    gt_instances.labels = torch.LongTensor([2])
    one_gt_losses = nasfcos_head.loss(cls_scores, bbox_preds, centernesses,
                                      [gt_instances], img_metas)
    onegt_cls_loss = one_gt_losses['loss_cls'].item()
    onegt_box_loss = one_gt_losses['loss_bbox'].item()
    onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
    self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
    self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
    self.assertGreater(onegt_ctr_loss, 0,
                       'centerness loss should be non-zero')
| 314
|
test_nasfcos_head.py
|
Python
|
tests/test_models/test_dense_heads/test_nasfcos_head.py
|
015f8a9bafe808fbe3db673d629f126a804a9207
|
mmdetection
| 2
|
|
88,876
| 57
| 10
| 26
| 260
| 28
| 0
| 70
| 203
|
test_track_outcome_default
|
feat(metrics-billing): Produce billing outcomes for metrics with futures (#40030)
|
https://github.com/getsentry/sentry.git
|
def test_track_outcome_default(settings):
    """track_outcome should publish to the default outcomes topic and
    cluster, ignoring any configured billing cluster."""
    # Provide a billing cluster config that should be ignored
    settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES_BILLING] = {"cluster": "different"}
    track_outcome(
        org_id=1,
        project_id=2,
        key_id=3,
        outcome=Outcome.INVALID,
        reason="project_id",
    )
    # The producer must be created against the default outcomes cluster.
    used_cluster, _ = kafka_config.get_kafka_producer_cluster_options.call_args
    expected_cluster = (settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],)
    assert used_cluster == expected_cluster
    assert len(outcomes.publishers) == 1
    # The message must land on the default outcomes topic.
    (published_topic, raw_payload), _ = outcomes.publishers["default"].publish.call_args
    assert published_topic == settings.KAFKA_OUTCOMES
    parsed = json.loads(raw_payload)
    # Timestamp varies per run, so drop it before comparing.
    del parsed["timestamp"]
    expected_message = {
        "org_id": 1,
        "project_id": 2,
        "key_id": 3,
        "outcome": Outcome.INVALID.value,
        "reason": "project_id",
        "event_id": None,
        "category": None,
        "quantity": 1,
    }
    assert parsed == expected_message
| 158
|
test_outcomes.py
|
Python
|
tests/sentry/utils/test_outcomes.py
|
e8775b6eee9194b00763856e202094ad41afb829
|
sentry
| 1
|
|
186,361
| 43
| 18
| 14
| 212
| 14
| 0
| 51
| 264
|
_add_listens_http
|
Various clean-ups in certbot-apache. Use f-strings. (#9132)
* Various clean-ups in certbot-apache. Use f-strings.
* Smaller tweaks
|
https://github.com/certbot/certbot.git
|
def _add_listens_http(self, listens, listens_orig, port):
    """Add Listen directives for every address in `listens` that is not
    already present in `listens_orig`.

    When the bare `port` itself is among the new entries it acts as a
    wildcard, so a single ``Listen <port>`` directive is sufficient.
    """
    added = listens.difference(listens_orig)
    if port in added:
        # We have wildcard, skip the rest
        self.parser.add_dir(parser.get_aug_path(self.parser.loc["listen"]),
                            "Listen", port)
        self.save_notes += (
            f"Added Listen {port} directive to {self.parser.loc['listen']}\n"
        )
        return
    for entry in added:
        self.parser.add_dir(parser.get_aug_path(
            self.parser.loc["listen"]), "Listen", entry.split(" "))
        self.save_notes += (f"Added Listen {entry} directive to "
                            f"{self.parser.loc['listen']}\n")
| 103
|
configurator.py
|
Python
|
certbot-apache/certbot_apache/_internal/configurator.py
|
eeca208c8f57304590ac1af80b496e61021aaa45
|
certbot
| 3
|
|
177,766
| 59
| 14
| 29
| 321
| 29
| 1
| 97
| 298
|
user_signup
|
fix: DEV-2233: Insecure invite token expiration (#2274)
* remove existing org token by user if exists
* dont bypass user create login
* disallow if token does not match
* +test for coverage
|
https://github.com/heartexlabs/label-studio.git
|
def user_signup(request):
    """Render the signup page and create a new user on POST.

    An organization invite token may be required (or, if present, must
    match) depending on ``settings.DISABLE_SIGNUP_WITHOUT_LINK``.
    """
    user = request.user
    next_page = request.GET.get('next') or reverse('projects:project-index')
    token = request.GET.get('token')
    # Already logged-in users skip signup entirely.
    if user.is_authenticated:
        return redirect(next_page)
    user_form = forms.UserSignupForm()
    organization_form = OrganizationSignupForm()
    # make a new user
    if request.method == 'POST':
        organization = Organization.objects.first()
        token_matches = bool(token and organization and token == organization.token)
        if settings.DISABLE_SIGNUP_WITHOUT_LINK is True:
            # Signup is invite-only: a valid matching token is mandatory.
            if not token_matches:
                raise PermissionDenied()
        elif token and organization and token != organization.token:
            # Open signup, but a supplied token must still be correct.
            raise PermissionDenied()
        user_form = forms.UserSignupForm(request.POST)
        organization_form = OrganizationSignupForm(request.POST)
        if user_form.is_valid():
            redirect_response = proceed_registration(request, user_form, organization_form, next_page)
            if redirect_response:
                return redirect_response
    return render(request, 'users/user_signup.html', {
        'user_form': user_form,
        'organization_form': organization_form,
        'next': next_page,
        'token': token,
    })
@enforce_csrf_checks
|
@enforce_csrf_checks
| 189
|
views.py
|
Python
|
label_studio/users/views.py
|
65d96b2e04525bb5ee9c93ea77678adc1148cb43
|
label-studio
| 13
|
118,683
| 32
| 12
| 25
| 268
| 18
| 0
| 52
| 204
|
test_config_options_removed_on_reparse
|
Report sharing removal (#4260)
The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand.
|
https://github.com/streamlit/streamlit.git
|
def test_config_options_removed_on_reparse(self):
    """Options set by a previous config file read should be dropped when
    the config is force-reparsed with new file contents."""
    global_config_path = "/mock/home/folder/.streamlit/config.toml"
    makedirs_patch = patch("streamlit.config.os.makedirs")
    makedirs_patch.return_value = True
    pathexists_patch = patch("streamlit.config.os.path.exists")
    # Only the mocked global config path "exists" on disk.
    pathexists_patch.side_effect = lambda path: path == global_config_path
    # NOTE(review): the TOML literal below appears truncated in this view;
    # presumably it set both theme.base and theme.font — confirm upstream.
    global_config =
    open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
    with open_patch, makedirs_patch, pathexists_patch:
        config.get_config_options()
        self.assertEqual("dark", config.get_option("theme.base"))
        self.assertEqual("sans serif", config.get_option("theme.font"))
    # NOTE(review): second literal also truncated; presumably it drops
    # theme.font so the re-parse removes that option.
    global_config =
    open_patch = patch("streamlit.config.open", mock_open(read_data=global_config))
    with open_patch, makedirs_patch, pathexists_patch:
        config.get_config_options(force_reparse=True)
        self.assertEqual("dark", config.get_option("theme.base"))
        self.assertEqual(None, config.get_option("theme.font"))
| 147
|
config_test.py
|
Python
|
lib/tests/streamlit/config_test.py
|
dd9084523e365e637443ea351eaaaa25f52d8412
|
streamlit
| 1
|
|
322,037
| 38
| 15
| 17
| 260
| 31
| 0
| 50
| 213
|
_construct_dict_map
|
Unified customization for Taskflow (#1517)
* Add custom model inferface for lac & user dict interface for wordtag
* Update README.md
* Update term-linking
* Update README.md
* Update README.md
* add custom method
* Update README.md
* Update README.md
* Unified custom interface for Taskflow
* Update model inference
* Add config files
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* remove unused code
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update main cls
|
https://github.com/PaddlePaddle/PaddleNLP.git
|
def _construct_dict_map(self):
    """Load the name/category map and build the BK-tree plus the
    character vocabularies used for lookup."""
    dict_path = os.path.join(
        self._task_path, "name_category_map.json")
    with open(dict_path, encoding="utf-8") as reader:
        self._name_dict = json.load(reader)
    self._tree = BurkhardKellerTree()
    self._cls_vocabs = OrderedDict()
    for name in self._name_dict:
        self._tree.add(name)
        # Assign each previously unseen character the next free id.
        for ch in name:
            if ch not in self._cls_vocabs:
                self._cls_vocabs[ch] = len(self._cls_vocabs)
    self._cls_vocabs["[PAD]"] = len(self._cls_vocabs)
    # Reverse mapping: id -> character.
    self._id_vocabs = dict(
        zip(self._cls_vocabs.values(), self._cls_vocabs.keys()))
    self._vocab_ids = self._tokenizer.vocab.to_indices(
        list(self._cls_vocabs.keys()))
| 158
|
knowledge_mining.py
|
Python
|
paddlenlp/taskflow/knowledge_mining.py
|
c1d5241d581569b544c04f5d23b069a29a6e6209
|
PaddleNLP
| 4
|
|
20,375
| 25
| 13
| 15
| 116
| 9
| 0
| 47
| 232
|
_filter_to
|
check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
|
https://github.com/pypa/pipenv.git
|
def _filter_to(self, it, pred):
buf = ''
idx = 0
for i, t, v in it:
if pred(t):
if buf:
yield idx, None, buf
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
yield idx, None, buf
| 70
|
latex.py
|
Python
|
pipenv/patched/notpip/_vendor/pygments/formatters/latex.py
|
f3166e673fe8d40277b804d35d77dcdb760fc3b3
|
pipenv
| 6
|
|
154,213
| 10
| 9
| 5
| 42
| 6
| 0
| 13
| 52
|
_get_name
|
REFACTOR-#4796: Introduce constant for __reduced__ column name (#4799)
Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com>
Co-authored-by: Alexey Prutskov <lehaprutskov@gmail.com>
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Jonathan Shi <jhshi@ponder.io>
|
https://github.com/modin-project/modin.git
|
def _get_name(self):
    """Return the series name, mapping the internal unnamed-series
    sentinel column label to ``None``."""
    name = self._query_compiler.columns[0]
    return None if name == MODIN_UNNAMED_SERIES_LABEL else name
| 25
|
series.py
|
Python
|
modin/pandas/series.py
|
3f985ed6864cc1b5b587094d75ca5b2695e4139f
|
modin
| 2
|
|
250,550
| 29
| 13
| 8
| 128
| 15
| 0
| 38
| 118
|
remove
|
Rename new async helper functions.
async_trigger -> trigger_event
invoke_addon -> invoke_addon_sync (API breakage)
async_invoke_addon -> invoke_addon
|
https://github.com/mitmproxy/mitmproxy.git
|
def remove(self, addon):
    """Remove *addon* (and every addon it contains) from the chain and
    lookup table, then invoke its done hook.

    Raises AddonManagerError if any contained addon is not registered.
    """
    for child in traverse([addon]):
        name = _get_name(child)
        if name not in self.lookup:
            raise exceptions.AddonManagerError("No such addon: %s" % name)
        self.chain = [existing for existing in self.chain if existing is not child]
        del self.lookup[_get_name(child)]
    self.invoke_addon_sync(addon, hooks.DoneHook())
| 81
|
addonmanager.py
|
Python
|
mitmproxy/addonmanager.py
|
ee4999e8e4380f7b67faef92f04c361deffba412
|
mitmproxy
| 5
|
|
267,788
| 13
| 13
| 5
| 68
| 11
| 0
| 16
| 28
|
get_comp_type
|
ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annotation type comments to native type hints.
* ansible-test - Use more native type hints.
Conversion of single-line function annotation type comments with default values to native type hints.
* ansible-test - Use more native type hints.
Manual conversion of type annotation comments for functions which have pylint directives.
|
https://github.com/ansible/ansible.git
|
def get_comp_type() -> t.Optional[CompType]:
    """Return the shell completion type from the COMP_TYPE environment
    variable, or ``None`` when it is unset or empty."""
    raw = os.environ.get('COMP_TYPE')
    if not raw:
        return None
    # COMP_TYPE holds the ordinal of a single character.
    return CompType(chr(int(raw)))
| 40
|
argcompletion.py
|
Python
|
test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py
|
3eb0485dd92c88cc92152d3656d94492db44b183
|
ansible
| 2
|
|
261,264
| 163
| 15
| 76
| 620
| 64
| 0
| 281
| 921
|
test_split_interaction_constraints
|
ENH FEA add interaction constraints to HGBT (#21020)
Co-authored-by: Loïc Estève <loic.esteve@ymail.com>
|
https://github.com/scikit-learn/scikit-learn.git
|
def test_split_interaction_constraints():
    """Check that Splitter.find_node_split honours `allowed_features`.

    Gradients are constructed so feature 1 is by far the best split;
    with `allowed_features=[0, 3]` the splitter must nevertheless pick
    only from the allowed set, and over several seeds both allowed
    features must be chosen at least once.
    """
    n_features = 4
    # features 1 and 2 are not allowed to be split on
    allowed_features = np.array([0, 3], dtype=np.uint32)
    n_bins = 5
    n_samples = 10
    l2_regularization = 0.0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    all_hessians = np.ones(1, dtype=G_H_DTYPE)
    sum_hessians = n_samples
    hessians_are_constant = True
    split_features = []
    # The loop is to ensure that we split at least once on each allowed feature (0, 3).
    # This is tracked by split_features and checked at the end.
    for i in range(10):
        rng = np.random.RandomState(919 + i)
        X_binned = np.asfortranarray(
            rng.randint(0, n_bins - 1, size=(n_samples, n_features)),
            dtype=X_BINNED_DTYPE,
        )
        X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE)
        # Make feature 1 very important
        all_gradients = (10 * X_binned[:, 1] + rng.randn(n_samples)).astype(G_H_DTYPE)
        sum_gradients = all_gradients.sum()
        builder = HistogramBuilder(
            X_binned,
            n_bins,
            all_gradients,
            all_hessians,
            hessians_are_constant,
            n_threads,
        )
        n_bins_non_missing = np.array([n_bins] * X_binned.shape[1], dtype=np.uint32)
        has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
        monotonic_cst = np.array(
            [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
        )
        is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
        missing_values_bin_idx = n_bins - 1
        splitter = Splitter(
            X_binned,
            n_bins_non_missing,
            missing_values_bin_idx,
            has_missing_values,
            is_categorical,
            monotonic_cst,
            l2_regularization,
            min_hessian_to_split,
            min_samples_leaf,
            min_gain_to_split,
            hessians_are_constant,
        )
        assert np.all(sample_indices == splitter.partition)
        histograms = builder.compute_histograms_brute(sample_indices)
        value = compute_node_value(
            sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
        )
        # with all features allowed, feature 1 should be split on as it is the most
        # important one by construction of the gradients
        si_root = splitter.find_node_split(
            n_samples,
            histograms,
            sum_gradients,
            sum_hessians,
            value,
            allowed_features=None,
        )
        assert si_root.feature_idx == 1
        # only features 0 and 3 are allowed to be split on
        si_root = splitter.find_node_split(
            n_samples,
            histograms,
            sum_gradients,
            sum_hessians,
            value,
            allowed_features=allowed_features,
        )
        split_features.append(si_root.feature_idx)
        assert si_root.feature_idx in allowed_features
    # make sure feature 0 and feature 3 are split on in the constraint setting
    assert set(allowed_features) == set(split_features)
| 423
|
test_splitting.py
|
Python
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py
|
5ceb8a6a031ddff26a7ede413db1b53edb64166a
|
scikit-learn
| 2
|
|
153,943
| 15
| 10
| 9
| 67
| 8
| 0
| 19
| 62
|
write_items
|
PERF-#4325: Improve perf of multi-column assignment in `__setitem__` when no new column names are assigning (#4455)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Myachev <anatoly.myachev@intel.com>
|
https://github.com/modin-project/modin.git
|
def write_items(self, row_numeric_index, col_numeric_index, broadcasted_items):
    """Write `broadcasted_items` into the positions addressed by the given
    numeric row/column indexers.

    NOTE(review): only the indexer normalization is visible in this view;
    the actual write logic appears to follow (truncated here) — confirm.
    """
    # Normalize non-slice indexers to lists so downstream code can rely
    # on list semantics (len(), repeat iteration).
    if not isinstance(row_numeric_index, slice):
        row_numeric_index = list(row_numeric_index)
    if not isinstance(col_numeric_index, slice):
        col_numeric_index = list(col_numeric_index)
| 58
|
query_compiler.py
|
Python
|
modin/core/storage_formats/base/query_compiler.py
|
eddfda4b521366c628596dcb5c21775c7f50eec1
|
modin
| 3
|
|
42,253
| 35
| 10
| 7
| 137
| 18
| 0
| 41
| 62
|
dark_palette
|
Convert color palette docstrings to notebooks (#3034)
* Convert color palette docstrings to notebooks and rerun all with py310 kernel
* Add v0.12.1 release notes to index
* Improve failure mode when ipywidgets is not involved
* Update palettes docstrings
* Remove all other doctest-style examples
* Remove doctest-oriented testing infrastructure
* Mention in release notes
* Skip colormap patch test on matplotlib's where it's not relevant
* Use more robust approach to mpl backcompat
|
https://github.com/mwaskom/seaborn.git
|
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
    """Make a sequential palette that blends from a dark desaturated gray
    to ``color`` (or the reverse when ``reverse`` is true)."""
    rgb = _color_to_rgb(color, input)
    hue, sat, _lum = husl.rgb_to_husl(*rgb)
    # Dark endpoint: same hue, heavily desaturated, fixed low lightness.
    gray = _color_to_rgb((hue, .15 * sat, 15), input="husl")
    if reverse:
        endpoints = [rgb, gray]
    else:
        endpoints = [gray, rgb]
    return blend_palette(endpoints, n_colors, as_cmap)
| 93
|
palettes.py
|
Python
|
seaborn/palettes.py
|
e644793f0ac2b1be178425f20f529121f37f29de
|
seaborn
| 2
|
|
268,507
| 7
| 8
| 2
| 33
| 5
| 0
| 7
| 21
|
put_file
|
Add `use_rsa_sha2_algorithms` option for paramiko (#78789)
Fixes #76737
Fixes #77673
Co-authored-by: Matt Clay <matt@mystile.com>
|
https://github.com/ansible/ansible.git
|
def put_file(self, in_path, out_path):
    """Transfer a file by delegating to the wrapped local connection."""
    return self._local.put_file(in_path, out_path)
| 21
|
connection_base.py
|
Python
|
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/plugin_utils/connection_base.py
|
76b746655a36807fa9198064ca9fe7c6cc00083a
|
ansible
| 1
|
|
276,425
| 11
| 9
| 2
| 39
| 6
| 0
| 11
| 25
|
_getNumVariables
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def _getNumVariables(self, graph_def):
return sum(node.op == "ReadVariableOp" for node in graph_def.node)
| 23
|
convert_to_constants_test.py
|
Python
|
keras/tests/convert_to_constants_test.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 2
|
|
135,643
| 16
| 6
| 2
| 21
| 3
| 0
| 17
| 45
|
actors
|
Refactor ActorManager to store underlying remote actors in dict. (#29953)
Signed-off-by: Jun Gong <jungong@anyscale.com>
|
https://github.com/ray-project/ray.git
|
def actors(self):
    """Return the private mapping of underlying remote actors."""
    # TODO(jungong) : remove this API once WorkerSet.remote_workers()
    # and WorkerSet._remote_workers() are removed.
    return self.__actors
| 10
|
actor_manager.py
|
Python
|
rllib/utils/actor_manager.py
|
b84dac2609bd587c43ed17bb6fa18fb7241a41de
|
ray
| 1
|
|
104,262
| 4
| 6
| 8
| 17
| 4
| 0
| 4
| 11
|
_build_pcollection
|
Add dev-only config to Natural Questions dataset (#3699)
* Add dev-only config to Natural Questions dataset
* Update dataset card
|
https://github.com/huggingface/datasets.git
|
def _build_pcollection(self, pipeline, filepaths):
| 49
|
natural_questions.py
|
Python
|
datasets/natural_questions/natural_questions.py
|
be701e9e89ab38022612c7263edc015bc7feaff9
|
datasets
| 1
|
|
296,541
| 9
| 7
| 4
| 34
| 4
| 0
| 9
| 30
|
async_clear_skipped
|
Add clear_skipped service to update entity (#70116)
|
https://github.com/home-assistant/core.git
|
async def async_clear_skipped(self) -> None:
    """Clear the skipped version and write the updated entity state."""
    self.__skipped_version = None
    self.async_write_ha_state()
| 18
|
__init__.py
|
Python
|
homeassistant/components/update/__init__.py
|
d65e12ab6eadd8a9d2e5b842a020d741a4eec0e0
|
core
| 1
|
|
8,248
| 5
| 6
| 12
| 30
| 6
| 0
| 5
| 12
|
explain
|
Explanation API and feature importance for GBM (#2564)
* add docstring for explain_ig
* solidify Explainer API
* add gbm explainer
* add dataclasses for typed explanations
* add GBM feature importance
* remove unused imports
* add tests
* fix test
* extract explanation into file
* rename base to explainer
* remove unused kwargs
* remove device placement from base explainer
* use proper field from gbm
|
https://github.com/ludwig-ai/ludwig.git
|
def explain(self) -> Tuple[List[Explanation], List[float]]:
| 19
|
explainer.py
|
Python
|
ludwig/explain/explainer.py
|
1caede3a2da4ec71cb8650c7e45120c26948a5b9
|
ludwig
| 1
|
|
84,169
| 19
| 11
| 13
| 114
| 12
| 0
| 22
| 82
|
test_upload_file_with_supplied_mimetype
|
tests: Refactor away result.json() calls with helpers.
Signed-off-by: Zixuan James Li <p359101898@gmail.com>
|
https://github.com/zulip/zulip.git
|
def test_upload_file_with_supplied_mimetype(self) -> None:
    """An explicit ``mimetype`` query parameter should determine the
    extension of the stored upload."""
    upload = StringIO("zulip!")
    upload.name = "pasted_file"
    hamlet = self.example_user("hamlet")
    response = self.api_post(
        hamlet, "/api/v1/user_uploads?mimetype=image/png", {"file": upload}
    )
    uri = self.assert_json_success(response)["uri"]
    # image/png must map to a .png extension on the stored name.
    self.assertTrue(uri.endswith("pasted_file.png"))
| 62
|
test_upload.py
|
Python
|
zerver/tests/test_upload.py
|
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
|
zulip
| 1
|
|
199,921
| 105
| 20
| 73
| 1,356
| 52
| 0
| 255
| 1,166
|
solve
|
added sympy functions for cos and sin instead of using the math module
|
https://github.com/sympy/sympy.git
|
def solve(self):
    """Solve the truss via the method of joints.

    Builds a 2N x 2N linear system (two equilibrium equations per node)
    whose unknowns are the support reactions followed by the member
    forces, solves it by matrix inversion, zeroes out numerically
    negligible forces, and stores results in ``self._reaction_loads``
    and ``self._internal_forces``.

    Raises ValueError when the truss is not statically determinate
    (2 * nodes != members + reaction components).
    """
    # Count reaction unknowns: pinned supports contribute 2, rollers 1.
    count_reaction_loads = 0
    for node in self._nodes:
        if node[0] in list(self._supports):
            if self._supports[node[0]]=='pinned':
                count_reaction_loads += 2
            elif self._supports[node[0]]=='roller':
                count_reaction_loads += 1
    if 2*len(self._nodes) != len(self._members) + count_reaction_loads:
        raise ValueError("The given truss cannot be solved")
    coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]
    load_matrix = zeros(2*len(self.nodes), 1)
    # Fill the RHS with applied loads (skipping reaction placeholders),
    # resolved into x/y components; angles are given in degrees.
    load_matrix_row = 0
    for node in self._nodes:
        if node[0] in list(self._loads):
            for load in self._loads[node[0]]:
                if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):
                    load_matrix[load_matrix_row] -= load[0]*cos(pi*load[1]/180)
                    load_matrix[load_matrix_row + 1] -= load[0]*sin(pi*load[1]/180)
        load_matrix_row += 2
    # Columns 0..count_reaction_loads-1 hold reaction unknowns.
    cols = 0
    row = 0
    for node in self._nodes:
        if node[0] in list(self._supports):
            if self._supports[node[0]]=='pinned':
                coefficients_matrix[row][cols] += 1
                coefficients_matrix[row+1][cols+1] += 1
                cols += 2
            elif self._supports[node[0]]=='roller':
                coefficients_matrix[row+1][cols] += 1
                cols += 1
        row += 2
    # Remaining columns hold member-force unknowns: each member adds its
    # unit direction cosines to the equilibrium rows of both end nodes.
    for member in list(self._members):
        start = self._members[member][0]
        end = self._members[member][1]
        length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)
        start_index = self._node_labels.index(start)
        end_index = self._node_labels.index(end)
        horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length
        vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length
        horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length
        vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length
        coefficients_matrix[start_index*2][cols] += horizontal_component_start
        coefficients_matrix[start_index*2+1][cols] += vertical_component_start
        coefficients_matrix[end_index*2][cols] += horizontal_component_end
        coefficients_matrix[end_index*2+1][cols] += vertical_component_end
        cols += 1
    forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix
    self._reaction_loads = {}
    i = 0
    # Find the smallest numeric applied load to use as a scale for
    # zeroing out numerically negligible solved forces.
    # NOTE(review): if no load is numeric, min_load stays inf and every
    # numeric force satisfies abs(f/inf) < 1e-10, zeroing them all —
    # confirm this is the intended behavior.
    min_load = inf
    for node in self._nodes:
        if node[0] in list(self._loads):
            for load in self._loads[node[0]]:
                if type(load[0]) not in [Symbol, Mul, Add]:
                    min_load = min(min_load, load[0])
    for j in range(len(forces_matrix)):
        if type(forces_matrix[j]) not in [Symbol, Mul, Add]:
            if abs(forces_matrix[j]/min_load) <1E-10:
                forces_matrix[j] = 0
    # Unpack the solution: reactions first (in node/support order), then
    # the internal member forces.
    for node in self._nodes:
        if node[0] in list(self._supports):
            if self._supports[node[0]]=='pinned':
                self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i]
                self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1]
                i += 2
            elif self._supports[node[0]]=='roller':
                self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i]
                i += 1
    for member in list(self._members):
        self._internal_forces[member] = forces_matrix[i]
        i += 1
    return
| 887
|
truss.py
|
Python
|
sympy/physics/continuum_mechanics/truss.py
|
58bcaa4c47c8f79c4323ee022b14b39eb0c3339b
|
sympy
| 30
|
|
86,711
| 6
| 6
| 5
| 19
| 3
| 0
| 6
| 20
|
validate_can_orderby
|
feat(metrics): Adds mqb query transform to MetricsQuery [TET-163] (#37652)
So far this PR has only test cases that shows expected output from MQB
(input to metrics abstraction layer) and the final output that would be
passed to metrics abstraction layer
I have printed out queries spit out by MQB and coalesced them into the
test cases in this PR, and so should cover all queries made by
performance to metrics:
- I have only listed a variation or two of the same functions for
example `p75(transaction.duration)` but I did not add
`p50(transaction.duration)` because the logic would be the same so need
to add this to these tests
- Only thing missing is the recent `countIf` functions added for
performance which I will add later on listed here ->
https://github.com/getsentry/sentry/blob/master/src/sentry/search/events/datasets/metrics.py#L179-L276
### Changes to MQB output:-
- Removed tags from select statement, as if they are listed in the
`groupBy`, they will be returned by metrics abstraction layer
- Having clauses are not supported
- Transform functions are not supported
- Removed ordering by `bucketed_time` as this behavior is handled post
query by metrics abstraction layer
- Replaced metric ids/names with MRI as this is the naming contract we
can guarantee
- Replaced tag values with their tag names because metrics abstraction
layer will handle the indexer resolving and reverse resolving
- Replaced SnQL function definition with their corresponding derived
metrics so for example failure_rate, apdex, user_misery,
team_key_transactions, count_web_vitals and histogram functions
### ToDo from me to get this test to pass
- [x] `snuba-sdk` needs to support MRI as a column name in `Column`
[TET-323]
- [x] `MetricField` needs to support `args` and `alias` [TET-320,
TET-322]
- [x] Add `MetricGroupByField` for `groupBy` columns that accept an
`alias` [TET-320]
- [x] Aliasing functionality needs to be supported [TET-320]
- [x] Add derived metric for `team_key_transaction` [TET-325]
- [x] Add derived metric for `count_web_vital_measurements` [TET-161]
- [x] Add derived metric for `rate` [TET-129]
- [x] `MetricsQuery` accepts MRI rather than public facing names
[TET-321]
- [x] Support for tuples conditions [TET-319]
- [x] Add derived metrics for the 3 `countIf` functions [TET-326]
- [x] Transform MQB `Query` object to `MetricsQuery` (This PR)
- [x] Figure out addition of Granularity processor [TET-327]
- [x] Add Invalid test cases (This PR)
- [ ] Discuss granularity differences/query bounds (Will be handled in
subsequent PR [TET-452])
[TET-323]:
https://getsentry.atlassian.net/browse/TET-323?atlOrigin=eyJpIjoiNWRkNTljNzYxNjVmNDY3MDlhMDU5Y2ZhYzA5YTRkZjUiLCJwIjoiZ2l0aHViLWNvbS1KU1cifQ
|
https://github.com/getsentry/sentry.git
|
def validate_can_orderby(self) -> None:
    """Abstract hook: subclasses must implement orderby validation."""
    raise NotImplementedError
| 10
|
base.py
|
Python
|
src/sentry/snuba/metrics/fields/base.py
|
4acb1834c41648180bbb41cbe248b50d65e5977d
|
sentry
| 1
|
|
275,158
| 2
| 6
| 5
| 13
| 2
| 0
| 2
| 5
|
create_identity_with_nan_gradients_fn
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def create_identity_with_nan_gradients_fn(have_nan_gradients):
| 16
|
test_util.py
|
Python
|
keras/mixed_precision/test_util.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 1
|
|
290,680
| 4
| 8
| 2
| 29
| 4
| 0
| 4
| 18
|
get_currency
|
Fix Growatt incorrect energy dashboard values for grid import (#82163)
* Fix Growatt incorrect energy dashboard values for grid import (#80905)
* Growatt - addressing review comments (#80905)
* Growatt - addressing more review comments (#80905)
|
https://github.com/home-assistant/core.git
|
def get_currency(self):
    """Return the 'currency' entry from the stored data mapping, or
    ``None`` when it is absent."""
    currency = self.data.get("currency")
    return currency
| 15
|
sensor.py
|
Python
|
homeassistant/components/growatt_server/sensor.py
|
93401df73fc688d7c8395128f5484d59155a31cc
|
core
| 1
|
|
168,364
| 5
| 7
| 2
| 23
| 4
| 0
| 5
| 19
|
_href_getter
|
ENH: pd.read_html argument to extract hrefs along with text from cells (#45973)
* ENH: pd.read_html argument to extract hrefs along with text from cells
* Fix typing error
* Simplify tests
* Fix still incorrect typing
* Summarise whatsnew entry and move detailed explanation into user guide
* More flexible link extraction
* Suggested changes
* extract_hrefs -> extract_links
* Move versionadded to correct place and improve docstring for extract_links (@attack68)
* Test for invalid extract_links value
* Test all extract_link options
* Fix for MultiIndex headers (also fixes tests)
* Test that text surrounding <a> tag is still captured
* Test for multiple <a> tags in cell
* Fix all tests, with both MultiIndex -> Index and np.nan -> None conversions resolved
* Add back EOF newline to test_html.py
* Correct user guide example
* Update pandas/io/html.py
* Update pandas/io/html.py
* Update pandas/io/html.py
* Simplify MultiIndex -> Index conversion
* Move unnecessary fixtures into test body
* Simplify statement
* Fix code checks
Co-authored-by: JHM Darbyshire <24256554+attack68@users.noreply.github.com>
|
https://github.com/pandas-dev/pandas.git
|
def _href_getter(self, obj):
    """Abstract hook for extracting an href from *obj*; always raises here."""
    raise AbstractMethodError(self)
| 13
|
html.py
|
Python
|
pandas/io/html.py
|
9f81aa65a416510b0ad7cb1d473600f261169813
|
pandas
| 1
|
|
169,425
| 12
| 7
| 5
| 39
| 6
| 0
| 16
| 39
|
series_and_frame
|
TST/CLN: Use more frame_or_series fixture (#48926)
* TST/CLN: Use more frame_or_series fixture
* Revert for base ext tests
|
https://github.com/pandas-dev/pandas.git
|
def series_and_frame(frame_or_series, series, frame):
    """Return the ``series`` or ``frame`` fixture matching the requested
    class; implicitly ``None`` for anything else."""
    if frame_or_series == DataFrame:
        return frame
    if frame_or_series == Series:
        return series
| 24
|
conftest.py
|
Python
|
pandas/tests/resample/conftest.py
|
e25aa9d313dc372c70d826e3c57c65b6724190e5
|
pandas
| 3
|
|
177,480
| 19
| 7
| 14
| 112
| 14
| 0
| 40
| 82
|
freeze
|
Add clear edges method to the list of methods to be frozen by the nx.… (#6190)
* Add clear edges method to the list of methods to be frozen by the nx.freeze function
* Change tests to create new graph instead of using class attribute
|
https://github.com/networkx/networkx.git
|
def freeze(G):
    """Disable graph mutation by rebinding every mutating method of *G*
    to ``frozen``, mark the graph frozen, and return it."""
    mutators = (
        "add_node",
        "add_nodes_from",
        "remove_node",
        "remove_nodes_from",
        "add_edge",
        "add_edges_from",
        "add_weighted_edges_from",
        "remove_edge",
        "remove_edges_from",
        "clear",
        "clear_edges",
    )
    for method_name in mutators:
        setattr(G, method_name, frozen)
    G.frozen = True
    return G
| 68
|
function.py
|
Python
|
networkx/classes/function.py
|
895963729231fe02153afe92ecc946a400247f1d
|
networkx
| 1
|
|
250,232
| 70
| 11
| 18
| 232
| 19
| 0
| 86
| 231
|
test_icu_word_boundary
|
Add optional ICU support for user search (#14464)
Fixes #13655
This change uses ICU (International Components for Unicode) to improve boundary detection in user search.
This change also adds a new dependency on libicu-dev and pkg-config for the Debian packages, which are available in all supported distros.
|
https://github.com/matrix-org/synapse.git
|
def test_icu_word_boundary(self) -> None:
    """ICU-backed tokenisation handles names that plain regexes split wrongly."""
    display_name = "Gáo"
    # Python's \w regex breaks this name into two tokens because the á is
    # really a lowercase a followed by a U+0301 combining acute accent;
    # ICU support is specifically what fixes this boundary detection.
    tokens = re.findall(r"([\w\-]+)", display_name, re.UNICODE)
    self.assertEqual(len(tokens), 2)

    profile_update = self.store.update_profile_in_user_dir(ALICE, display_name, None)
    self.get_success(profile_update)
    self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE,)))

    # Searching for the full display name must yield exactly this one user.
    response = self.get_success(self.store.search_user_dir(BOB, display_name, 10))
    self.assertFalse(response["limited"])
    self.assertEqual(len(response["results"]), 1)
    self.assertDictEqual(
        response["results"][0],
        {"user_id": ALICE, "display_name": display_name, "avatar_url": None},
    )
| 141
|
test_user_directory.py
|
Python
|
tests/storage/test_user_directory.py
|
2a3cd59dd06411a79fb7500970db1b98f0d87695
|
synapse
| 1
|
|
122,813
| 11
| 14
| 4
| 60
| 11
| 0
| 11
| 31
|
ppermute
|
(NFC) Prepare for migration from producing MHLO to producing StableHLO
This CL renames occurrences of "mhlo" in: 1) names, 2) tests, 3) prose in order
to prepare for the upcoming migration.
Unchanged occurrences:
1) Public API that contains "mhlo", e.g. XlaLowering.mhlo and the "mhlo"
argument value in Lowering.as_text and Lowering.compiler_ir.
2) Documentation (changelog, JEPs, IR examples, etc).
3) One rare situation where prose says "StableHLO" and "MHLO" in one sentence,
so both are necessary to disambiguate.
PiperOrigin-RevId: 495771153
|
https://github.com/google/jax.git
|
def ppermute(x, axis_name, perm):
    """Bind the ppermute primitive over every leaf of the pytree *x*.

    *perm* is normalised to a tuple of tuples so the primitive receives a
    hashable parameter regardless of the input's container types.
    """
    normalized_perm = tuple(tuple(pair) for pair in perm)
    bind_leaf = partial(ppermute_p.bind, axis_name=axis_name, perm=normalized_perm)
    return tree_util.tree_map(bind_leaf, x)
| 40
|
parallel.py
|
Python
|
jax/_src/lax/parallel.py
|
b8ae8e3fa10f9abe998459fac1513915acee776d
|
jax
| 1
|
|
225,850
| 4
| 6
| 32
| 15
| 1
| 0
| 4
| 7
|
test_get_text_splitter_partial
|
Add tree summarize response mode + fix text splitting bug (#134)
Co-authored-by: Jerry Liu <jerry@robustintelligence.com>
|
https://github.com/jerryjliu/llama_index.git
|
def test_get_text_splitter_partial() -> None:
| 194
|
test_prompt_helper.py
|
Python
|
tests/indices/test_prompt_helper.py
|
76beea80a83de67966b7e682819924833dce3ce4
|
llama_index
| 1
|
|
274,550
| 14
| 9
| 4
| 96
| 9
| 1
| 18
| 29
|
cosine_similarity
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def cosine_similarity(y_true, y_pred, axis=-1):
    """Return the negated cosine similarity between labels and predictions.

    Both inputs are L2-normalized along *axis* before taking the elementwise
    product and summing; the sum is negated so that minimising the result
    maximises similarity (the convention for a loss).
    """
    true_unit = tf.linalg.l2_normalize(y_true, axis=axis)
    pred_unit = tf.linalg.l2_normalize(y_pred, axis=axis)
    return -tf.reduce_sum(true_unit * pred_unit, axis=axis)
@keras_export("keras.losses.CosineSimilarity")
|
@keras_export("keras.losses.CosineSimilarity")
| 55
|
losses.py
|
Python
|
keras/losses.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 1
|
260,545
| 8
| 7
| 4
| 42
| 6
| 0
| 8
| 36
|
fit
|
MAINT Use _validate_params in LocallyLinearEmbedding (#23938)
Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr>
|
https://github.com/scikit-learn/scikit-learn.git
|
def fit(self, X, y=None):
    """Fit the estimator to the training data X.

    Hyper-parameters are validated first, then the embedding is computed
    in place via ``self._fit_transform``.  *y* is ignored and present only
    for scikit-learn API consistency.  Returns the fitted estimator.
    """
    self._validate_params()
    self._fit_transform(X)
    return self
| 25
|
_locally_linear.py
|
Python
|
sklearn/manifold/_locally_linear.py
|
ceeda362402bfc978bcc93d02481fe28e21a07ad
|
scikit-learn
| 1
|
|
6,406
| 5
| 8
| 2
| 34
| 6
| 0
| 5
| 19
|
raw_temp_path
|
Add and expand docstrings in base_dataset.py (#1819)
|
https://github.com/ludwig-ai/ludwig.git
|
def raw_temp_path(self):
    """Return the path of the temporary "_raw" folder inside the download dir."""
    base_dir = self.download_dir
    return os.path.join(base_dir, "_raw")
| 19
|
base_dataset.py
|
Python
|
ludwig/datasets/base_dataset.py
|
d0bcbb2a6e2ab82501fd34ef583329ff2ac22a15
|
ludwig
| 1
|
|
248,224
| 14
| 10
| 17
| 53
| 8
| 0
| 15
| 54
|
get_prev_state_ids
|
Refactor `EventContext` (#12689)
Refactor how the `EventContext` class works, with the intention of reducing the amount of state we fetch from the DB during event processing.
The idea here is to get rid of the cached `current_state_ids` and `prev_state_ids` that live in the `EventContext`, and instead defer straight to the database (and its caching).
One change that may have a noticeable effect is that we now no longer prefill the `get_current_state_ids` cache on a state change. However, that query is relatively light, since its just a case of reading a table from the DB (unlike fetching state at an event which is more heavyweight). For deployments with workers this cache isn't even used.
Part of #12684
|
https://github.com/matrix-org/synapse.git
|
async def get_prev_state_ids(self) -> StateMap[str]:
    """Return the room state map as of just before this event.

    Requires ``state_group_before_event`` to have been resolved already
    (hence the assert); the actual lookup is delegated to the state
    storage layer, which handles its own caching.
    """
    assert self.state_group_before_event is not None
    return await self._storage.state.get_state_ids_for_group(
        self.state_group_before_event
    )
| 32
|
snapshot.py
|
Python
|
synapse/events/snapshot.py
|
c72d26c1e1e997e63cef1c474010a7db783f8022
|
synapse
| 1
|
|
277,144
| 38
| 13
| 10
| 141
| 16
| 0
| 56
| 120
|
_process_traceback_frames
|
Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
|
https://github.com/keras-team/keras.git
|
def _process_traceback_frames(tb):
    """Rebuild traceback *tb*, keeping only frames accepted by ``include_frame``.

    Returns a new traceback chain containing the accepted frames.  When every
    frame is filtered out but the original traceback was non-empty, a chain is
    built from the final walked frame so the result is never empty.
    """
    frames = list(traceback.walk_tb(tb))
    filtered_tb = None
    for frame, lineno in reversed(frames):
        if include_frame(frame.f_code.co_filename):
            filtered_tb = types.TracebackType(filtered_tb, frame, frame.f_lasti, lineno)
    if filtered_tb is None and frames:
        # Nothing survived filtering: fall back to the last walked frame so
        # the user still sees a traceback.
        frame, lineno = frames[-1]
        filtered_tb = types.TracebackType(filtered_tb, frame, frame.f_lasti, lineno)
    return filtered_tb
| 90
|
traceback_utils.py
|
Python
|
keras/utils/traceback_utils.py
|
84afc5193d38057e2e2badf9c889ea87d80d8fbf
|
keras
| 5
|
|
268,747
| 6
| 8
| 3
| 39
| 7
| 0
| 6
| 20
|
cgroup_path
|
ansible-test - Improve container management. (#78550)
See changelogs/fragments/ansible-test-container-management.yml for details.
|
https://github.com/ansible/ansible.git
|
def cgroup_path(self) -> t.Optional[str]:
    """Return the cgroup path recorded in cached state, or None when unset."""
    cached_state = self.state
    return cached_state.get('cgroup_path')
| 22
|
host_profiles.py
|
Python
|
test/lib/ansible_test/_internal/host_profiles.py
|
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
|
ansible
| 1
|
|
260,530
| 31
| 11
| 9
| 112
| 13
| 0
| 38
| 70
|
sigmoid_kernel
|
DOC Ensure `sigmoid_kernel` passes numpydoc validation (#23955)
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>
|
https://github.com/scikit-learn/scikit-learn.git
|
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel tanh(gamma * <X, Y> + coef0) pairwise.

    ``gamma`` defaults to ``1 / n_features``.  The dot-product matrix is
    scaled, shifted, and passed through tanh in place to avoid allocating
    an extra array.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    kernel *= gamma
    kernel += coef0
    np.tanh(kernel, out=kernel)  # in-place tanh, no extra allocation
    return kernel
| 76
|
pairwise.py
|
Python
|
sklearn/metrics/pairwise.py
|
ef92c6761b64fc1ed2f9051e15310906be14a8fb
|
scikit-learn
| 2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.