| nwo (string, 5-106 chars) | sha (string, 40 chars) | path (string, 4-174 chars) | language (1 class) | identifier (string, 1-140 chars) | parameters (string, 0-87.7k chars) | argument_list (1 class) | return_statement (string, 0-426k chars) | docstring (string, 0-64.3k chars) | docstring_summary (string, 0-26.3k chars) | docstring_tokens (list) | function (string, 18-4.83M chars) | function_tokens (list) | url (string, 83-304 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
golismero/golismero | 7d605b937e241f51c1ca4f47b20f755eeefb9d76 | thirdparty_libs/BeautifulSoup.py | python | Tag.__delitem__ | (self, key) | Deleting tag[key] deletes all 'key' attributes for the tag. | Deleting tag[key] deletes all 'key' attributes for the tag. | [
"Deleting",
"tag",
"[",
"key",
"]",
"deletes",
"all",
"key",
"attributes",
"for",
"the",
"tag",
"."
] | def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key] | [
"def",
"__delitem__",
"(",
"self",
",",
"key",
")",
":",
"for",
"item",
"in",
"self",
".",
"attrs",
":",
"if",
"item",
"[",
"0",
"]",
"==",
"key",
":",
"self",
".",
"attrs",
".",
"remove",
"(",
"item",
")",
"#We don't break because bad HTML can define th... | https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/BeautifulSoup.py#L644-L653 | ||
21dotco/two1-python | 4e833300fd5a58363e3104ed4c097631e5d296d3 | two1/bitcoin/script_interpreter.py | python | ScriptInterpreter._op_abs | (self) | The input is made positive | The input is made positive | [
"The",
"input",
"is",
"made",
"positive"
] | def _op_abs(self):
""" The input is made positive
"""
self._check_stack_len(1)
i = self._get_int()
self._stack.append(abs(i)) | [
"def",
"_op_abs",
"(",
"self",
")",
":",
"self",
".",
"_check_stack_len",
"(",
"1",
")",
"i",
"=",
"self",
".",
"_get_int",
"(",
")",
"self",
".",
"_stack",
".",
"append",
"(",
"abs",
"(",
"i",
")",
")"
] | https://github.com/21dotco/two1-python/blob/4e833300fd5a58363e3104ed4c097631e5d296d3/two1/bitcoin/script_interpreter.py#L551-L556 | ||
bytedance/fedlearner | 89f5a2341d9b3c9100c799473fe5f436da7e87a2 | web_console_v2/api/fedlearner_webconsole/sparkapp/service.py | python | SparkAppService.submit_sparkapp | (self, config: SparkAppConfig) | return SparkAppInfo.from_k8s_resp(resp) | submit sparkapp
Args:
config (SparkAppConfig): sparkapp config
Raises:
InternalException: if fail to get sparkapp
Returns:
SparkAppInfo: resp of sparkapp | submit sparkapp | [
"submit",
"sparkapp"
] | def submit_sparkapp(self, config: SparkAppConfig) -> SparkAppInfo:
"""submit sparkapp
Args:
config (SparkAppConfig): sparkapp config
Raises:
InternalException: if fail to get sparkapp
Returns:
SparkAppInfo: resp of sparkapp
"""
sparkapp_path = config.files_path
if config.files_path is None:
_, sparkapp_path = self._get_sparkapp_upload_path(config.name)
self._clear_and_make_an_empty_dir(sparkapp_path)
with tempfile.TemporaryDirectory() as temp_dir:
tar_path = os.path.join(temp_dir, 'files.tar')
with open(tar_path, 'wb') as fwrite:
fwrite.write(config.files)
self._copy_files_to_target_filesystem(
source_filesystem_path=tar_path,
target_filesystem_path=sparkapp_path)
config_dict = config.build_config(sparkapp_path)
logging.info(f'submit sparkapp, config: {config_dict}')
resp = k8s_client.create_sparkapplication(config_dict)
return SparkAppInfo.from_k8s_resp(resp) | [
"def",
"submit_sparkapp",
"(",
"self",
",",
"config",
":",
"SparkAppConfig",
")",
"->",
"SparkAppInfo",
":",
"sparkapp_path",
"=",
"config",
".",
"files_path",
"if",
"config",
".",
"files_path",
"is",
"None",
":",
"_",
",",
"sparkapp_path",
"=",
"self",
".",... | https://github.com/bytedance/fedlearner/blob/89f5a2341d9b3c9100c799473fe5f436da7e87a2/web_console_v2/api/fedlearner_webconsole/sparkapp/service.py#L103-L131 | |
AppScale/gts | 46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9 | AppServer/lib/jinja2-2.6/jinja2/filters.py | python | do_replace | (eval_ctx, s, old, new, count=None) | return s.replace(soft_unicode(old), soft_unicode(new), count) | Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced:
.. sourcecode:: jinja
{{ "Hello World"|replace("Hello", "Goodbye") }}
-> Goodbye World
{{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-> d'oh, d'oh, aaargh | Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced: | [
"Return",
"a",
"copy",
"of",
"the",
"value",
"with",
"all",
"occurrences",
"of",
"a",
"substring",
"replaced",
"with",
"a",
"new",
"one",
".",
"The",
"first",
"argument",
"is",
"the",
"substring",
"that",
"should",
"be",
"replaced",
"the",
"second",
"is",
... | def do_replace(eval_ctx, s, old, new, count=None):
"""Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced:
.. sourcecode:: jinja
{{ "Hello World"|replace("Hello", "Goodbye") }}
-> Goodbye World
{{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-> d'oh, d'oh, aaargh
"""
if count is None:
count = -1
if not eval_ctx.autoescape:
return unicode(s).replace(unicode(old), unicode(new), count)
if hasattr(old, '__html__') or hasattr(new, '__html__') and \
not hasattr(s, '__html__'):
s = escape(s)
else:
s = soft_unicode(s)
return s.replace(soft_unicode(old), soft_unicode(new), count) | [
"def",
"do_replace",
"(",
"eval_ctx",
",",
"s",
",",
"old",
",",
"new",
",",
"count",
"=",
"None",
")",
":",
"if",
"count",
"is",
"None",
":",
"count",
"=",
"-",
"1",
"if",
"not",
"eval_ctx",
".",
"autoescape",
":",
"return",
"unicode",
"(",
"s",
... | https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/jinja2-2.6/jinja2/filters.py#L74-L98 | |
home-assistant/core | 265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1 | homeassistant/components/shelly/__init__.py | python | async_block_device_setup | (
hass: HomeAssistant, entry: ConfigEntry, device: BlockDevice
) | Set up a block based device that is online. | Set up a block based device that is online. | [
"Set",
"up",
"a",
"block",
"based",
"device",
"that",
"is",
"online",
"."
] | async def async_block_device_setup(
hass: HomeAssistant, entry: ConfigEntry, device: BlockDevice
) -> None:
"""Set up a block based device that is online."""
device_wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
BLOCK
] = BlockDeviceWrapper(hass, entry, device)
device_wrapper.async_setup()
platforms = BLOCK_SLEEPING_PLATFORMS
if not entry.data.get(CONF_SLEEP_PERIOD):
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
REST
] = ShellyDeviceRestWrapper(hass, device, entry)
platforms = BLOCK_PLATFORMS
hass.config_entries.async_setup_platforms(entry, platforms) | [
"async",
"def",
"async_block_device_setup",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
",",
"device",
":",
"BlockDevice",
")",
"->",
"None",
":",
"device_wrapper",
"=",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"DATA_CONFIG_ENTRY"... | https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/shelly/__init__.py#L212-L229 | ||
taers232c/GAMADV-XTD3 | 3097d6c24b7377037c746317908fcaff8404d88a | src/gam/gdata/gauth.py | python | SecureAuthSubToken.from_url | (str_or_uri, rsa_private_key) | return SecureAuthSubToken(token_and_scopes[0], rsa_private_key,
token_and_scopes[1]) | Creates a new SecureAuthSubToken using information in the URL.
Uses auth_sub_string_from_url.
Args:
str_or_uri: The current page's URL (as a str or atom.http_core.Uri)
which should contain a token query parameter since the Google auth
server redirected the user's browser to this URL.
rsa_private_key: str the private RSA key cert used to sign all requests
made with this token. | Creates a new SecureAuthSubToken using information in the URL. | [
"Creates",
"a",
"new",
"SecureAuthSubToken",
"using",
"information",
"in",
"the",
"URL",
"."
] | def from_url(str_or_uri, rsa_private_key):
"""Creates a new SecureAuthSubToken using information in the URL.
Uses auth_sub_string_from_url.
Args:
str_or_uri: The current page's URL (as a str or atom.http_core.Uri)
which should contain a token query parameter since the Google auth
server redirected the user's browser to this URL.
rsa_private_key: str the private RSA key cert used to sign all requests
made with this token.
"""
token_and_scopes = auth_sub_string_from_url(str_or_uri)
return SecureAuthSubToken(token_and_scopes[0], rsa_private_key,
token_and_scopes[1]) | [
"def",
"from_url",
"(",
"str_or_uri",
",",
"rsa_private_key",
")",
":",
"token_and_scopes",
"=",
"auth_sub_string_from_url",
"(",
"str_or_uri",
")",
"return",
"SecureAuthSubToken",
"(",
"token_and_scopes",
"[",
"0",
"]",
",",
"rsa_private_key",
",",
"token_and_scopes"... | https://github.com/taers232c/GAMADV-XTD3/blob/3097d6c24b7377037c746317908fcaff8404d88a/src/gam/gdata/gauth.py#L463-L477 | |
lovelylain/pyctp | fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d | futures2/ctp/__init__.py | python | TraderApi.Release | (self) | 删除接口对象本身
@remark 不再使用本接口对象时,调用该函数删除接口对象 | 删除接口对象本身 | [
"删除接口对象本身"
] | def Release(self):
"""删除接口对象本身
@remark 不再使用本接口对象时,调用该函数删除接口对象
""" | [
"def",
"Release",
"(",
"self",
")",
":"
] | https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/futures2/ctp/__init__.py#L168-L171 | ||
oilshell/oil | 94388e7d44a9ad879b12615f6203b38596b5a2d3 | Python-2.7.13/Lib/idlelib/run.py | python | manage_socket | (address) | [] | def manage_socket(address):
for i in range(3):
time.sleep(i)
try:
server = MyRPCServer(address, MyHandler)
break
except socket.error as err:
print>>sys.__stderr__,"IDLE Subprocess: socket error: "\
+ err.args[1] + ", retrying...."
else:
print>>sys.__stderr__, "IDLE Subprocess: Connection to "\
"IDLE GUI failed, exiting."
show_socket_error(err, address)
global exit_now
exit_now = True
return
server.handle_request() | [
"def",
"manage_socket",
"(",
"address",
")",
":",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"time",
".",
"sleep",
"(",
"i",
")",
"try",
":",
"server",
"=",
"MyRPCServer",
"(",
"address",
",",
"MyHandler",
")",
"break",
"except",
"socket",
".",
... | https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/idlelib/run.py#L136-L152 | ||||
caktux/pytrader | b45b216dab3db78d6028d85e9a6f80419c22cea0 | api.py | python | Api._on_op_result | (self, msg) | handle result of authenticated API call (op:result, id:xxxxxx) | handle result of authenticated API call (op:result, id:xxxxxx) | [
"handle",
"result",
"of",
"authenticated",
"API",
"call",
"(",
"op",
":",
"result",
"id",
":",
"xxxxxx",
")"
] | def _on_op_result(self, msg):
"""handle result of authenticated API call (op:result, id:xxxxxx)"""
result = msg["result"]
reqid = msg["id"]
# if reqid == "idkey":
# self.debug("### got key, subscribing to account messages")
# self._idkey = result
# self.client.on_idkey_received(result)
# self.ready_idkey = True
# self.check_connect_ready()
if reqid == "orders":
# self.debug("### got own order list")
# self.count_submitted = 0
self.orderbook.init_own(result)
# self.debug("### have %d own orders for %s/%s" % (len(self.orderbook.owns), self.curr_base, self.curr_quote))
elif reqid == "info":
# self.debug("### got account info")
self.wallet = {}
for currency in result:
self.wallet[currency] = float(result[currency])
# ## Old Gox shit
# wallet = result["Wallets"]
# self.monthly_volume = int(result["Monthly_Volume"]["value_int"])
# self.trade_fee = float(result["Trade_Fee"])
# for currency in wallet:
# self.wallet[currency] = int(
# wallet[currency]["Balance"]["value_int"])
self.signal_wallet(self, None)
self.ready_info = True
if self.client._wait_for_next_info:
self.client._wait_for_next_info = False
self.check_connect_ready()
elif reqid == "volume":
self.monthly_volume = result['volume']
self.currency = result['currency']
self.trade_fee = result['fee']
elif reqid == "order_lag":
lag_usec = result["lag"]
lag_text = result["lag_text"]
# self.debug("### got order lag: %s" % lag_text)
self.order_lag = lag_usec
self.signal_orderlag(self, (lag_usec, lag_text))
elif "order_add:" in reqid:
# order/add has been acked and we got an oid, now we can already
# insert a pending order into the owns list (it will be pending
# for a while when the server is busy but the most important thing
# is that we have the order-id already).
parts = reqid.split(":")
typ = parts[1]
price = float(parts[2])
volume = float(parts[3])
oid = result
self.debug("### got ack for order/add:", typ, price, volume, oid)
self.count_submitted -= 1
self.orderbook.add_own(Order(price, volume, typ, oid, "pending"))
elif "order_cancel:" in reqid:
# cancel request has been acked but we won't remove it from our
# own list now because it is still active on the server.
# do nothing now, let things happen in the user_order message
parts = reqid.split(":")
oid = parts[1]
self.debug("### got ack for order/cancel:", oid)
else:
self.debug("### _on_op_result() ignoring:", msg) | [
"def",
"_on_op_result",
"(",
"self",
",",
"msg",
")",
":",
"result",
"=",
"msg",
"[",
"\"result\"",
"]",
"reqid",
"=",
"msg",
"[",
"\"id\"",
"]",
"# if reqid == \"idkey\":",
"# self.debug(\"### got key, subscribing to account messages\")",
"# self._idkey = result"... | https://github.com/caktux/pytrader/blob/b45b216dab3db78d6028d85e9a6f80419c22cea0/api.py#L951-L1026 | ||
bjmayor/hacker | e3ce2ad74839c2733b27dac6c0f495e0743e1866 | venv/lib/python3.5/site-packages/pip/utils/ui.py | python | InterruptibleMixin.finish | (self) | Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted. | Restore the original SIGINT handler after finishing. | [
"Restore",
"the",
"original",
"SIGINT",
"handler",
"after",
"finishing",
"."
] | def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler) | [
"def",
"finish",
"(",
"self",
")",
":",
"super",
"(",
"InterruptibleMixin",
",",
"self",
")",
".",
"finish",
"(",
")",
"signal",
"(",
"SIGINT",
",",
"self",
".",
"original_handler",
")"
] | https://github.com/bjmayor/hacker/blob/e3ce2ad74839c2733b27dac6c0f495e0743e1866/venv/lib/python3.5/site-packages/pip/utils/ui.py#L94-L102 | ||
openedx/ecommerce | db6c774e239e5aa65e5a6151995073d364e8c896 | ecommerce/extensions/refund/signals.py | python | track_completed_refund | (sender, refund=None, **kwargs) | Emit a tracking event when a refund is completed. | Emit a tracking event when a refund is completed. | [
"Emit",
"a",
"tracking",
"event",
"when",
"a",
"refund",
"is",
"completed",
"."
] | def track_completed_refund(sender, refund=None, **kwargs): # pylint: disable=unused-argument
"""Emit a tracking event when a refund is completed."""
if refund.total_credit_excl_tax <= 0:
return
properties = {
'orderId': refund.order.number,
'products': [
{
'id': line.order_line.partner_sku,
'quantity': line.quantity,
} for line in refund.lines.all()
],
'total': refund.total_credit_excl_tax,
}
# The initial version of the refund email only supports refunding a single course.
first_product = refund.lines.first().order_line.product
product_class = first_product.get_product_class().name
if product_class == SEAT_PRODUCT_CLASS_NAME:
title = first_product.course.name
else:
title = first_product.title
properties['title'] = title
track_segment_event(refund.order.site, refund.user, 'Order Refunded', properties) | [
"def",
"track_completed_refund",
"(",
"sender",
",",
"refund",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"if",
"refund",
".",
"total_credit_excl_tax",
"<=",
"0",
":",
"return",
"properties",
"=",
"{",
"'orderId'",
":",
... | https://github.com/openedx/ecommerce/blob/db6c774e239e5aa65e5a6151995073d364e8c896/ecommerce/extensions/refund/signals.py#L14-L38 | ||
kanzure/nanoengineer | 874e4c9f8a9190f093625b267f9767e19f82e6c4 | cad/src/ne1_ui/WhatsThisText_for_CommandToolbars.py | python | whatsThisTextForCommandToolbarBuildButton | (button) | return | "What's This" text for the Build button (menu). | "What's This" text for the Build button (menu). | [
"What",
"s",
"This",
"text",
"for",
"the",
"Build",
"button",
"(",
"menu",
")",
"."
] | def whatsThisTextForCommandToolbarBuildButton(button):
"""
"What's This" text for the Build button (menu).
"""
button.setWhatsThis(
"""<b>Build</b>
<p>
<img source=\"ui/actions/Command Toolbar/ControlArea/Build.png\"><br>
The <b>Build Command Set</b> for modeling and editing structures
interactively.
</p>""")
return | [
"def",
"whatsThisTextForCommandToolbarBuildButton",
"(",
"button",
")",
":",
"button",
".",
"setWhatsThis",
"(",
"\"\"\"<b>Build</b>\n <p>\n <img source=\\\"ui/actions/Command Toolbar/ControlArea/Build.png\\\"><br>\n The <b>Build Command Set</b> for modeling and editing stru... | https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/ne1_ui/WhatsThisText_for_CommandToolbars.py#L19-L30 | |
googleads/google-ads-python | 2a1d6062221f6aad1992a6bcca0e7e4a93d2db86 | google/ads/googleads/v8/services/services/ad_group_ad_service/transports/grpc.py | python | AdGroupAdServiceGrpcTransport.mutate_ad_group_ads | (
self,
) | return self._stubs["mutate_ad_group_ads"] | r"""Return a callable for the mutate ad group ads method over gRPC.
Creates, updates, or removes ads. Operation statuses are
returned.
List of thrown errors: `AdCustomizerError <>`__ `AdError <>`__
`AdGroupAdError <>`__ `AdSharingError <>`__ `AdxError <>`__
`AssetError <>`__ `AssetLinkError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`CollectionSizeError <>`__ `ContextError <>`__
`DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__
`FeedAttributeReferenceError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `FunctionError <>`__
`FunctionParsingError <>`__ `HeaderError <>`__ `IdError <>`__
`ImageError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MediaBundleError <>`__ `MediaFileError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NotEmptyError <>`__
`NullError <>`__ `OperationAccessDeniedError <>`__
`OperatorError <>`__ `PolicyFindingError <>`__
`PolicyValidationParameterError <>`__
`PolicyViolationError <>`__ `QuotaError <>`__ `RangeError <>`__
`RequestError <>`__ `ResourceCountLimitExceededError <>`__
`SizeLimitError <>`__ `StringFormatError <>`__
`StringLengthError <>`__ `UrlFieldError <>`__
Returns:
Callable[[~.MutateAdGroupAdsRequest],
~.MutateAdGroupAdsResponse]:
A function that, when called, will call the underlying RPC
on the server. | r"""Return a callable for the mutate ad group ads method over gRPC. | [
"r",
"Return",
"a",
"callable",
"for",
"the",
"mutate",
"ad",
"group",
"ads",
"method",
"over",
"gRPC",
"."
] | def mutate_ad_group_ads(
self,
) -> Callable[
[ad_group_ad_service.MutateAdGroupAdsRequest],
ad_group_ad_service.MutateAdGroupAdsResponse,
]:
r"""Return a callable for the mutate ad group ads method over gRPC.
Creates, updates, or removes ads. Operation statuses are
returned.
List of thrown errors: `AdCustomizerError <>`__ `AdError <>`__
`AdGroupAdError <>`__ `AdSharingError <>`__ `AdxError <>`__
`AssetError <>`__ `AssetLinkError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`CollectionSizeError <>`__ `ContextError <>`__
`DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__
`FeedAttributeReferenceError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `FunctionError <>`__
`FunctionParsingError <>`__ `HeaderError <>`__ `IdError <>`__
`ImageError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MediaBundleError <>`__ `MediaFileError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NotEmptyError <>`__
`NullError <>`__ `OperationAccessDeniedError <>`__
`OperatorError <>`__ `PolicyFindingError <>`__
`PolicyValidationParameterError <>`__
`PolicyViolationError <>`__ `QuotaError <>`__ `RangeError <>`__
`RequestError <>`__ `ResourceCountLimitExceededError <>`__
`SizeLimitError <>`__ `StringFormatError <>`__
`StringLengthError <>`__ `UrlFieldError <>`__
Returns:
Callable[[~.MutateAdGroupAdsRequest],
~.MutateAdGroupAdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_ad_group_ads" not in self._stubs:
self._stubs["mutate_ad_group_ads"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.AdGroupAdService/MutateAdGroupAds",
request_serializer=ad_group_ad_service.MutateAdGroupAdsRequest.serialize,
response_deserializer=ad_group_ad_service.MutateAdGroupAdsResponse.deserialize,
)
return self._stubs["mutate_ad_group_ads"] | [
"def",
"mutate_ad_group_ads",
"(",
"self",
",",
")",
"->",
"Callable",
"[",
"[",
"ad_group_ad_service",
".",
"MutateAdGroupAdsRequest",
"]",
",",
"ad_group_ad_service",
".",
"MutateAdGroupAdsResponse",
",",
"]",
":",
"# Generate a \"stub function\" on-the-fly which will act... | https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v8/services/services/ad_group_ad_service/transports/grpc.py#L245-L292 | |
rwth-i6/returnn | f2d718a197a280b0d5f0fd91a7fcb8658560dddb | returnn/__setup__.py | python | get_version_str | (verbose=False, verbose_error=False, fallback=None, long=False) | :param bool verbose: print exactly how we end up with some version
:param bool verbose_error: print only any potential errors
:param str|None fallback:
:param bool long:
False: Always distutils.version.StrictVersion compatible. just like "1.20190202.154527".
True: Will also add the revision string, like "1.20180724.141845+git.7865d01".
The format might change in the future.
We will keep it `SemVer <https://semver.org/>`__ compatible.
I.e. the string before the `"+"` will be the short version.
We always make sure that there is a `"+"` in the string.
:rtype: str | :param bool verbose: print exactly how we end up with some version
:param bool verbose_error: print only any potential errors
:param str|None fallback:
:param bool long:
False: Always distutils.version.StrictVersion compatible. just like "1.20190202.154527".
True: Will also add the revision string, like "1.20180724.141845+git.7865d01".
The format might change in the future.
We will keep it `SemVer <https://semver.org/>`__ compatible.
I.e. the string before the `"+"` will be the short version.
We always make sure that there is a `"+"` in the string.
:rtype: str | [
":",
"param",
"bool",
"verbose",
":",
"print",
"exactly",
"how",
"we",
"end",
"up",
"with",
"some",
"version",
":",
"param",
"bool",
"verbose_error",
":",
"print",
"only",
"any",
"potential",
"errors",
":",
"param",
"str|None",
"fallback",
":",
":",
"param... | def get_version_str(verbose=False, verbose_error=False, fallback=None, long=False):
"""
:param bool verbose: print exactly how we end up with some version
:param bool verbose_error: print only any potential errors
:param str|None fallback:
:param bool long:
False: Always distutils.version.StrictVersion compatible. just like "1.20190202.154527".
True: Will also add the revision string, like "1.20180724.141845+git.7865d01".
The format might change in the future.
We will keep it `SemVer <https://semver.org/>`__ compatible.
I.e. the string before the `"+"` will be the short version.
We always make sure that there is a `"+"` in the string.
:rtype: str
"""
# Earlier we checked PKG-INFO, via parse_pkg_info. Both in the root-dir and in my-dir.
# Now we should always have _setup_info_generated.py, copied by our own setup.
# Do not use PKG-INFO at all anymore (for now), as it would only have the short version.
# Only check _setup_info_generated in the current dir, not in the root-dir,
# because we want to only use it if this was installed via a package.
# Otherwise, we want the current Git version.
if os.path.exists("%s/_setup_info_generated.py" % _my_dir):
# noinspection PyUnresolvedReferences
from . import _setup_info_generated as info
if verbose:
print("Found _setup_info_generated.py, long version %r, version %r." % (info.long_version, info.version))
if long:
assert "+" in info.long_version
return info.long_version
return info.version
info_in_root_filename = "%s/_setup_info_generated.py" % _root_dir
if os.path.exists(info_in_root_filename):
# The root dir might not be in sys.path, so just load directly.
code = compile(open(info_in_root_filename).read(), info_in_root_filename, "exec")
info = {}
eval(code, info)
version = info["version"]
long_version = info["long_version"]
if verbose:
print(
"Found %r in root, long version %r, version %r." % (info_in_root_filename, long_version, version))
if long:
assert "+" in long_version
return long_version
return version
if os.path.exists("%s/.git" % _root_dir):
try:
version = git_head_version(git_dir=_root_dir, long=long)
if verbose:
print("Version via Git:", version)
if long:
assert "+" in version
return version
except Exception as exc:
if verbose or verbose_error:
print("Exception while getting Git version:", exc)
sys.excepthook(*sys.exc_info())
if not fallback:
raise # no fallback
if fallback:
if verbose:
print("Version via fallback:", fallback)
if long:
assert "+" in fallback
return fallback
raise Exception("Cannot get RETURNN version.") | [
"def",
"get_version_str",
"(",
"verbose",
"=",
"False",
",",
"verbose_error",
"=",
"False",
",",
"fallback",
"=",
"None",
",",
"long",
"=",
"False",
")",
":",
"# Earlier we checked PKG-INFO, via parse_pkg_info. Both in the root-dir and in my-dir.",
"# Now we should always h... | https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/__setup__.py#L65-L132 | ||
sagemath/sage | f9b2db94f675ff16963ccdefba4f1a3393b3fe0d | src/sage/groups/libgap_mixin.py | python | GroupMixinLibGAP.is_solvable | (self) | return self.gap().IsSolvableGroup().sage() | r"""
Return whether this group is solvable.
EXAMPLES::
sage: from sage.groups.libgap_group import GroupLibGAP
sage: GroupLibGAP(libgap.SymmetricGroup(4)).is_solvable()
True
sage: GroupLibGAP(libgap.SymmetricGroup(5)).is_solvable()
False | r"""
Return whether this group is solvable. | [
"r",
"Return",
"whether",
"this",
"group",
"is",
"solvable",
"."
] | def is_solvable(self):
r"""
Return whether this group is solvable.
EXAMPLES::
sage: from sage.groups.libgap_group import GroupLibGAP
sage: GroupLibGAP(libgap.SymmetricGroup(4)).is_solvable()
True
sage: GroupLibGAP(libgap.SymmetricGroup(5)).is_solvable()
False
"""
return self.gap().IsSolvableGroup().sage() | [
"def",
"is_solvable",
"(",
"self",
")",
":",
"return",
"self",
".",
"gap",
"(",
")",
".",
"IsSolvableGroup",
"(",
")",
".",
"sage",
"(",
")"
] | https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/groups/libgap_mixin.py#L91-L103 | |
cmsflash/beauty-net | 668210a95ccb4462d7beff10505e4e83532682f2 | beauty/utils/meters.py | python | MeterBundle.__init__ | (self, meters) | [] | def __init__(self, meters):
self.meters = {meter.label: meter for meter in meters} | [
"def",
"__init__",
"(",
"self",
",",
"meters",
")",
":",
"self",
".",
"meters",
"=",
"{",
"meter",
".",
"label",
":",
"meter",
"for",
"meter",
"in",
"meters",
"}"
] | https://github.com/cmsflash/beauty-net/blob/668210a95ccb4462d7beff10505e4e83532682f2/beauty/utils/meters.py#L57-L58 | ||||
ewrfcas/bert_cn_finetune | ec3ccedae5a88f557fe6a407e61af403ac39d9d7 | models/file_utils.py | python | filename_to_url | (filename: str, cache_dir: Union[str, Path] = None) | return url, etag | Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. | Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. | [
"Return",
"the",
"url",
"and",
"etag",
"(",
"which",
"may",
"be",
"None",
")",
"stored",
"for",
"filename",
".",
"Raise",
"FileNotFoundError",
"if",
"filename",
"or",
"its",
"stored",
"metadata",
"do",
"not",
"exist",
"."
] | def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag | [
"def",
"filename_to_url",
"(",
"filename",
":",
"str",
",",
"cache_dir",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"if",
"cache_dir",
"is",
"None",
":",
"cache_dir",
"=",
"PYTORCH_P... | https://github.com/ewrfcas/bert_cn_finetune/blob/ec3ccedae5a88f557fe6a407e61af403ac39d9d7/models/file_utils.py#L48-L71 | |
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/billing/v20180709/models.py | python | DescribeCostSummaryByProductRequest.__init__ | (self) | r"""
:param BeginTime: 目前必须和EndTime相同月份,不支持跨月查询,且查询结果是整月数据,例如 BeginTime为2018-09,EndTime 为 2018-09,查询结果是 2018 年 9 月数据。
:type BeginTime: str
:param EndTime: 目前必须和BeginTime为相同月份,不支持跨月查询,且查询结果是整月数据,例如 BeginTime为2018-09,EndTime 为 2018-09,查询结果是 2018 年 9 月数据。
:type EndTime: str
:param Limit: 每次获取数据量
:type Limit: int
:param Offset: 偏移量
:type Offset: int
:param PayerUin: 查询账单数据的用户UIN
:type PayerUin: str
:param NeedRecordNum: 是否需要返回记录数量,0不需要,1需要,默认不需要
:type NeedRecordNum: int | r"""
:param BeginTime: 目前必须和EndTime相同月份,不支持跨月查询,且查询结果是整月数据,例如 BeginTime为2018-09,EndTime 为 2018-09,查询结果是 2018 年 9 月数据。
:type BeginTime: str
:param EndTime: 目前必须和BeginTime为相同月份,不支持跨月查询,且查询结果是整月数据,例如 BeginTime为2018-09,EndTime 为 2018-09,查询结果是 2018 年 9 月数据。
:type EndTime: str
:param Limit: 每次获取数据量
:type Limit: int
:param Offset: 偏移量
:type Offset: int
:param PayerUin: 查询账单数据的用户UIN
:type PayerUin: str
:param NeedRecordNum: 是否需要返回记录数量,0不需要,1需要,默认不需要
:type NeedRecordNum: int | [
"r",
":",
"param",
"BeginTime",
":",
"目前必须和EndTime相同月份,不支持跨月查询,且查询结果是整月数据,例如",
"BeginTime为2018",
"-",
"09,EndTime",
"为",
"2018",
"-",
"09,查询结果是",
"2018",
"年",
"9",
"月数据。",
":",
"type",
"BeginTime",
":",
"str",
":",
"param",
"EndTime",
":",
"目前必须和BeginTime为相同月份,不支持... | def __init__(self):
r"""
:param BeginTime: 目前必须和EndTime相同月份,不支持跨月查询,且查询结果是整月数据,例如 BeginTime为2018-09,EndTime 为 2018-09,查询结果是 2018 年 9 月数据。
:type BeginTime: str
:param EndTime: 目前必须和BeginTime为相同月份,不支持跨月查询,且查询结果是整月数据,例如 BeginTime为2018-09,EndTime 为 2018-09,查询结果是 2018 年 9 月数据。
:type EndTime: str
:param Limit: 每次获取数据量
:type Limit: int
:param Offset: 偏移量
:type Offset: int
:param PayerUin: 查询账单数据的用户UIN
:type PayerUin: str
:param NeedRecordNum: 是否需要返回记录数量,0不需要,1需要,默认不需要
:type NeedRecordNum: int
"""
self.BeginTime = None
self.EndTime = None
self.Limit = None
self.Offset = None
self.PayerUin = None
self.NeedRecordNum = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"BeginTime",
"=",
"None",
"self",
".",
"EndTime",
"=",
"None",
"self",
".",
"Limit",
"=",
"None",
"self",
".",
"Offset",
"=",
"None",
"self",
".",
"PayerUin",
"=",
"None",
"self",
".",
"NeedRecor... | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/billing/v20180709/models.py#L2408-L2428 | ||
bystep15/google-diff-match-patch | ba831450a67165840fb0042889a45f83f48f682c | python3/diff_match_patch.py | python | diff_match_patch.patch_toText | (self, patches) | return "".join(text) | Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches. | Take a list of patches and return a textual representation. | [
"Take",
"a",
"list",
"of",
"patches",
"and",
"return",
"a",
"textual",
"representation",
"."
] | def patch_toText(self, patches):
"""Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
"""
text = []
for patch in patches:
text.append(str(patch))
return "".join(text) | [
"def",
"patch_toText",
"(",
"self",
",",
"patches",
")",
":",
"text",
"=",
"[",
"]",
"for",
"patch",
"in",
"patches",
":",
"text",
".",
"append",
"(",
"str",
"(",
"patch",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"text",
")"
] | https://github.com/bystep15/google-diff-match-patch/blob/ba831450a67165840fb0042889a45f83f48f682c/python3/diff_match_patch.py#L1775-L1787 | |
clinton-hall/nzbToMedia | 27669389216902d1085660167e7bda0bd8527ecf | libs/common/oauthlib/oauth2/rfc6749/parameters.py | python | parse_authorization_code_response | (uri, state=None) | return params | Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the ``application/x-www-form-urlencoded`` format:
**code**
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
:param uri: The full redirect URL back to the client.
:param state: The state parameter from the authorization request.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz | Parse authorization grant response URI into a dict. | [
"Parse",
"authorization",
"grant",
"response",
"URI",
"into",
"a",
"dict",
"."
] | def parse_authorization_code_response(uri, state=None):
"""Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the ``application/x-www-form-urlencoded`` format:
**code**
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
:param uri: The full redirect URL back to the client.
:param state: The state parameter from the authorization request.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
query = urlparse.urlparse(uri).query
params = dict(urlparse.parse_qsl(query))
if state and params.get('state', None) != state:
raise MismatchingStateError()
if 'error' in params:
raise_from_error(params.get('error'), params)
if not 'code' in params:
raise MissingCodeError("Missing code parameter in response.")
return params | [
"def",
"parse_authorization_code_response",
"(",
"uri",
",",
"state",
"=",
"None",
")",
":",
"if",
"not",
"is_secure_transport",
"(",
"uri",
")",
":",
"raise",
"InsecureTransportError",
"(",
")",
"query",
"=",
"urlparse",
".",
"urlparse",
"(",
"uri",
")",
".... | https://github.com/clinton-hall/nzbToMedia/blob/27669389216902d1085660167e7bda0bd8527ecf/libs/common/oauthlib/oauth2/rfc6749/parameters.py#L223-L276 | |
mayank93/Twitter-Sentiment-Analysis | f095c6ca6bf69787582b5dabb140fefaf278eb37 | front-end/web2py/site-packages/python-ldap-2.4.3/build/lib.linux-x86_64-2.7/ldap/controls/__init__.py | python | ResponseControl.decodeControlValue | (self,encodedControlValue) | decodes the BER-encoded ASN.1 control value and sets the appropriate
class attributes | decodes the BER-encoded ASN.1 control value and sets the appropriate
class attributes | [
"decodes",
"the",
"BER",
"-",
"encoded",
"ASN",
".",
"1",
"control",
"value",
"and",
"sets",
"the",
"appropriate",
"class",
"attributes"
] | def decodeControlValue(self,encodedControlValue):
"""
decodes the BER-encoded ASN.1 control value and sets the appropriate
class attributes
"""
self.encodedControlValue = encodedControlValue | [
"def",
"decodeControlValue",
"(",
"self",
",",
"encodedControlValue",
")",
":",
"self",
".",
"encodedControlValue",
"=",
"encodedControlValue"
] | https://github.com/mayank93/Twitter-Sentiment-Analysis/blob/f095c6ca6bf69787582b5dabb140fefaf278eb37/front-end/web2py/site-packages/python-ldap-2.4.3/build/lib.linux-x86_64-2.7/ldap/controls/__init__.py#L80-L85 | ||
ctxis/beemka | 754ab3178b33b107f626b1e23d1dbba1dc455202 | classes/asar.py | python | Asar.from_path | (cls, path) | return cls(
path=path,
fp=fp,
header=header,
base_offset=round_up(16 + header_string_size, 4)
) | Creates an asar file using the given ``path``.
When this is used, the ``fp`` attribute of the returned instance
will be a :class:`io.BytesIO` object, so it's not written to a file.
You have to do something like:
.. code-block:: python
with Asar.from_path('./something_dir') as a:
with open('./something.asar', 'wb') as f:
a.fp.seek(0) # just making sure we're at the start of the file
f.write(a.fp.read())
You cannot exclude files/folders from being packed yet.
Parameters
----------
path : str
Path to walk into, recursively, and pack
into an asar file. | Creates an asar file using the given ``path``. | [
"Creates",
"an",
"asar",
"file",
"using",
"the",
"given",
"path",
"."
] | def from_path(cls, path):
"""Creates an asar file using the given ``path``.
When this is used, the ``fp`` attribute of the returned instance
will be a :class:`io.BytesIO` object, so it's not written to a file.
You have to do something like:
.. code-block:: python
with Asar.from_path('./something_dir') as a:
with open('./something.asar', 'wb') as f:
a.fp.seek(0) # just making sure we're at the start of the file
f.write(a.fp.read())
You cannot exclude files/folders from being packed yet.
Parameters
----------
path : str
Path to walk into, recursively, and pack
into an asar file.
"""
offset = 0
concatenated_files = b''
def _path_to_dict(path):
nonlocal concatenated_files, offset
result = {'files': {}}
for f in os.scandir(path):
if os.path.isdir(f.path):
result['files'][f.name] = _path_to_dict(f.path)
elif f.is_symlink():
result['files'][f.name] = {
'link': os.path.realpath(f.name)
}
else:
size = f.stat().st_size
result['files'][f.name] = {
'size': size,
'offset': str(offset)
}
with open(f.path, 'rb') as fp:
concatenated_files += fp.read()
offset += size
return result
header = _path_to_dict(path)
header_json = json.dumps(header, sort_keys=True, separators=(',', ':')).encode('utf-8')
# TODO: using known constants here for now (laziness)...
# we likely need to calc these, but as far as discord goes we haven't needed it.
header_string_size = len(header_json)
data_size = 4 # uint32 size
aligned_size = round_up(header_string_size, data_size)
header_size = aligned_size + 8
header_object_size = aligned_size + data_size
# pad remaining space with NULLs
diff = aligned_size - header_string_size
header_json = header_json + b'\0' * (diff) if diff else header_json
fp = io.BytesIO()
fp.write(struct.pack('<4I', data_size, header_size, header_object_size, header_string_size))
fp.write(header_json)
fp.write(concatenated_files)
return cls(
path=path,
fp=fp,
header=header,
base_offset=round_up(16 + header_string_size, 4)
) | [
"def",
"from_path",
"(",
"cls",
",",
"path",
")",
":",
"offset",
"=",
"0",
"concatenated_files",
"=",
"b''",
"def",
"_path_to_dict",
"(",
"path",
")",
":",
"nonlocal",
"concatenated_files",
",",
"offset",
"result",
"=",
"{",
"'files'",
":",
"{",
"}",
"}"... | https://github.com/ctxis/beemka/blob/754ab3178b33b107f626b1e23d1dbba1dc455202/classes/asar.py#L81-L157 | |
DataDog/integrations-core | 934674b29d94b70ccc008f76ea172d0cdae05e1e | clickhouse/datadog_checks/clickhouse/config_models/defaults.py | python | shared_global_custom_queries | (field, value) | return get_default_field_value(field, value) | [] | def shared_global_custom_queries(field, value):
return get_default_field_value(field, value) | [
"def",
"shared_global_custom_queries",
"(",
"field",
",",
"value",
")",
":",
"return",
"get_default_field_value",
"(",
"field",
",",
"value",
")"
] | https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/clickhouse/datadog_checks/clickhouse/config_models/defaults.py#L13-L14 | |||
git-cola/git-cola | b48b8028e0c3baf47faf7b074b9773737358163d | cola/widgets/diff.py | python | DiffLineNumbers.paintEvent | (self, event) | Paint the line number | Paint the line number | [
"Paint",
"the",
"line",
"number"
] | def paintEvent(self, event):
"""Paint the line number"""
if not self.lines:
return
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), self._base)
editor = self.editor
content_offset = editor.contentOffset()
block = editor.firstVisibleBlock()
width = self.width()
event_rect_bottom = event.rect().bottom()
highlight = self._highlight
highlight.setAlphaF(0.3)
highlight_text = self._highlight_text
disabled = self._disabled
fmt = self.formatter
lines = self.lines
num_lines = len(self.lines)
painter.setPen(disabled)
text = ''
while block.isValid():
block_number = block.blockNumber()
if block_number >= num_lines:
break
block_geom = editor.blockBoundingGeometry(block)
block_top = block_geom.translated(content_offset).top()
if not block.isVisible() or block_top >= event_rect_bottom:
break
rect = block_geom.translated(content_offset).toRect()
if block_number == self.highlight_line:
painter.fillRect(rect.x(), rect.y(), width, rect.height(), highlight)
painter.setPen(highlight_text)
else:
painter.setPen(disabled)
line = lines[block_number]
if len(line) == 2:
a, b = line
text = fmt.value(a, b)
elif len(line) == 3:
old, base, new = line
text = fmt.merge_value(old, base, new)
painter.drawText(
rect.x(),
rect.y(),
self.width() - (defs.margin * 2),
rect.height(),
Qt.AlignRight | Qt.AlignVCenter,
text,
)
block = block.next() | [
"def",
"paintEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"lines",
":",
"return",
"painter",
"=",
"QtGui",
".",
"QPainter",
"(",
"self",
")",
"painter",
".",
"fillRect",
"(",
"event",
".",
"rect",
"(",
")",
",",
"self",
".... | https://github.com/git-cola/git-cola/blob/b48b8028e0c3baf47faf7b074b9773737358163d/cola/widgets/diff.py#L297-L355 | ||
akanimax/Variational_Discriminator_Bottleneck | 26a39ddbf9ee2213dbc1b60894a9092b1a5d3710 | source/vdb/Losses.py | python | WGAN_GP.__gradient_penalty | (self, real_samps, fake_samps, reg_lambda=10) | return penalty | private helper for calculating the gradient penalty
:param real_samps: real samples
:param fake_samps: fake samples
:param reg_lambda: regularisation lambda
:return: gradient_penalty => scalar tensor | private helper for calculating the gradient penalty
:param real_samps: real samples
:param fake_samps: fake samples
:param reg_lambda: regularisation lambda
:return: gradient_penalty => scalar tensor | [
"private",
"helper",
"for",
"calculating",
"the",
"gradient",
"penalty",
":",
"param",
"real_samps",
":",
"real",
"samples",
":",
"param",
"fake_samps",
":",
"fake",
"samples",
":",
"param",
"reg_lambda",
":",
"regularisation",
"lambda",
":",
"return",
":",
"g... | def __gradient_penalty(self, real_samps, fake_samps, reg_lambda=10):
"""
private helper for calculating the gradient penalty
:param real_samps: real samples
:param fake_samps: fake samples
:param reg_lambda: regularisation lambda
:return: gradient_penalty => scalar tensor
"""
from torch.autograd import grad
batch_size = real_samps.shape[0]
# generate random epsilon
epsilon = th.rand(batch_size, 1, 1, 1).to(fake_samps.device)
# create the merge of both real and fake samples
merged = (epsilon * real_samps) + ((1 - epsilon) * fake_samps)
# forward pass
op, _, _ = self.dis(merged, mean_mode=False)
# obtain gradient of op wrt. merged
gradient = grad(outputs=op, inputs=merged, create_graph=True,
grad_outputs=th.ones_like(op),
retain_graph=True, only_inputs=True)[0]
# calculate the penalty using these gradients
penalty = reg_lambda * ((gradient.norm(p=2, dim=1) - 1) ** 2).mean()
# return the calculated penalty:
return penalty | [
"def",
"__gradient_penalty",
"(",
"self",
",",
"real_samps",
",",
"fake_samps",
",",
"reg_lambda",
"=",
"10",
")",
":",
"from",
"torch",
".",
"autograd",
"import",
"grad",
"batch_size",
"=",
"real_samps",
".",
"shape",
"[",
"0",
"]",
"# generate random epsilon... | https://github.com/akanimax/Variational_Discriminator_Bottleneck/blob/26a39ddbf9ee2213dbc1b60894a9092b1a5d3710/source/vdb/Losses.py#L188-L218 | |
wanggrun/Adaptively-Connected-Neural-Networks | e27066ef52301bdafa5932f43af8feeb23647edb | tensorpack-installed/build/lib/tensorpack/models/linearwrap.py | python | LinearWrap.__call__ | (self) | return self._t | Returns:
tf.Tensor: the underlying wrapped tensor. | Returns:
tf.Tensor: the underlying wrapped tensor. | [
"Returns",
":",
"tf",
".",
"Tensor",
":",
"the",
"underlying",
"wrapped",
"tensor",
"."
] | def __call__(self):
"""
Returns:
tf.Tensor: the underlying wrapped tensor.
"""
return self._t | [
"def",
"__call__",
"(",
"self",
")",
":",
"return",
"self",
".",
"_t"
] | https://github.com/wanggrun/Adaptively-Connected-Neural-Networks/blob/e27066ef52301bdafa5932f43af8feeb23647edb/tensorpack-installed/build/lib/tensorpack/models/linearwrap.py#L89-L94 | |
apache/incubator-spot | 2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb | spot-oa/ipython/profile_spot/startup/graphql.py | python | GraphQLClient.set_variables | (self, variables) | [] | def set_variables(self, variables):
self.variables = variables | [
"def",
"set_variables",
"(",
"self",
",",
"variables",
")",
":",
"self",
".",
"variables",
"=",
"variables"
] | https://github.com/apache/incubator-spot/blob/2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb/spot-oa/ipython/profile_spot/startup/graphql.py#L40-L41 | ||||
microsoft/debugpy | be8dd607f6837244e0b565345e497aff7a0c08bf | src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/parse.py | python | Parser.addtoken | (self, type, value, context) | Add a token; return True iff this is the end of the program. | Add a token; return True iff this is the end of the program. | [
"Add",
"a",
"token",
";",
"return",
"True",
"iff",
"this",
"is",
"the",
"end",
"of",
"the",
"program",
"."
] | def addtoken(self, type, value, context):
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabel = self.classify(type, value, context)
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
t, v = self.grammar.labels[i]
if ilabel == i:
# Look it up in the list of labels
assert t < 256
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
state = newstate
while states[state] == [(0, state)]:
self.pop()
if not self.stack:
# Done parsing!
return True
dfa, state, node = self.stack[-1]
states, first = dfa
# Done with this token
return False
elif t >= 256:
# See if it's a symbol and if we're in its first set
itsdfa = self.grammar.dfas[t]
itsstates, itsfirst = itsdfa
if ilabel in itsfirst:
# Push a symbol
self.push(t, self.grammar.dfas[t], newstate, context)
break # To continue the outer while loop
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
self.pop()
if not self.stack:
# Done parsing, but another token is input
raise ParseError("too much input",
type, value, context)
else:
# No success finding a transition
raise ParseError("bad input", type, value, context) | [
"def",
"addtoken",
"(",
"self",
",",
"type",
",",
"value",
",",
"context",
")",
":",
"# Map from token to label",
"ilabel",
"=",
"self",
".",
"classify",
"(",
"type",
",",
"value",
",",
"context",
")",
"# Loop until the token is shifted; may raise exceptions",
"wh... | https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/parse.py#L113-L159 | ||
urduhack/urduhack | 44500cd6a78e1a7765bb4f7d6fb92bbb612b7b11 | urduhack/pipeline/core.py | python | Pipeline.__init__ | (self) | Pass | Pass | [
"Pass"
] | def __init__(self):
"""Pass"""
self.parsers = {}
for item in REGISTERED_PARSERS:
self.parsers[item] = REGISTERED_PARSERS[item](config={}, pipeline=self) | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"parsers",
"=",
"{",
"}",
"for",
"item",
"in",
"REGISTERED_PARSERS",
":",
"self",
".",
"parsers",
"[",
"item",
"]",
"=",
"REGISTERED_PARSERS",
"[",
"item",
"]",
"(",
"config",
"=",
"{",
"}",
",",... | https://github.com/urduhack/urduhack/blob/44500cd6a78e1a7765bb4f7d6fb92bbb612b7b11/urduhack/pipeline/core.py#L31-L36 | ||
graalvm/mx | 29c0debab406352df3af246be2f8973be5db69ae | mx.py | python | binary_url | (args) | [] | def binary_url(args):
def snapshot_version(suite):
if suite.vc:
'{0}-SNAPSHOT'.format(suite.vc.parent(suite.vc_dir))
else:
abort('binary_url requires suite to be under a vcs repository')
_artifact_url(args, 'mx binary-url', 'mx deploy-binary', snapshot_version) | [
"def",
"binary_url",
"(",
"args",
")",
":",
"def",
"snapshot_version",
"(",
"suite",
")",
":",
"if",
"suite",
".",
"vc",
":",
"'{0}-SNAPSHOT'",
".",
"format",
"(",
"suite",
".",
"vc",
".",
"parent",
"(",
"suite",
".",
"vc_dir",
")",
")",
"else",
":",... | https://github.com/graalvm/mx/blob/29c0debab406352df3af246be2f8973be5db69ae/mx.py#L11608-L11614 | ||||
openstack/nova | b49b7663e1c3073917d5844b81d38db8e86d05c4 | nova/virt/powervm/vm.py | python | delete_lpar | (adapter, instance) | Delete an LPAR.
:param adapter: The adapter for the pypowervm API.
:param instance: The nova instance corresponding to the lpar to delete. | Delete an LPAR. | [
"Delete",
"an",
"LPAR",
"."
] | def delete_lpar(adapter, instance):
"""Delete an LPAR.
:param adapter: The adapter for the pypowervm API.
:param instance: The nova instance corresponding to the lpar to delete.
"""
lpar_uuid = get_pvm_uuid(instance)
# Attempt to delete the VM. To avoid failures due to open vterm, we will
# attempt to close the vterm before issuing the delete.
try:
LOG.info('Deleting virtual machine.', instance=instance)
# Ensure any vterms are closed. Will no-op otherwise.
vterm.close_vterm(adapter, lpar_uuid)
# Run the LPAR delete
resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
LOG.info('Virtual machine delete status: %d', resp.status,
instance=instance)
return resp
except pvm_exc.HttpError as e:
with excutils.save_and_reraise_exception(logger=LOG) as sare:
if e.response and e.response.status == 404:
# LPAR is already gone - don't fail
sare.reraise = False
LOG.info('Virtual Machine not found', instance=instance)
else:
LOG.error('HttpError deleting virtual machine.',
instance=instance)
except pvm_exc.Error:
with excutils.save_and_reraise_exception(logger=LOG):
# Attempting to close vterm did not help so raise exception
LOG.error('Virtual machine delete failed: LPARID=%s', lpar_uuid) | [
"def",
"delete_lpar",
"(",
"adapter",
",",
"instance",
")",
":",
"lpar_uuid",
"=",
"get_pvm_uuid",
"(",
"instance",
")",
"# Attempt to delete the VM. To avoid failures due to open vterm, we will",
"# attempt to close the vterm before issuing the delete.",
"try",
":",
"LOG",
"."... | https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/virt/powervm/vm.py#L226-L256 | ||
larryhastings/gilectomy | 4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a | Lib/importlib/_bootstrap_external.py | python | PathFinder.find_module | (cls, fullname, path=None) | return spec.loader | find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead. | find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache. | [
"find",
"the",
"module",
"on",
"sys",
".",
"path",
"or",
"path",
"based",
"on",
"sys",
".",
"path_hooks",
"and",
"sys",
".",
"path_importer_cache",
"."
] | def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader | [
"def",
"find_module",
"(",
"cls",
",",
"fullname",
",",
"path",
"=",
"None",
")",
":",
"spec",
"=",
"cls",
".",
"find_spec",
"(",
"fullname",
",",
"path",
")",
"if",
"spec",
"is",
"None",
":",
"return",
"None",
"return",
"spec",
".",
"loader"
] | https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/importlib/_bootstrap_external.py#L1158-L1168 | |
Theano/Theano | 8fd9203edfeecebced9344b0c70193be292a9ade | theano/gof/sched.py | python | sort_apply_nodes | (inputs, outputs, cmps) | return posort(list_of_nodes(inputs, outputs), *cmps) | Order a graph of apply nodes according to a list of comparators.
The following example sorts first by dependence of nodes (this is a
topological sort) and then by lexicographical ordering (nodes that start
with 'E' come before nodes that start with 'I' if there is no dependence.
Examples
--------
>>> from theano.gof.graph import sort_apply_nodes, dependence
>>> from theano.tensor import matrix, dot
>>> x = matrix('x')
>>> y = dot(x*2, x+1)
>>> str_cmp = lambda a, b: cmp(str(a), str(b)) # lexicographical sort
>>> sort_apply_nodes([x], [y], cmps=[dependence, str_cmp])
[Elemwise{add,no_inplace}(x, InplaceDimShuffle{x,x}.0),
InplaceDimShuffle{x,x}(TensorConstant{2}),
Elemwise{mul,no_inplace}(x, InplaceDimShuffle{x,x}.0),
InplaceDimShuffle{x,x}(TensorConstant{1}),
dot(Elemwise{mul,no_inplace}.0, Elemwise{add,no_inplace}.0)] | Order a graph of apply nodes according to a list of comparators. | [
"Order",
"a",
"graph",
"of",
"apply",
"nodes",
"according",
"to",
"a",
"list",
"of",
"comparators",
"."
] | def sort_apply_nodes(inputs, outputs, cmps):
"""
Order a graph of apply nodes according to a list of comparators.
The following example sorts first by dependence of nodes (this is a
topological sort) and then by lexicographical ordering (nodes that start
with 'E' come before nodes that start with 'I' if there is no dependence.
Examples
--------
>>> from theano.gof.graph import sort_apply_nodes, dependence
>>> from theano.tensor import matrix, dot
>>> x = matrix('x')
>>> y = dot(x*2, x+1)
>>> str_cmp = lambda a, b: cmp(str(a), str(b)) # lexicographical sort
>>> sort_apply_nodes([x], [y], cmps=[dependence, str_cmp])
[Elemwise{add,no_inplace}(x, InplaceDimShuffle{x,x}.0),
InplaceDimShuffle{x,x}(TensorConstant{2}),
Elemwise{mul,no_inplace}(x, InplaceDimShuffle{x,x}.0),
InplaceDimShuffle{x,x}(TensorConstant{1}),
dot(Elemwise{mul,no_inplace}.0, Elemwise{add,no_inplace}.0)]
"""
return posort(list_of_nodes(inputs, outputs), *cmps) | https://github.com/Theano/Theano/blob/8fd9203edfeecebced9344b0c70193be292a9ade/theano/gof/sched.py#L223-L246
caiiiac/Machine-Learning-with-Python | 1a26c4467da41ca4ebc3d5bd789ea942ef79422f | MachineLearning/venv/lib/python3.5/site-packages/numpy/lib/scimath.py | python | sqrt | (x) | return nx.sqrt(x) | Compute the square root of x.
For negative input elements, a complex value is returned
(unlike `numpy.sqrt` which returns NaN).
Parameters
----------
x : array_like
The input value(s).
Returns
-------
out : ndarray or scalar
The square root of `x`. If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.sqrt
Examples
--------
For real, non-negative inputs this works just like `numpy.sqrt`:
>>> np.lib.scimath.sqrt(1)
1.0
>>> np.lib.scimath.sqrt([1, 4])
array([ 1., 2.])
But it automatically handles negative inputs:
>>> np.lib.scimath.sqrt(-1)
(0.0+1.0j)
>>> np.lib.scimath.sqrt([-1,4])
array([ 0.+1.j, 2.+0.j]) | Compute the square root of x. | def sqrt(x):
"""
Compute the square root of x.
For negative input elements, a complex value is returned
(unlike `numpy.sqrt` which returns NaN).
Parameters
----------
x : array_like
The input value(s).
Returns
-------
out : ndarray or scalar
The square root of `x`. If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.sqrt
Examples
--------
For real, non-negative inputs this works just like `numpy.sqrt`:
>>> np.lib.scimath.sqrt(1)
1.0
>>> np.lib.scimath.sqrt([1, 4])
array([ 1., 2.])
But it automatically handles negative inputs:
>>> np.lib.scimath.sqrt(-1)
(0.0+1.0j)
>>> np.lib.scimath.sqrt([-1,4])
array([ 0.+1.j, 2.+0.j])
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x) | https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/numpy/lib/scimath.py#L176-L216
aleju/imgaug | 0101108d4fed06bc5056c4a03e2bcb0216dac326 | imgaug/augmentables/utils.py | python | normalize_shape | (shape) | return shape.shape | Normalize a shape ``tuple`` or ``array`` to a shape ``tuple``.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape ``tuple``. | Normalize a shape ``tuple`` or ``array`` to a shape ``tuple``. | def normalize_shape(shape):
"""Normalize a shape ``tuple`` or ``array`` to a shape ``tuple``.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape ``tuple``.
"""
if isinstance(shape, tuple):
return shape
assert ia.is_np_array(shape), (
"Expected tuple of ints or array, got %s." % (type(shape),))
return shape.shape | https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/augmentables/utils.py#L63-L81
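A short usage sketch for the normalize_shape record above (the shapes are arbitrary examples):

```python
import numpy as np
from imgaug.augmentables.utils import normalize_shape

# A tuple passes through unchanged; an array contributes its .shape.
assert normalize_shape((32, 64)) == (32, 64)
assert normalize_shape(np.zeros((4, 5, 3))) == (4, 5, 3)
```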
biolab/orange2 | db40a9449cb45b507d63dcd5739b223f9cffb8e6 | Orange/utils/addons.py | python | run_setup | (setup_script, args) | Run `setup_script` with `args` in a subprocess, using
:ref:`subprocess.check_output`. | Run `setup_script` with `args` in a subprocess, using
:ref:`subprocess.check_output`. | def run_setup(setup_script, args):
"""
Run `setup_script` with `args` in a subprocess, using
:ref:`subprocess.check_output`.
"""
source_root = os.path.dirname(setup_script)
executable = sys.executable
extra_kwargs = {}
if os.name == "nt" and os.path.basename(executable) == "pythonw.exe":
dirname, _ = os.path.split(executable)
executable = os.path.join(dirname, "python.exe")
# by default a new console window would show up when executing the
# script
startupinfo = subprocess.STARTUPINFO()
if hasattr(subprocess, "STARTF_USESHOWWINDOW"):
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
# This flag was missing in inital releases of 2.7
startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
extra_kwargs["startupinfo"] = startupinfo
process = subprocess.Popen([executable, setup_script] + args,
cwd=source_root,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1, # line buffered
**extra_kwargs)
output = []
while process.poll() is None:
try:
line = process.stdout.readline()
except (OSError, IOError) as ex:
if ex.errno != errno.EINTR:
raise
else:
output.append(line)
print line,
if process.returncode:
raise subprocess.CalledProcessError(
process.returncode,
setup_script,
"".join(output)
) | https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/utils/addons.py#L342-L387
libass/JavascriptSubtitlesOctopus | c7eeea072e8e86273b8b2e4273626bbacb3240f3 | build/WebIDL.py | python | Parser.p_Inheritance | (self, p) | | Inheritance : COLON ScopedName | Inheritance : COLON ScopedName |
def p_Inheritance(self, p):
    """
    Inheritance : COLON ScopedName
    """
    p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
| https://github.com/libass/JavascriptSubtitlesOctopus/blob/c7eeea072e8e86273b8b2e4273626bbacb3240f3/build/WebIDL.py#L3821-L3825
nerdvegas/rez | d392c65bf63b4bca8106f938cec49144ba54e770 | src/rez/status.py | python | Status.print_tools | (self, pattern=None, buf=sys.stdout) | return True | Print a list of visible tools.
Args:
pattern (str): Only list tools that match this glob pattern. | Print a list of visible tools. | def print_tools(self, pattern=None, buf=sys.stdout):
"""Print a list of visible tools.
Args:
pattern (str): Only list tools that match this glob pattern.
"""
seen = set()
rows = []
context = self.context
if context:
data = context.get_tools()
conflicts = set(context.get_conflicting_tools().keys())
for _, (variant, tools) in sorted(data.items()):
pkg_str = variant.qualified_package_name
for tool in tools:
if pattern and not fnmatch(tool, pattern):
continue
if tool in conflicts:
label = "(in conflict)"
color = critical
else:
label = ''
color = None
rows.append([tool, '-', pkg_str, "active context", label, color])
seen.add(tool)
for suite in self.suites:
for tool, d in suite.get_tools().items():
if tool in seen:
continue
if pattern and not fnmatch(tool, pattern):
continue
label = []
color = None
path = which(tool)
if path:
path_ = os.path.join(suite.tools_path, tool)
if path != path_:
label.append("(hidden by unknown tool '%s')" % path)
color = warning
variant = d["variant"]
if isinstance(variant, set):
pkg_str = ", ".join(variant)
label.append("(in conflict)")
color = critical
else:
pkg_str = variant.qualified_package_name
orig_tool = d["tool_name"]
if orig_tool == tool:
orig_tool = '-'
label = ' '.join(label)
source = ("context '%s' in suite '%s'"
% (d["context_name"], suite.load_path))
rows.append([tool, orig_tool, pkg_str, source, label, color])
seen.add(tool)
_pr = Printer(buf)
if not rows:
_pr("No matching tools.")
return False
headers = [["TOOL", "ALIASING", "PACKAGE", "SOURCE", "", None],
["----", "--------", "-------", "------", "", None]]
rows = headers + sorted(rows, key=lambda x: x[0].lower())
print_colored_columns(_pr, rows)
return True | https://github.com/nerdvegas/rez/blob/d392c65bf63b4bca8106f938cec49144ba54e770/src/rez/status.py#L137-L210
enthought/mayavi | 2103a273568b8f0bd62328801aafbd6252543ae8 | mayavi/components/source_widget.py | python | SourceWidget.setup_pipeline | (self) | Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point. | Override this method so that it *creates* the tvtk
pipeline. | def setup_pipeline(self):
"""Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point.
"""
# Setup the glyphs.
sources = [tvtk.SphereWidget(theta_resolution=8, phi_resolution=6),
tvtk.LineWidget(clamp_to_bounds=False),
tvtk.PlaneWidget(),
tvtk.PointWidget(outline=False, x_shadows=False,
y_shadows=False, z_shadows=False),
]
self.widget_list = sources
# The 'widgets' trait is set in the '_widget_changed' handler.
self.widget = sources[0]
for s in sources:
self._connect(s) | https://github.com/enthought/mayavi/blob/2103a273568b8f0bd62328801aafbd6252543ae8/mayavi/components/source_widget.py#L125-L149
PyHDI/veriloggen | 2382d200deabf59cfcfd741f5eba371010aaf2bb | veriloggen/stream/stypes.py | python | ReduceMaxValid | (right, size, interval=None, initval=0,
enable=None, reset=None, reg_initval=None, width=32, signed=True) | return _ReduceValid(cls, right, size, interval, initval,
enable, reset, reg_initval, width, signed) | [] | def ReduceMaxValid(right, size, interval=None, initval=0,
enable=None, reset=None, reg_initval=None, width=32, signed=True):
cls = ReduceMax
return _ReduceValid(cls, right, size, interval, initval,
enable, reset, reg_initval, width, signed) | https://github.com/PyHDI/veriloggen/blob/2382d200deabf59cfcfd741f5eba371010aaf2bb/veriloggen/stream/stypes.py#L3696-L3701
nodesign/weio | 1d67d705a5c36a2e825ad13feab910b0aca9a2e8 | openWrt/files/usr/lib/python2.7/site-packages/tornado/ioloop.py | python | IOLoop.configurable_default | (cls) | return SelectIOLoop | [] | def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop | https://github.com/nodesign/weio/blob/1d67d705a5c36a2e825ad13feab910b0aca9a2e8/openWrt/files/usr/lib/python2.7/site-packages/tornado/ioloop.py#L202-L211
datacenter/acitoolkit | 629b84887dd0f0183b81efc8adb16817f985541a | samples/aci-show-tenant-faults.py | python | main | () | | Main execution routine | Main execution routine | def main():
"""
Main execution routine
"""
description = ('Simple application that logs on to the APIC'
' and displays all the faults. If tenant name is given, '
' shows the faults associated with that tenant')
creds = ACI.Credentials('apic', description)
creds.add_argument("-t", "--tenant_name",
help="name of the tenant of which faults are to be displayed")
creds.add_argument('--continuous', action='store_true',
help='Continuously monitor for tenant faults')
args = creds.get()
# Login to APIC
session = ACI.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
return
if args.tenant_name is not None:
tenant_name = args.tenant_name
else:
tenant_name = None
faults_obj = Faults()
faults_obj.subscribe_faults(session)
while faults_obj.has_faults(session) or args.continuous:
if faults_obj.has_faults(session):
faults = faults_obj.get_faults(session, tenant_name=tenant_name)
if faults is not None:
for fault in faults:
if fault is not None:
print("****************")
if fault.descr is not None:
print(" descr : " + fault.descr)
else:
print(" descr : " + " ")
print(" dn : " + fault.dn)
print(" rule : " + fault.rule)
print(" severity : " + fault.severity)
print(" type : " + fault.type)
print(" domain : " + fault.domain) | [
"def",
"main",
"(",
")",
":",
"description",
"=",
"(",
"'Simple application that logs on to the APIC'",
"' and displays all the faults. If tenant name is given, '",
"' shows the faults associated with that tenant'",
")",
"creds",
"=",
"ACI",
".",
"Credentials",
"(",
"'apic'",
"... | https://github.com/datacenter/acitoolkit/blob/629b84887dd0f0183b81efc8adb16817f985541a/samples/aci-show-tenant-faults.py#L13-L55 | ||
home-assistant/core | 265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1 | homeassistant/components/anthemav/media_player.py | python | AnthemAVR.dump_avrdata | (self) | return f"dump_avrdata: {items_string}" | Return state of avr object for debugging forensics. | Return state of avr object for debugging forensics. |
def dump_avrdata(self):
    """Return state of avr object for debugging forensics."""
    attrs = vars(self)
    items_string = ", ".join(f"{item}: {item}" for item in attrs.items())
    return f"dump_avrdata: {items_string}"
| https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/anthemav/media_player.py#L182-L186
rcorcs/NatI | fdf014f4292afdc95250add7b6658468043228e1 | en/parser/nltk_lite/contrib/toolbox/data.py | python | ToolboxData._make_parse_table | (self, grammar) | return (parse_table, first) | Return parsing state information used by tree_parser. | Return parsing state information used by tree_parser. | def _make_parse_table(self, grammar):
"""
Return parsing state information used by tree_parser.
"""
first = dict()
gram = dict()
for sym, value in grammar.items():
first[sym] = value[0]
gram[sym] = value[0] + value[1]
parse_table = dict()
for state in gram.keys():
parse_table[state] = dict()
for to_sym in gram[state]:
if to_sym in grammar:
# is a nonterminal
# assume all firsts are terminals
for i in first[to_sym]:
parse_table[state][i] = to_sym
else:
parse_table[state][to_sym] = to_sym
return (parse_table, first) | https://github.com/rcorcs/NatI/blob/fdf014f4292afdc95250add7b6658468043228e1/en/parser/nltk_lite/contrib/toolbox/data.py#L21-L42
wakatime/legacy-python-cli | 9b64548b16ab5ef16603d9a6c2620a16d0df8d46 | wakatime/packages/py27/OpenSSL/SSL.py | python | Context.set_cipher_list | (self, cipher_list) | Set the list of ciphers to be used in this context.
See the OpenSSL manual for more information (e.g.
:manpage:`ciphers(1)`).
:param bytes cipher_list: An OpenSSL cipher string.
:return: None | Set the list of ciphers to be used in this context. | def set_cipher_list(self, cipher_list):
"""
Set the list of ciphers to be used in this context.
See the OpenSSL manual for more information (e.g.
:manpage:`ciphers(1)`).
:param bytes cipher_list: An OpenSSL cipher string.
:return: None
"""
cipher_list = _text_to_bytes_and_warn("cipher_list", cipher_list)
if not isinstance(cipher_list, bytes):
raise TypeError("cipher_list must be a byte string.")
_openssl_assert(
_lib.SSL_CTX_set_cipher_list(self._context, cipher_list) == 1
)
# In OpenSSL 1.1.1 setting the cipher list will always return TLS 1.3
# ciphers even if you pass an invalid cipher. Applications (like
# Twisted) have tests that depend on an error being raised if an
# invalid cipher string is passed, but without the following check
# for the TLS 1.3 specific cipher suites it would never error.
tmpconn = Connection(self, None)
if (
tmpconn.get_cipher_list() == [
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256'
]
):
raise Error(
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
) | https://github.com/wakatime/legacy-python-cli/blob/9b64548b16ab5ef16603d9a6c2620a16d0df8d46/wakatime/packages/py27/OpenSSL/SSL.py#L1186-L1225
napari/napari | dbf4158e801fa7a429de8ef1cdee73bf6d64c61e | napari/components/layerlist.py | python | LayerList._get_extent_world | (self, layer_extent_list) | return np.vstack([min_v, max_v]) | Extent of layers in world coordinates.
Default to 2D with (-0.5, 511.5) min/ max values if no data is present.
Corresponds to pixels centered at [0, ..., 511].
Returns
-------
extent_world : array, shape (2, D) | Extent of layers in world coordinates. | def _get_extent_world(self, layer_extent_list):
"""Extent of layers in world coordinates.
Default to 2D with (-0.5, 511.5) min/ max values if no data is present.
Corresponds to pixels centered at [0, ..., 511].
Returns
-------
extent_world : array, shape (2, D)
"""
if len(self) == 0:
min_v = np.asarray([-0.5] * self.ndim)
max_v = np.asarray([511.5] * self.ndim)
else:
extrema = [extent.world for extent in layer_extent_list]
mins = [e[0] for e in extrema]
maxs = [e[1] for e in extrema]
min_v, max_v = self._get_min_and_max(mins, maxs)
return np.vstack([min_v, max_v]) | https://github.com/napari/napari/blob/dbf4158e801fa7a429de8ef1cdee73bf6d64c61e/napari/components/layerlist.py#L170-L189
effigies/BitTornado | 03f64f2933678d08f104e340fa42b9b7d0c14993 | BitTornado/Application/makemetafile.py | python | make_meta_file | (loc, url, params=None, flag=None,
progress=lambda x: None, progress_percent=True) | | Make a single .torrent file for a given location | Make a single .torrent file for a given location | def make_meta_file(loc, url, params=None, flag=None,
progress=lambda x: None, progress_percent=True):
"""Make a single .torrent file for a given location"""
if params is None:
params = {}
if flag is None:
flag = threading.Event()
tree = BTTree(loc, [])
# Extract target from parameters
if 'target' not in params or params['target'] == '':
fname, ext = os.path.split(loc)
if ext == '':
target = fname + '.torrent'
else:
target = os.path.join(fname, ext + '.torrent')
params['target'] = target
info = tree.makeInfo(flag=flag, progress=progress,
progress_percent=progress_percent, **params)
if flag is not None and flag.is_set():
return
metainfo = MetaInfo(announce=url, info=info, **params)
metainfo.write(params['target']) | https://github.com/effigies/BitTornado/blob/03f64f2933678d08f104e340fa42b9b7d0c14993/BitTornado/Application/makemetafile.py#L47-L73
openstack/octavia | 27e5b27d31c695ba72fb6750de2bdafd76e0d7d9 | octavia/common/jinja/haproxy/split_listeners/jinja_cfg.py | python | JinjaTemplater._escape_haproxy_config_string | (value) | return value | Escapes certain characters in a given string such that
haproxy will parse the string as a single value | Escapes certain characters in a given string such that |
def _escape_haproxy_config_string(value):
    """Escapes certain characters in a given string such that
    haproxy will parse the string as a single value
    """
    # Escape backslashes first
    value = re.sub(r'\\', r'\\\\', value)
    # Spaces next
    value = re.sub(' ', '\\ ', value)
    return value
| https://github.com/openstack/octavia/blob/27e5b27d31c695ba72fb6750de2bdafd76e0d7d9/octavia/common/jinja/haproxy/split_listeners/jinja_cfg.py#L456-L465
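A quick sketch of the escaping behaviour documented in the record above (the input value is chosen only for illustration):

```python
# A space becomes "\ " so haproxy reads the whole value as one token.
assert JinjaTemplater._escape_haproxy_config_string("a b") == "a\\ b"
```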
pygments/pygments | cd3ad20dfc8a6cb43e2c0b22b14446dcc0a554d7 | pygments/lexers/templates.py | python | HandlebarsHtmlLexer.__init__ | (self, **options) | [] | def __init__(self, **options):
    super().__init__(HtmlLexer, HandlebarsLexer, **options)
| https://github.com/pygments/pygments/blob/cd3ad20dfc8a6cb43e2c0b22b14446dcc0a554d7/pygments/lexers/templates.py#L1857-L1858
davidhalter/parso | ee5edaf22ff3941cbdfa4efd8cb3e8f69779fd56 | parso/tree.py | python | NodeOrLeaf.get_previous_sibling | (self) | Returns the node immediately preceding this node in this parent's
children list. If this node does not have a previous sibling, it is
None. | Returns the node immediately preceding this node in this parent's
children list. If this node does not have a previous sibling, it is
None. | def get_previous_sibling(self):
"""
Returns the node immediately preceding this node in this parent's
children list. If this node does not have a previous sibling, it is
None.
"""
parent = self.parent
if parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1] | https://github.com/davidhalter/parso/blob/ee5edaf22ff3941cbdfa4efd8cb3e8f69779fd56/parso/tree.py#L67-L82
Epistimio/orion | 732e739d99561020dbe620760acf062ade746006 | src/orion/core/evc/conflicts.py | python | ChangedDimensionConflict.detect | (cls, old_config, new_config, branching_config=None) | Detect all changed dimensions in `new_config` based on `old_config`
:param branching_config: | Detect all changed dimensions in `new_config` based on `old_config`
:param branching_config: | def detect(cls, old_config, new_config, branching_config=None):
"""Detect all changed dimensions in `new_config` based on `old_config`
:param branching_config:
"""
old_space = _build_space(old_config)
new_space = _build_space(new_config)
for name, dim in new_space.items():
if name not in old_space:
continue
new_prior = dim.get_prior_string()
old_prior = old_space[name].get_prior_string()
if new_prior != old_prior:
yield cls(old_config, new_config, dim, old_prior, new_prior) | https://github.com/Epistimio/orion/blob/732e739d99561020dbe620760acf062ade746006/src/orion/core/evc/conflicts.py#L694-L708
pilotmoon/PopClip-Extensions | 29fc472befc09ee350092ac70283bd9fdb456cb6 | source/PushbulletPython/requests/packages/urllib3/packages/ordered_dict.py | python | OrderedDict.fromkeys | (cls, iterable, value=None) | return d | OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None). | OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None). |
def fromkeys(cls, iterable, value=None):
    '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
    and values equal to v (which defaults to None).
    '''
    d = cls()
    for key in iterable:
        d[key] = value
    return d
| https://github.com/pilotmoon/PopClip-Extensions/blob/29fc472befc09ee350092ac70283bd9fdb456cb6/source/PushbulletPython/requests/packages/urllib3/packages/ordered_dict.py#L226-L234
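A short usage sketch for the fromkeys record above; the standard-library collections.OrderedDict behaves the same way as this vendored copy:

```python
from collections import OrderedDict

d = OrderedDict.fromkeys(["a", "b", "c"], 0)
# Keys keep their given order and all map to the same value.
assert list(d.items()) == [("a", 0), ("b", 0), ("c", 0)]
```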
csurfer/pypette | 4e0bfcc56d36d7fb56d381ffcd6e5e58cb9b3ca1 | pypette/pipes.py | python | Pipe.add_jobs | (self, jobs: List[Union[Job, BashJob, 'Pipe']], run_in_parallel: bool = False) | return self | Method to add jobs to pipeline.
:param jobs: List of jobs/pipes to run.
:param run_in_parallel: This flag when set to False(default) runs the
list of jobs given one after another. This flag
if set to True runs the jobs/pipes submitted in
parallel threads. | Method to add jobs to pipeline. | def add_jobs(self, jobs: List[Union[Job, BashJob, 'Pipe']], run_in_parallel: bool = False):
"""Method to add jobs to pipeline.
:param jobs: List of jobs/pipes to run.
:param run_in_parallel: This flag when set to False(default) runs the
list of jobs given one after another. This flag
if set to True runs the jobs/pipes submitted in
parallel threads.
"""
# Return if nothing to do.
if not jobs:
return
# Validate the set of jobs given.
Pipe._validate(jobs)
# Add jobs to pipeline.
if run_in_parallel:
self._add_in_parallel(jobs)
else:
self._add_in_series(jobs)
return self | https://github.com/csurfer/pypette/blob/4e0bfcc56d36d7fb56d381ffcd6e5e58cb9b3ca1/pypette/pipes.py#L55-L78
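An illustrative pipeline built with the add_jobs API from the record above; the job callables and shell commands are placeholders, and the Job/BashJob constructor signatures are assumed from pypette's public examples rather than from this record:

```python
from pypette import BashJob, Job, Pipe

pipe = Pipe("demo")
pipe.add_jobs([Job(print, ("stage 1",))])      # runs first, in series
pipe.add_jobs([BashJob(["ls"]), BashJob(["pwd"])],
              run_in_parallel=True)            # then these two together
pipe.run()
```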
dgilland/pydash | 24ad0e43b51b367d00447c45baa68c9c03ad1a52 | src/pydash/strings.py | python | JSRegExp.find | (self, text) | return results | Return list of regular expression matches. | Return list of regular expression matches. | def find(self, text):
"""Return list of regular expression matches."""
if self._global:
results = self.pattern.findall(text)
else:
res = self.pattern.search(text)
if res:
results = [res.group()]
else:
results = []
return results | https://github.com/dgilland/pydash/blob/24ad0e43b51b367d00447c45baa68c9c03ad1a52/src/pydash/strings.py#L105-L115
mme/vergeml | 3dc30ba4e0f3d038743b6d468860cbcf3681acc6 | vergeml/loader.py | python | LiveLoader.perform_read | (self, split: str, index: int, n_samples: int = 1) | return list(map(lambda s: ((s.x, s.y), (s.meta, s.rng)), res)) | [] | def perform_read(self, split: str, index: int, n_samples: int = 1): # pylint: disable=R0914
mul = self.multipliers[split]
offset = int(index % mul)
start_index = int(index/mul)
end_index = int((index+n_samples)/mul)
read = max(1, int(n_samples/mul) + int(min(1, index%mul)))
res = []
samples = self.input.read_samples(split, start_index, read)
if self.output and self.ops:
op1, *oprest = self.ops
for sample in samples:
res.extend(op1.process(sample, oprest))
else:
res = samples
if self.output and self.transform:
res = [self.output.transform(sample) for sample in res]
for sample, i in zip(res, range(start_index, end_index)):
if self.rngs[split][i] is None:
self.rngs[split][i] = sample.rng
else:
sample.rng = self.rngs[split][i]
res = res[offset: offset+n_samples]
return list(map(lambda s: ((s.x, s.y), (s.meta, s.rng)), res)) | https://github.com/mme/vergeml/blob/3dc30ba4e0f3d038743b6d468860cbcf3681acc6/vergeml/loader.py#L368-L399
johntruckenbrodt/pyroSAR | efac51134ba42d20120b259f968afe5a4ddcc46a | pyroSAR/S1/linesimplify.py | python | createPoly | (xn, yn, xmax, ymax, plot=False) | return poly | create an OGR geometry from a sequence of indices
Parameters
----------
xn: numpy.ndarray
the x indices of the points
yn: numpy.ndarray
the y indices of the points
xmax: int or float
the maximum x index value
ymax: int or float
the maximum y index value
Returns
-------
ogr.Geometry | create an OGR geometry from a sequence of indices
Parameters
----------
xn: numpy.ndarray
the x indices of the points
yn: numpy.ndarray
the y indices of the points
xmax: int or float
the maximum x index value
ymax: int or float
the maximum y index value | def createPoly(xn, yn, xmax, ymax, plot=False):
"""
create an OGR geometry from a sequence of indices
Parameters
----------
xn: numpy.ndarray
the x indices of the points
yn: numpy.ndarray
the y indices of the points
xmax: int or float
the maximum x index value
ymax: int or float
the maximum y index value
Returns
-------
ogr.Geometry
"""
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint_2D(0, 0)
for item in zip(xn, yn):
item = list(map(int, item))
if item != [0, 0] and item != [xmax, ymax]:
ring.AddPoint_2D(item[0], item[1])
ring.AddPoint_2D(xmax, ymax)
ring.AddPoint_2D(xmax, 0)
ring.CloseRings()
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
if plot:
fig, ax = plt.subplots()
pts = ring.GetPoints()
arr = np.array(pts)
polygon = Polygon(arr, True)
p = PatchCollection([polygon], cmap=matplotlib.cm.jet, alpha=0.4)
ax.add_collection(p)
ax.autoscale_view()
plt.scatter(arr[:, 0], arr[:, 1], s=10, color='red')
plt.show()
return poly | https://github.com/johntruckenbrodt/pyroSAR/blob/efac51134ba42d20120b259f968afe5a4ddcc46a/pyroSAR/S1/linesimplify.py#L60-L100
kuri65536/python-for-android | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | python3-alpha/python-libs/pyxmpp2/message.py | python | Message.make_error_response | (self, cond) | return msg | Create error response for any non-error message stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:return: new message stanza with the same "id" as self, "from" and
"to" attributes swapped, type="error" and containing <error />
element plus payload of `self`.
:returntype: `Message` | Create error response for any non-error message stanza. | def make_error_response(self, cond):
"""Create error response for any non-error message stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:return: new message stanza with the same "id" as self, "from" and
"to" attributes swapped, type="error" and containing <error />
element plus payload of `self`.
:returntype: `Message`"""
if self.stanza_type == "error":
raise ValueError("Errors may not be generated in response"
" to errors")
msg = Message(stanza_type = "error", from_jid = self.to_jid,
to_jid = self.from_jid, stanza_id = self.stanza_id,
error_cond = cond,
subject = self._subject, body = self._body,
thread = self._thread)
if self._payload is None:
self.decode_payload()
for payload in self._payload:
msg.add_payload(payload.copy())
return msg | https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/pyxmpp2/message.py#L183-L209
KalleHallden/AutoTimer | 2d954216700c4930baa154e28dbddc34609af7ce | env/lib/python2.7/site-packages/setuptools/command/easy_install.py | python | CommandSpec._sys_executable | (cls) | return os.environ.get('__PYVENV_LAUNCHER__', _default) | [] | def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
    return os.environ.get('__PYVENV_LAUNCHER__', _default)
| https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/setuptools/command/easy_install.py#L1984-L1986
IronLanguages/main | a949455434b1fda8c783289e897e78a9a0caabb5 | External.LCA_RESTRICTED/Languages/IronPython/repackage/pip/pip/req/req_uninstall.py | python | UninstallPathSet.commit | (self) | | Remove temporary save dir: rollback will no longer be possible. | Remove temporary save dir: rollback will no longer be possible. |
def commit(self):
    """Remove temporary save dir: rollback will no longer be possible."""
    if self.save_dir is not None:
        rmtree(self.save_dir)
        self.save_dir = None
    self._moved_paths = []
| https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/repackage/pip/pip/req/req_uninstall.py#L139-L144
lzzy12/python-aria-mirror-bot | 9f60efc04af9a7f2495684ebbf0fbc74c4bd39b6 | bot/helper/mirror_utils/upload_utils/gdriveTools.py | python | GoogleDriveHelper.speed | (self) | It calculates the average upload speed and returns it in bytes/seconds unit
:return: Upload speed in bytes/second | It calculates the average upload speed and returns it in bytes/seconds unit
:return: Upload speed in bytes/second |
def speed(self):
    """
    It calculates the average upload speed and returns it in bytes/seconds unit
    :return: Upload speed in bytes/second
    """
    try:
        return self.uploaded_bytes / self.total_time
    except ZeroDivisionError:
        return 0
| https://github.com/lzzy12/python-aria-mirror-bot/blob/9f60efc04af9a7f2495684ebbf0fbc74c4bd39b6/bot/helper/mirror_utils/upload_utils/gdriveTools.py#L59-L67
apache/tvm | 6eb4ed813ebcdcd9558f0906a1870db8302ff1e0 | python/tvm/contrib/utils.py | python | TempDirectory.set_keep_for_debug | (cls, set_to=True) | | Keep temporary directories past program exit for debugging. | Keep temporary directories past program exit for debugging. |
def set_keep_for_debug(cls, set_to=True):
    """Keep temporary directories past program exit for debugging."""
    old_keep_for_debug = cls._KEEP_FOR_DEBUG
    try:
        cls._KEEP_FOR_DEBUG = set_to
        yield
    finally:
        cls._KEEP_FOR_DEBUG = old_keep_for_debug
| https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/contrib/utils.py#L87-L94
google-research/motion_imitation | d0e7b963c5a301984352d25a3ee0820266fa4218 | motion_imitation/robots/laikago.py | python | Laikago.ResetPose | (self, add_constraint) | [] | def ResetPose(self, add_constraint):
del add_constraint
for name in self._joint_name_to_id:
joint_id = self._joint_name_to_id[name]
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(joint_id),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=0)
for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
if "hip_motor_2_chassis_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
elif "upper_leg_2_hip_motor_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
elif "lower_leg_2_upper_leg_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
else:
raise ValueError("The name %s is not recognized as a motor joint." %
name)
self._pybullet_client.resetJointState(self.quadruped,
self._joint_name_to_id[name],
angle,
targetVelocity=0) | https://github.com/google-research/motion_imitation/blob/d0e7b963c5a301984352d25a3ee0820266fa4218/motion_imitation/robots/laikago.py#L251-L274
oracle/oci-python-sdk | 3c1604e4e212008fb6718e2f68cdb5ef71fd5793 | src/oci/database_management/sql_tuning_client.py | python | SqlTuningClient.get_sql_tuning_advisor_task_summary_report | (self, managed_database_id, sql_tuning_advisor_task_id, **kwargs) | Gets the summary report for the specific SQL Tuning Advisor task.
:param str managed_database_id: (required)
The `OCID`__ of the Managed Database.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param int sql_tuning_advisor_task_id: (required)
The SQL tuning task identifier. This is not the `OCID`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str search_period: (optional)
How far back the API will search for begin and end exec id. Unused if neither exec ids nor time filter query params are supplied. This is applicable only for Auto SQL Tuning tasks.
Allowed values are: "LAST_24HR", "LAST_7DAY", "LAST_31DAY", "SINCE_LAST", "ALL"
:param datetime time_greater_than_or_equal_to: (optional)
The optional greater than or equal to query parameter to filter the timestamp. This is applicable only for Auto SQL Tuning tasks.
:param datetime time_less_than_or_equal_to: (optional)
The optional less than or equal to query parameter to filter the timestamp. This is applicable only for Auto SQL Tuning tasks.
:param int begin_exec_id_greater_than_or_equal_to: (optional)
The optional greater than or equal to filter on the execution ID related to a specific SQL Tuning Advisor task. This is applicable only for Auto SQL Tuning tasks.
:param int end_exec_id_less_than_or_equal_to: (optional)
The optional less than or equal to query parameter to filter on the execution ID related to a specific SQL Tuning Advisor task. This is applicable only for Auto SQL Tuning tasks.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.database_management.models.SqlTuningAdvisorTaskSummaryReport`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/databasemanagement/get_sql_tuning_advisor_task_summary_report.py.html>`__ to see an example of how to use get_sql_tuning_advisor_task_summary_report API. | Gets the summary report for the specific SQL Tuning Advisor task. | def get_sql_tuning_advisor_task_summary_report(self, managed_database_id, sql_tuning_advisor_task_id, **kwargs):
"""
Gets the summary report for the specific SQL Tuning Advisor task.
:param str managed_database_id: (required)
The `OCID`__ of the Managed Database.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param int sql_tuning_advisor_task_id: (required)
The SQL tuning task identifier. This is not the `OCID`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str search_period: (optional)
How far back the API will search for begin and end exec id. Unused if neither exec ids nor time filter query params are supplied. This is applicable only for Auto SQL Tuning tasks.
Allowed values are: "LAST_24HR", "LAST_7DAY", "LAST_31DAY", "SINCE_LAST", "ALL"
:param datetime time_greater_than_or_equal_to: (optional)
The optional greater than or equal to query parameter to filter the timestamp. This is applicable only for Auto SQL Tuning tasks.
:param datetime time_less_than_or_equal_to: (optional)
The optional less than or equal to query parameter to filter the timestamp. This is applicable only for Auto SQL Tuning tasks.
:param int begin_exec_id_greater_than_or_equal_to: (optional)
The optional greater than or equal to filter on the execution ID related to a specific SQL Tuning Advisor task. This is applicable only for Auto SQL Tuning tasks.
:param int end_exec_id_less_than_or_equal_to: (optional)
The optional less than or equal to query parameter to filter on the execution ID related to a specific SQL Tuning Advisor task. This is applicable only for Auto SQL Tuning tasks.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.database_management.models.SqlTuningAdvisorTaskSummaryReport`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/databasemanagement/get_sql_tuning_advisor_task_summary_report.py.html>`__ to see an example of how to use get_sql_tuning_advisor_task_summary_report API.
"""
resource_path = "/managedDatabases/{managedDatabaseId}/sqlTuningAdvisorTasks/{sqlTuningAdvisorTaskId}/summaryReport"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"search_period",
"time_greater_than_or_equal_to",
"time_less_than_or_equal_to",
"begin_exec_id_greater_than_or_equal_to",
"end_exec_id_less_than_or_equal_to",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_sql_tuning_advisor_task_summary_report got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedDatabaseId": managed_database_id,
"sqlTuningAdvisorTaskId": sql_tuning_advisor_task_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'search_period' in kwargs:
search_period_allowed_values = ["LAST_24HR", "LAST_7DAY", "LAST_31DAY", "SINCE_LAST", "ALL"]
if kwargs['search_period'] not in search_period_allowed_values:
raise ValueError(
"Invalid value for `search_period`, must be one of {0}".format(search_period_allowed_values)
)
query_params = {
"searchPeriod": kwargs.get("search_period", missing),
"timeGreaterThanOrEqualTo": kwargs.get("time_greater_than_or_equal_to", missing),
"timeLessThanOrEqualTo": kwargs.get("time_less_than_or_equal_to", missing),
"beginExecIdGreaterThanOrEqualTo": kwargs.get("begin_exec_id_greater_than_or_equal_to", missing),
"endExecIdLessThanOrEqualTo": kwargs.get("end_exec_id_less_than_or_equal_to", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="SqlTuningAdvisorTaskSummaryReport")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="SqlTuningAdvisorTaskSummaryReport") | [
"def",
"get_sql_tuning_advisor_task_summary_report",
"(",
"self",
",",
"managed_database_id",
",",
"sql_tuning_advisor_task_id",
",",
"*",
"*",
"kwargs",
")",
":",
"resource_path",
"=",
"\"/managedDatabases/{managedDatabaseId}/sqlTuningAdvisorTasks/{sqlTuningAdvisorTaskId}/summaryRep... | https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/database_management/sql_tuning_client.py#L510-L635 | ||
rubys/venus | 9de21094a8cf565bdfcf75688e121a5ad1f5397b | planet/vendor/compat_logging/config.py | python | stopListening | () | | Stop the listening server which was created with a call to listen(). | Stop the listening server which was created with a call to listen(). |
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    if _listener:
        logging._acquireLock()
        _listener.abort = 1
        _listener = None
        logging._releaseLock()
| https://github.com/rubys/venus/blob/9de21094a8cf565bdfcf75688e121a5ad1f5397b/planet/vendor/compat_logging/config.py#L290-L299
jonasrothfuss/ProMP | 93ae339e23dfc6e1133f9538f2c7cc0ccee89d19 | meta_policy_search/envs/base.py | python | MetaEnv.sample_tasks | (self, n_tasks) | Samples task of the meta-environment
Args:
n_tasks (int) : number of different meta-tasks needed
Returns:
tasks (list) : an (n_tasks) length list of tasks | Samples task of the meta-environment |
def sample_tasks(self, n_tasks):
    """
    Samples task of the meta-environment
    Args:
        n_tasks (int) : number of different meta-tasks needed
    Returns:
        tasks (list) : an (n_tasks) length list of tasks
    """
    raise NotImplementedError
| https://github.com/jonasrothfuss/ProMP/blob/93ae339e23dfc6e1133f9538f2c7cc0ccee89d19/meta_policy_search/envs/base.py#L11-L21
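A minimal subclass sketch for the abstract sample_tasks method above; the task structure (a dict with a random 2-D goal) is invented purely for illustration:

```python
import numpy as np

class GoalMetaEnv(MetaEnv):
    def sample_tasks(self, n_tasks):
        # Each task is a dict holding a randomly drawn 2-D goal position.
        return [{"goal": np.random.uniform(-1, 1, size=2)} for _ in range(n_tasks)]
```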
vulscanteam/vulscan | 787397e267c4e6469522ee0abe55b3e98f968d4a | pocsuite/thirdparty/requests/utils.py | python | prepend_scheme_if_needed | (url, new_scheme) | return urlunparse((scheme, netloc, path, params, query, fragment)) | Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument. | Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument. |
def prepend_scheme_if_needed(url, new_scheme):
    '''Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.'''
    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc
    return urlunparse((scheme, netloc, path, params, query, fragment))
| https://github.com/vulscanteam/vulscan/blob/787397e267c4e6469522ee0abe55b3e98f968d4a/pocsuite/thirdparty/requests/utils.py#L649-L660
olivierkes/manuskript | 2b992e70c617325013e347b470246af66f6d2690 | manuskript/models/abstractItem.py | python | abstractItem.toXML | (self) | return ET.tostring(item) | Returns a string containing the item (and children) in XML.
By default, saves all attributes from self.enum and lastPath.
You can define in XMLExclude and XMLForce what you want to be
excluded or forcibly included. | Returns a string containing the item (and children) in XML.
By default, saves all attributes from self.enum and lastPath.
You can define in XMLExclude and XMLForce what you want to be
excluded or forcibly included. | def toXML(self):
"""
Returns a string containing the item (and children) in XML.
By default, saves all attributes from self.enum and lastPath.
You can define in XMLExclude and XMLForce what you want to be
excluded or forcibly included.
"""
item = ET.Element(self.name)
for attrib in self.enum:
if attrib in self.XMLExclude:
continue
val = self.data(attrib)
if val or attrib in self.XMLForce:
item.set(attrib.name, self.cleanTextForXML(str(val)))
# Saving lastPath
item.set("lastPath", self._lastPath)
# Additional stuff for subclasses
item = self.toXMLProcessItem(item)
for i in self.childItems:
item.append(ET.XML(i.toXML()))
return ET.tostring(item) | https://github.com/olivierkes/manuskript/blob/2b992e70c617325013e347b470246af66f6d2690/manuskript/models/abstractItem.py#L277-L302
edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | base/site-packages/tencentcloud/cbs/v20170312/models.py | python | DescribeSnapshotsRequest.__init__ | (self) | :param SnapshotIds: 要查询快照的ID列表。参数不支持同时指定`SnapshotIds`和`Filters`。
:type SnapshotIds: list of str
:param Filters: 过滤条件。参数不支持同时指定`SnapshotIds`和`Filters`。<br><li>snapshot-id - Array of String - 是否必填:否 -(过滤条件)按照快照的ID过滤。快照ID形如:`snap-11112222`。<br><li>snapshot-name - Array of String - 是否必填:否 -(过滤条件)按照快照名称过滤。<br><li>snapshot-state - Array of String - 是否必填:否 -(过滤条件)按照快照状态过滤。 (NORMAL:正常 | CREATING:创建中 | ROLLBACKING:回滚中。)<br><li>disk-usage - Array of String - 是否必填:否 -(过滤条件)按创建快照的云盘类型过滤。 (SYSTEM_DISK:代表系统盘 | DATA_DISK:代表数据盘。)<br><li>project-id - Array of String - 是否必填:否 -(过滤条件)按云硬盘所属项目ID过滤。<br><li>disk-id - Array of String - 是否必填:否 -(过滤条件)按照创建快照的云硬盘ID过滤。<br><li>zone - Array of String - 是否必填:否 -(过滤条件)按照[可用区](/document/api/213/9452#zone)过滤。<br><li>encrypt - Array of String - 是否必填:否 -(过滤条件)按是否加密盘快照过滤。 (TRUE:表示加密盘快照 | FALSE:表示非加密盘快照。)
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考API[简介](/document/product/362/15633)中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](/document/product/362/15633)中的相关小节。
:type Limit: int
:param Order: 输出云盘列表的排列顺序。取值范围:<br><li>ASC:升序排列<br><li>DESC:降序排列。
:type Order: str
:param OrderField: 快照列表排序的依据字段。取值范围:<br><li>CREATE_TIME:依据快照的创建时间排序<br>默认按创建时间排序。
:type OrderField: str | def __init__(self):
"""
:param SnapshotIds: 要查询快照的ID列表。参数不支持同时指定`SnapshotIds`和`Filters`。
:type SnapshotIds: list of str
:param Filters: 过滤条件。参数不支持同时指定`SnapshotIds`和`Filters`。<br><li>snapshot-id - Array of String - 是否必填:否 -(过滤条件)按照快照的ID过滤。快照ID形如:`snap-11112222`。<br><li>snapshot-name - Array of String - 是否必填:否 -(过滤条件)按照快照名称过滤。<br><li>snapshot-state - Array of String - 是否必填:否 -(过滤条件)按照快照状态过滤。 (NORMAL:正常 | CREATING:创建中 | ROLLBACKING:回滚中。)<br><li>disk-usage - Array of String - 是否必填:否 -(过滤条件)按创建快照的云盘类型过滤。 (SYSTEM_DISK:代表系统盘 | DATA_DISK:代表数据盘。)<br><li>project-id - Array of String - 是否必填:否 -(过滤条件)按云硬盘所属项目ID过滤。<br><li>disk-id - Array of String - 是否必填:否 -(过滤条件)按照创建快照的云硬盘ID过滤。<br><li>zone - Array of String - 是否必填:否 -(过滤条件)按照[可用区](/document/api/213/9452#zone)过滤。<br><li>encrypt - Array of String - 是否必填:否 -(过滤条件)按是否加密盘快照过滤。 (TRUE:表示加密盘快照 | FALSE:表示非加密盘快照。)
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考API[简介](/document/product/362/15633)中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](/document/product/362/15633)中的相关小节。
:type Limit: int
:param Order: 输出云盘列表的排列顺序。取值范围:<br><li>ASC:升序排列<br><li>DESC:降序排列。
:type Order: str
:param OrderField: 快照列表排序的依据字段。取值范围:<br><li>CREATE_TIME:依据快照的创建时间排序<br>默认按创建时间排序。
:type OrderField: str
"""
self.SnapshotIds = None
self.Filters = None
self.Offset = None
self.Limit = None
self.Order = None
self.OrderField = None | https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/tencentcloud/cbs/v20170312/models.py#L456-L476 |
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/dc/v20180410/models.py | python | DescribeInternetAddressResponse.__init__ | (self) | r"""
:param TotalCount: 互联网公网地址数量
:type TotalCount: int
:param Subnets: 互联网公网地址列表
注意:此字段可能返回 null,表示取不到有效值。
:type Subnets: list of InternetAddressDetail
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str | def __init__(self):
r"""
:param TotalCount: 互联网公网地址数量
:type TotalCount: int
:param Subnets: 互联网公网地址列表
注意:此字段可能返回 null,表示取不到有效值。
:type Subnets: list of InternetAddressDetail
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Subnets = None
self.RequestId = None | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/dc/v20180410/models.py#L973-L985 |
openscopeproject/InteractiveHtmlBom | 5e976ccd8d79dd2c8d7b15c788554bdf500427b6 | InteractiveHtmlBom/ecad/svgpath.py | python | Arc.bbox | (self) | return min(xtrema), max(xtrema), min(ytrema), max(ytrema) | returns a bounding box for the segment in the form
(xmin, xmax, ymin, ymax). | def bbox(self):
"""returns a bounding box for the segment in the form
(xmin, xmax, ymin, ymax)."""
# a(t) = radians(self.theta + self.delta*t)
# = (2*pi/360)*(self.theta + self.delta*t)
# x'=0: ~~~~~~~~~
# -rx*cos(phi)*sin(a(t)) = ry*sin(phi)*cos(a(t))
# -(rx/ry)*cot(phi)*tan(a(t)) = 1
# a(t) = arctan(-(ry/rx)tan(phi)) + pi*k === atan_x
# y'=0: ~~~~~~~~~~
# rx*sin(phi)*sin(a(t)) = ry*cos(phi)*cos(a(t))
# (rx/ry)*tan(phi)*tan(a(t)) = 1
# a(t) = arctan((ry/rx)*cot(phi))
# atanres = arctan((ry/rx)*cot(phi)) === atan_y
# ~~~~~~~~
# (2*pi/360)*(self.theta + self.delta*t) = atanres + pi*k
# Therefore, for both x' and y', we have...
# t = ((atan_{x/y} + pi*k)*(360/(2*pi)) - self.theta)/self.delta
# for all k s.t. 0 < t < 1
from math import atan, tan
if cos(self.phi) == 0:
atan_x = pi / 2
atan_y = 0
elif sin(self.phi) == 0:
atan_x = 0
atan_y = pi / 2
else:
rx, ry = self.radius.real, self.radius.imag
atan_x = atan(-(ry / rx) * tan(self.phi))
atan_y = atan((ry / rx) / tan(self.phi))
def angle_inv(ang, q): # inverse of angle from Arc.derivative()
return ((ang + pi * q) * (360 / (2 * pi)) - self.theta) / self.delta
xtrema = [self.start.real, self.end.real]
ytrema = [self.start.imag, self.end.imag]
for k in range(-4, 5):
tx = angle_inv(atan_x, k)
ty = angle_inv(atan_y, k)
if 0 <= tx <= 1:
xtrema.append(self.point(tx).real)
if 0 <= ty <= 1:
ytrema.append(self.point(ty).imag)
return min(xtrema), max(xtrema), min(ytrema), max(ytrema) | https://github.com/openscopeproject/InteractiveHtmlBom/blob/5e976ccd8d79dd2c8d7b15c788554bdf500427b6/InteractiveHtmlBom/ecad/svgpath.py#L293-L338 |
mutpy/mutpy | 5c8b3ca0d365083a4da8333f7fce8783114371fa | mutpy/utils.py | python | StdoutManager.__enter__ | (self) | [] | def __enter__(self):
if self.disable:
self.original_stdout = sys.stdout
sys.stdout = StringIO() | https://github.com/mutpy/mutpy/blob/5c8b3ca0d365083a4da8333f7fce8783114371fa/mutpy/utils.py#L265-L268 |
dimagi/commcare-hq | d67ff1d3b4c51fa050c19e60c3253a79d3452a39 | corehq/apps/smsforms/util.py | python | critical_section_for_smsforms_sessions | (contact_id) | return CriticalSection(['smsforms-sessions-lock-for-contact-%s' % contact_id], timeout=5 * 60) | [] | def critical_section_for_smsforms_sessions(contact_id):
return CriticalSection(['smsforms-sessions-lock-for-contact-%s' % contact_id], timeout=5 * 60) | https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/smsforms/util.py#L42-L43 |
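A hedged usage sketch for the helper above: CriticalSection in CommCare HQ is a lock used as a context manager, so callers would typically wrap session handling like this (the import path comes from the row; contact_id is a placeholder):

```python
from corehq.apps.smsforms.util import critical_section_for_smsforms_sessions

def process_session(contact_id):
    # Assumption: CriticalSection supports the context-manager protocol, so at
    # most one worker at a time touches this contact's SMS form sessions.
    with critical_section_for_smsforms_sessions(contact_id):
        ...  # read/update the contact's open sessions here
```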
pyparallel/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | Lib/mailbox.py | python | Mailbox.iteritems | (self) | | Return an iterator over (key, message) tuples. | Return an iterator over (key, message) tuples. | def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.keys():
try:
value = self[key]
except KeyError:
continue
yield (key, value) | https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/mailbox.py#L124-L131 |
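The same skip-on-KeyError iteration pattern, sketched against the standard-library mailbox API (the mbox path is hypothetical):

```python
import mailbox

def iter_items(box: mailbox.Mailbox):
    # Iterate keys and skip messages removed between keys() and lookup,
    # mirroring the iteritems() generator above.
    for key in box.keys():
        try:
            value = box[key]
        except KeyError:
            continue
        yield key, value

# box = mailbox.mbox("/tmp/example.mbox")   # hypothetical path
# for key, msg in iter_items(box):
#     print(key, msg["Subject"])
```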
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/packaging/_structures.py | python | NegativeInfinity.__gt__ | (self, other) | return False | [] | def __gt__(self, other):
return False | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/packaging/_structures.py#L59-L60 |
kuri65536/python-for-android | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | python-modules/twisted/twisted/scripts/trial.py | python | Options.opt_testmodule | (self, filename) | | Filename to grep for test cases (-*- test-case-name) | Filename to grep for test cases (-*- test-case-name) | def opt_testmodule(self, filename):
"Filename to grep for test cases (-*- test-case-name)"
# If the filename passed to this parameter looks like a test module
# we just add that to the test suite.
#
# If not, we inspect it for an Emacs buffer local variable called
# 'test-case-name'. If that variable is declared, we try to add its
# value to the test suite as a module.
#
# This parameter allows automated processes (like Buildbot) to pass
# a list of files to Trial with the general expectation of "these files,
# whatever they are, will get tested"
if not os.path.isfile(filename):
sys.stderr.write("File %r doesn't exist\n" % (filename,))
return
filename = os.path.abspath(filename)
if isTestFile(filename):
self['tests'].add(filename)
else:
self['tests'].update(getTestModules(filename)) | https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/scripts/trial.py#L165-L184 |
JimmXinu/FanFicFare | bc149a2deb2636320fe50a3e374af6eef8f61889 | included_dependencies/urllib3/util/ssltransport.py | python | SSLTransport.selected_npn_protocol | (self) | return self.sslobj.selected_npn_protocol() | [] | def selected_npn_protocol(self):
return self.sslobj.selected_npn_protocol() | https://github.com/JimmXinu/FanFicFare/blob/bc149a2deb2636320fe50a3e374af6eef8f61889/included_dependencies/urllib3/util/ssltransport.py#L168-L169 |
fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | WebMirror/management/rss_parser_funcs/feed_parse_extractWhocaresnovelsCom.py | python | extractWhocaresnovelsCom | (item) | return False | Parser for 'whocaresnovels.com' | Parser for 'whocaresnovels.com' | def extractWhocaresnovelsCom(item):
'''
Parser for 'whocaresnovels.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractWhocaresnovelsCom.py#L2-L21 |
debian-calibre/calibre | 020fc81d3936a64b2ac51459ecb796666ab6a051 | src/calibre/ebooks/rtf2xml/group_borders.py | python | GroupBorders.__init__ | (self,
in_file,
bug_handler,
copy=None,
run_level=1,
wrap=0,
) | Required:
'file'
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing | def __init__(self,
in_file,
bug_handler,
copy=None,
run_level=1,
wrap=0,
):
"""
Required:
'file'
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
self.__wrap = wrap | https://github.com/debian-calibre/calibre/blob/020fc81d3936a64b2ac51459ecb796666ab6a051/src/calibre/ebooks/rtf2xml/group_borders.py#L27-L49 |
LabPy/lantz | 3e878e3f765a4295b0089d04e241d4beb7b8a65b | lantz/drivers/legacy/rgblasersystems/minilasevo.py | python | MiniLasEvo.enabled | (self) | return self.query('O?') | Method for turning on the laser | Method for turning on the laser | def enabled(self):
"""Method for turning on the laser
"""
return self.query('O?') | https://github.com/LabPy/lantz/blob/3e878e3f765a4295b0089d04e241d4beb7b8a65b/lantz/drivers/legacy/rgblasersystems/minilasevo.py#L166-L169 |
KalleHallden/AutoTimer | 2d954216700c4930baa154e28dbddc34609af7ce | env/lib/python2.7/site-packages/pkg_resources/__init__.py | python | ResourceManager.resource_exists | (self, package_or_requirement, resource_name) | return get_provider(package_or_requirement).has_resource(resource_name) | Does the named resource exist? | Does the named resource exist? | def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name) | https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pkg_resources/__init__.py#L1132-L1134 |
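A hedged usage sketch for resource_exists ('mypackage' and the resource path are placeholders):

```python
import pkg_resources

# Check for a bundled data file without opening it, then load it if present.
if pkg_resources.resource_exists('mypackage', 'data/defaults.json'):
    raw = pkg_resources.resource_string('mypackage', 'data/defaults.json')
```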
Project-MONAI/MONAI | 83f8b06372a3803ebe9281300cb794a1f3395018 | monai/transforms/utility/array.py | python | AddChannel.__call__ | (self, img: NdarrayOrTensor) | return img[None] | Apply the transform to `img`. | Apply the transform to `img`. | def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
return img[None] | https://github.com/Project-MONAI/MONAI/blob/83f8b06372a3803ebe9281300cb794a1f3395018/monai/transforms/utility/array.py#L182-L186 |
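What img[None] does, shown with NumPy (the same indexing applies to torch tensors):

```python
import numpy as np

img = np.zeros((32, 32))      # H x W input
channel_first = img[None]     # prepend a length-1 channel axis -> 1 x H x W
print(channel_first.shape)    # (1, 32, 32)
```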
rowliny/DiffHelper | ab3a96f58f9579d0023aed9ebd785f4edf26f8af | Tool/SitePackages/joblib/externals/loky/backend/fork_exec.py | python | close_fds | (keep_fds) | | Close all the file descriptors except those in keep_fds. | Close all the file descriptors except those in keep_fds. | def close_fds(keep_fds): # pragma: no cover
"""Close all the file descriptors except those in keep_fds."""
# Make sure to keep stdout and stderr open for logging purpose
keep_fds = set(keep_fds).union([1, 2])
# We try to retrieve all the open fds
try:
open_fds = set(int(fd) for fd in os.listdir('/proc/self/fd'))
except FileNotFoundError:
import resource
max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
open_fds = set(fd for fd in range(3, max_nfds))
open_fds.add(0)
for i in open_fds - keep_fds:
try:
os.close(i)
except OSError:
pass | https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/joblib/externals/loky/backend/fork_exec.py#L14-L33 |
pwnieexpress/pwn_plug_sources | 1a23324f5dc2c3de20f9c810269b6a29b2758cad | src/waffit/libs/BeautifulSoup.py | python | Tag.__nonzero__ | (self) | return True | A tag is non-None even if it has no contents. | A tag is non-None even if it has no contents. | def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True | https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/waffit/libs/BeautifulSoup.py#L549-L551 |
JPaulMora/Pyrit | f0f1913c645b445dd391fb047b812b5ba511782c | cpyrit/util.py | python | SortedCollection.count | (self, item) | return self._items[i:j].count(item) | Return number of occurrences of item | Return number of occurrences of item | def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
i = bisect.bisect_left(self._keys, k)
j = bisect.bisect_right(self._keys, k)
return self._items[i:j].count(item) | https://github.com/JPaulMora/Pyrit/blob/f0f1913c645b445dd391fb047b812b5ba511782c/cpyrit/util.py#L181-L186 |
prody/ProDy | b24bbf58aa8fffe463c8548ae50e3955910e5b7f | prody/database/cath.py | python | CATHDB.update | (self, source=None) | | Update data and files from CATH. | Update data and files from CATH. | def update(self, source=None):
"""Update data and files from CATH."""
self._source = source = self._source or source
self.reset()
if source is None:
return
LOGGER.timeit('_cath_update')
type_ = 0
tree = None
if isinstance(source, str):
if isfile(source):
type_ = 1
elif isURL(source):
type_ = 0
else:
type_ = 2
elif hasattr(source, 'read'):
type_ = 1
else:
raise TypeError('source must be either an url, file name, file handle, '
'or text in xml format')
if type_ == 0:
LOGGER.info('Fetching data from CATH...')
self._fetch()
LOGGER.info('Parsing CATH files...')
self._parse()
elif type_ == 1:
LOGGER.info('Reading data from the local xml file...')
tree = ET.parse(source)
elif type_ == 2:
LOGGER.info('Parsing input string...')
tree = ET.fromstring(source)
# post-processing
if type_ > 0:
root = tree.getroot()
nodes = root.iter()
# remove prefix from node tags
for node in nodes:
node.tag = node.tag.lstrip('id.')
# convert int to str
length_nodes = root.findall('.//*[@length]')
for node in length_nodes:
node.attrib['length'] = int(node.attrib['length'])
copy2(root, self.root)
self._update_map()
LOGGER.report('CATH local database built in %.2fs.', '_cath_update') | https://github.com/prody/ProDy/blob/b24bbf58aa8fffe463c8548ae50e3955910e5b7f/prody/database/cath.py#L305-L360 |
llSourcell/AI_Artist | 3038c06c2e389b9c919c881c9a169efe2fd7810e | lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py | python | HTMLTokenizer.doctypeState | (self) | return True | [] | def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True | https://github.com/llSourcell/AI_Artist/blob/3038c06c2e389b9c919c881c9a169efe2fd7810e/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py#L1301-L1316 |
F8LEFT/DecLLVM | d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c | python/idaapi.py | python | vloc_visitor_t.visit_location | (self, *args) | return _idaapi.vloc_visitor_t_visit_location(self, *args) | visit_location(self, v, off, size) -> int | visit_location(self, v, off, size) -> int | def visit_location(self, *args):
"""
visit_location(self, v, off, size) -> int
"""
return _idaapi.vloc_visitor_t_visit_location(self, *args) | https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idaapi.py#L32946-L32950 |
suavecode/SUAVE | 4f83c467c5662b6cc611ce2ab6c0bdd25fd5c0a5 | trunk/SUAVE/Methods/Missions/Segments/Climb/Constant_Throttle_Constant_Speed.py | python | update_differentials_altitude | (segment) | return | On each iteration creates the differentials and integration funcitons from knowns about the problem. Sets the time at each point. Must return in dimensional time, with t[0] = 0
Assumptions:
Constant throttle setting, with a constant true airspeed.
Source:
N/A
Inputs:
segment.climb_angle [radians]
state.conditions.frames.inertial.velocity_vector [meter/second]
segment.altitude_start [meters]
segment.altitude_end [meters]
Outputs:
state.conditions.frames.inertial.time [seconds]
conditions.frames.inertial.position_vector [meters]
conditions.freestream.altitude [meters]
Properties Used:
N/A | def update_differentials_altitude(segment):
"""On each iteration creates the differentials and integration funcitons from knowns about the problem. Sets the time at each point. Must return in dimensional time, with t[0] = 0
Assumptions:
Constant throttle setting, with a constant true airspeed.
Source:
N/A
Inputs:
segment.climb_angle [radians]
state.conditions.frames.inertial.velocity_vector [meter/second]
segment.altitude_start [meters]
segment.altitude_end [meters]
Outputs:
state.conditions.frames.inertial.time [seconds]
conditions.frames.inertial.position_vector [meters]
conditions.freestream.altitude [meters]
Properties Used:
N/A
"""
# unpack
t = segment.state.numerics.dimensionless.control_points
D = segment.state.numerics.dimensionless.differentiate
I = segment.state.numerics.dimensionless.integrate
# Unpack segment initials
alt0 = segment.altitude_start
altf = segment.altitude_end
conditions = segment.state.conditions
v = segment.state.conditions.frames.inertial.velocity_vector
# check for initial altitude
if alt0 is None:
if not segment.state.initials: raise AttributeError('initial altitude not set')
alt0 = -1.0 *segment.state.initials.conditions.frames.inertial.position_vector[-1,2]
# get overall time step
vz = -v[:,2,None] # Inertial velocity is z down
dz = altf- alt0
dt = dz / np.dot(I[-1,:],vz)[-1] # maintain column array
# Integrate vz to get altitudes
alt = alt0 + np.dot(I*dt,vz)
# rescale operators
t = t * dt
# pack
t_initial = segment.state.conditions.frames.inertial.time[0,0]
segment.state.conditions.frames.inertial.time[:,0] = t_initial + t[:,0]
conditions.frames.inertial.position_vector[:,2] = -alt[:,0] # z points down
conditions.freestream.altitude[:,0] = alt[:,0] # positive altitude in this context
return | https://github.com/suavecode/SUAVE/blob/4f83c467c5662b6cc611ce2ab6c0bdd25fd5c0a5/trunk/SUAVE/Methods/Missions/Segments/Climb/Constant_Throttle_Constant_Speed.py#L91-L149 |
thunlp/FewRel | 6734c0df46805e3c825c4f616bc8527d20b8e7ab | models/gnn.py | python | GNN.forward | (self, support, query, N, K, NQ) | return logits, pred | support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set | def forward(self, support, query, N, K, NQ):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
Q: Num of instances for each class in the query set
'''
support = self.sentence_encoder(support)
query = self.sentence_encoder(query)
support = support.view(-1, N, K, self.hidden_size)
query = query.view(-1, NQ, self.hidden_size)
B = support.size(0)
D = self.hidden_size
support = support.unsqueeze(1).expand(-1, NQ, -1, -1, -1).contiguous().view(-1, N * K, D) # (B * NQ, N * K, D)
query = query.view(-1, 1, D) # (B * NQ, 1, D)
labels = Variable(torch.zeros((B * NQ, 1 + N * K, N), dtype=torch.float)).cuda()
for b in range(B * NQ):
for i in range(N):
for k in range(K):
labels[b][1 + i * K + k][i] = 1
nodes = torch.cat([torch.cat([query, support], 1), labels], -1) # (B * NQ, 1 + N * K, D + N)
logits = self.gnn_obj(nodes) # (B * NQ, N)
_, pred = torch.max(logits, 1)
return logits, pred | https://github.com/thunlp/FewRel/blob/6734c0df46805e3c825c4f616bc8527d20b8e7ab/models/gnn.py#L21-L48 |
oilshell/oil | 94388e7d44a9ad879b12615f6203b38596b5a2d3 | Python-2.7.13/Lib/xml/etree/ElementTree.py | python | ProcessingInstruction | (target, text=None) | return element | [] | def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element | https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/xml/etree/ElementTree.py#L561-L566 |
sassoftware/saspy | 47adeb5b9e298e6b9ec017f850245e318f2faa57 | saspy/sasdata.py | python | SASdata.impute | (self, vars: dict, replace: bool = False, prefix: str = 'imp_', out: 'SASdata' = None) | return ret | Imputes missing values for a SASdata object.
:param vars: a dictionary in the form of {'varname':'impute type'} or {'impute type':'[var1, var2]'}
:param replace:
:param prefix:
:param out:
:return: 'SASdata' | def impute(self, vars: dict, replace: bool = False, prefix: str = 'imp_', out: 'SASdata' = None) -> 'SASdata':
"""
Imputes missing values for a SASdata object.
:param vars: a dictionary in the form of {'varname':'impute type'} or {'impute type':'[var1, var2]'}
:param replace:
:param prefix:
:param out:
:return: 'SASdata'
"""
lastlog = len(self.sas._io._log)
outstr = ''
if out:
if isinstance(out, str):
fn = out.partition('.')
if fn[1] == '.':
out_libref = fn[0]
out_table = fn[2].strip()
else:
out_libref = ''
out_table = fn[0].strip()
else:
out_libref = out.libref
out_table = out.table
outstr = "out=%s.'%s'n" % (out_libref, out_table.replace("'", "''"))
else:
out_table = self.table
out_libref = self.libref
# get list of variables and types
varcode = 'data _null_; d = open("' + self.libref + ".'" + self.table.replace("'", "''") + "'n " + '");\n'
varcode += "nvars = attrn(d, 'NVARS');\n"
varcode += "put 'VARNUMS=' nvars 'VARNUMS_END=';\n"
varcode += "put 'VARLIST=';\n"
varcode += "do i = 1 to nvars; var = varname(d, i); put %upcase('var=') var %upcase('varEND='); end;\n"
varcode += "put 'TYPELIST=';\n"
varcode += "do i = 1 to nvars; var = vartype(d, i); put %upcase('type=') var %upcase('typeEND='); end;\n"
varcode += "put 'END_ALL_VARS_AND_TYPES=';\n"
varcode += "run;"
ll = self.sas._io.submit(varcode, "text")
l2 = ll['LOG'].rpartition("VARNUMS=")[2].partition("VARNUMS_END=")
nvars = int(float(l2[0].strip()))
varlist = []
log = ll['LOG'].rpartition('TYPELIST=')[0].rpartition('VARLIST=')
for vari in range(log[2].count('VAR=')):
log = log[2].partition('VAR=')[2].partition('VAREND=')
varlist.append(log[0].strip().upper())
typelist = []
log = ll['LOG'].rpartition('END_ALL_VARS_AND_TYPES=')[0].rpartition('TYPELIST=')
for typei in range(log[2].count('VAR=')):
log = log[2].partition('TYPE=')[2].partition('TYPEEND=')
typelist.append(log[0].strip().upper())
varListType = dict(zip(varlist, typelist))
# process vars dictionary to generate code
## setup default statements
sql = "proc sql;\n select\n"
sqlsel = ' %s(%s),\n'
sqlinto = ' into\n'
if len(out_libref)>0 :
ds1 = "data " + out_libref + ".'" + out_table.replace("'", "''") + "'n " + "; set " + self.libref + ".'" + self.table.replace("'", "''") +"'n " + self._dsopts() + ";\n"
else:
ds1 = "data '" + out_table.replace("'", "''") + "'n " + "; set " + self.libref + ".'" + self.table.replace("'", "''") +"'n " + self._dsopts() + ";\n"
dsmiss = 'if missing({0}) then {1} = {2};\n'
if replace:
dsmiss = prefix+'{1} = {0}; if missing({0}) then %s{1} = {2};\n' % prefix
modesql = ''
modeq = "proc sql outobs=1;\n select %s, count(*) as freq into :imp_mode_%s, :imp_mode_freq\n"
modeq += " from %s where %s is not null group by %s order by freq desc, %s;\nquit;\n"
# pop the values key because it needs special treatment
contantValues = vars.pop('value', None)
if contantValues is not None:
if not all(isinstance(x, tuple) for x in contantValues):
raise SyntaxError("The elements in the 'value' key must be tuples")
for t in contantValues:
if varListType.get(t[0].upper()) == "N":
ds1 += dsmiss.format((t[0], t[0], t[1]))
else:
ds1 += dsmiss.format(t[0], t[0], '"' + str(t[1]) + '"')
for key, values in vars.items():
if key.lower() in ['midrange', 'random']:
for v in values:
sql += sqlsel % ('max', v)
sql += sqlsel % ('min', v)
sqlinto += ' :imp_max_' + v + ',\n'
sqlinto += ' :imp_min_' + v + ',\n'
if key.lower() == 'midrange':
ds1 += dsmiss.format(v, v, '(&imp_min_' + v + '.' + ' + ' + '&imp_max_' + v + '.' + ') / 2')
elif key.lower() == 'random':
# random * (max - min) + min
ds1 += dsmiss.format(v, v, '(&imp_max_' + v + '.' + ' - ' + '&imp_min_' + v + '.' + ') * ranuni(0)' + '+ &imp_min_' + v + '.')
else:
raise SyntaxError("This should not happen!!!!")
else:
for v in values:
sql += sqlsel % (key, v)
sqlinto += ' :imp_' + v + ',\n'
if key.lower == 'mode':
modesql += modeq % (v, v, self.libref + ".'" + self.table.replace("'", "''") + "'n " + self._dsopts() , v, v, v)
if varListType.get(v.upper()) == "N":
ds1 += dsmiss.format(v, v, '&imp_' + v + '.')
else:
ds1 += dsmiss.format(v, v, '"&imp_' + v + '."')
if len(sql) > 20:
sql = sql.rstrip(', \n') + '\n' + sqlinto.rstrip(', \n') + '\n from ' + self.libref + ".'" + self.table.replace("'", "''") + "'n " + self._dsopts() + ';\nquit;\n'
else:
sql = ''
ds1 += 'run;\n'
if self.sas.nosub:
print(modesql + sql + ds1)
return None
ll = self.sas._io.submit(modesql + sql + ds1)
ret = self.sas.sasdata(out_table, libref=out_libref, results=self.results, dsopts=self._dsopts())
self.sas._lastlog = self.sas._io._log[lastlog:]
return ret | https://github.com/sassoftware/saspy/blob/47adeb5b9e298e6b9ec017f850245e318f2faa57/saspy/sasdata.py#L723-L849 |
boostorg/build | aaa95bba19a7acb07badb1929737c67583b14ba0 | src/build/targets.py | python | AbstractTarget.project | (self) | return self.project_ | Returns the project for this target. | Returns the project for this target. | def project (self):
""" Returns the project for this target.
"""
return self.project_ | https://github.com/boostorg/build/blob/aaa95bba19a7acb07badb1929737c67583b14ba0/src/build/targets.py#L324-L327 |
SeuTao/kaggle-competition-solutions | 784a2ec2812b2079a49b913bf8ffaa9d58657858 | SIIM19_Pneumothorax_Segmentation_2nd_solution/semantic_segmentation/network/__init__.py | python | get_model | (network, num_classes, criterion) | return net | Fetch Network Function Pointer | Fetch Network Function Pointer | def get_model(network, num_classes, criterion):
"""
Fetch Network Function Pointer
"""
module = network[:network.rfind('.')]
model = network[network.rfind('.') + 1:]
mod = importlib.import_module(module)
net_func = getattr(mod, model)
net = net_func(num_classes=num_classes, criterion=criterion)
return net | https://github.com/SeuTao/kaggle-competition-solutions/blob/784a2ec2812b2079a49b913bf8ffaa9d58657858/SIIM19_Pneumothorax_Segmentation_2nd_solution/semantic_segmentation/network/__init__.py#L36-L45 |
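The importlib pattern above in a generic, self-contained form (json.dumps is only a stdlib stand-in for a network constructor):

```python
import importlib

def resolve(dotted_path):
    # Split "package.module.attr" into module and attribute, then fetch it.
    module_name, _, attr = dotted_path.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, attr)

json_dumps = resolve('json.dumps')
print(json_dumps({'ok': True}))   # {"ok": true}
```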
gkrizek/bash-lambda-layer | 703b0ade8174022d44779d823172ab7ac33a5505 | bin/docutils/utils/math/math2html.py | python | CommandLineParser.readquoted | (self, args, initial) | return value | Read a value between quotes | Read a value between quotes | def readquoted(self, args, initial):
"Read a value between quotes"
Trace.error('Oops')
value = initial[1:]
while len(args) > 0 and not args[0].endswith('"') and not args[0].startswith('--'):
Trace.error('Appending ' + args[0])
value += ' ' + args[0]
del args[0]
if len(args) == 0 or args[0].startswith('--'):
return None
value += ' ' + args[0:-1]
return value | https://github.com/gkrizek/bash-lambda-layer/blob/703b0ade8174022d44779d823172ab7ac33a5505/bin/docutils/utils/math/math2html.py#L1039-L1050 |
bids-standard/pybids | 9449fdc319c4bdff4ed9aa1b299964352f394d56 | bids/modeling/statsmodels.py | python | BIDSStatsModelsGraph.__getitem__ | (self, key) | return self.get_node(key) | Alias for get_node(key). | Alias for get_node(key). | def __getitem__(self, key):
'''Alias for get_node(key).'''
return self.get_node(key) | https://github.com/bids-standard/pybids/blob/9449fdc319c4bdff4ed9aa1b299964352f394d56/bids/modeling/statsmodels.py#L102-L104 |
KhronosGroup/OpenXR-SDK-Source | 76756e2e7849b15466d29bee7d80cada92865550 | specification/scripts/validitygenerator.py | python | ValidityOutputGenerator.isBaseHeaderType | (self, typename) | return type_member.get('values') is None | Returns true if the type is a struct that is a "base header" type. | Returns true if the type is a struct that is a "base header" type. | def isBaseHeaderType(self, typename):
"""Returns true if the type is a struct that is a "base header" type."""
info = self.registry.typedict.get(typename)
if not info:
return False
members = info.getMembers()
type_member = findNamedElem(members, self.structtype_member_name)
if type_member is None:
return False
# If we have a type member without specified values, it's a base header.
return type_member.get('values') is None | https://github.com/KhronosGroup/OpenXR-SDK-Source/blob/76756e2e7849b15466d29bee7d80cada92865550/specification/scripts/validitygenerator.py#L747-L758 |
dimagi/commcare-hq | d67ff1d3b4c51fa050c19e60c3253a79d3452a39 | corehq/util/metrics/datadog.py | python | datadog_initialized | () | return api._api_key and api._application_key | [] | def datadog_initialized():
return api._api_key and api._application_key | https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/util/metrics/datadog.py#L117-L118 |
tomplus/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | kubernetes_asyncio/client/api/autoscaling_v2beta2_api.py | python | AutoscalingV2beta2Api.read_namespaced_horizontal_pod_autoscaler_status_with_http_info | (self, name, namespace, **kwargs) | return self.api_client.call_api(
'/apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2beta2HorizontalPodAutoscaler', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats) | read_namespaced_horizontal_pod_autoscaler_status # noqa: E501
read status of the specified HorizontalPodAutoscaler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V2beta2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread. | read_namespaced_horizontal_pod_autoscaler_status # noqa: E501 | def read_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_horizontal_pod_autoscaler_status # noqa: E501
read status of the specified HorizontalPodAutoscaler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V2beta2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_horizontal_pod_autoscaler_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2beta2HorizontalPodAutoscaler', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats) | https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/api/autoscaling_v2beta2_api.py#L1410-L1509 |
Mingtzge/2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | 42ad9686f3c3bde0d29a8bc6bcb0e3afb35fb3c3 | recognize_process/crnn_model/crnn_model.py | python | ShadowNet._conv_stage | (self, inputdata, out_dims, name) | return max_pool | [] | def _conv_stage(self, inputdata, out_dims, name):
with tf.variable_scope(name_or_scope=name):
conv = self.conv2d(inputdata=inputdata, out_channel=out_dims, \
kernel_size=3, stride=1, use_bias=True, name='conv')
bn = self.layerbn(inputdata=conv, is_training=self._is_training, name='bn')
relu = self.relu( inputdata=bn, name='relu')
max_pool = self.maxpooling(inputdata=relu, kernel_size=2, stride=2, name='max_pool')
return max_pool | https://github.com/Mingtzge/2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement/blob/42ad9686f3c3bde0d29a8bc6bcb0e3afb35fb3c3/recognize_process/crnn_model/crnn_model.py#L38-L45 |
arsaboo/homeassistant-config | 53c998986fbe84d793a0b174757154ab30e676e4 | custom_components/aarlo/media_player.py | python | ArloMediaPlayer.icon | (self) | return "mdi:speaker" | Icon to use in the frontend, if any. | Icon to use in the frontend, if any. | def icon(self):
"""Icon to use in the frontend, if any."""
return "mdi:speaker" | [
"def",
"icon",
"(",
"self",
")",
":",
"return",
"\"mdi:speaker\""
] | https://github.com/arsaboo/homeassistant-config/blob/53c998986fbe84d793a0b174757154ab30e676e4/custom_components/aarlo/media_player.py#L178-L180 |