repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
aws/aws-xray-sdk-python | aws_xray_sdk/ext/util.py | calculate_segment_name | def calculate_segment_name(host_name, recorder):
"""
Returns the segment name based on recorder configuration and
input host name. This is a helper generally used in web framework
middleware where a host name is available from incoming request's headers.
"""
if recorder.dynamic_naming:
return recorder.dynamic_naming.get_name(host_name)
else:
return recorder.service | python | def calculate_segment_name(host_name, recorder):
"""
Returns the segment name based on recorder configuration and
input host name. This is a helper generally used in web framework
middleware where a host name is available from incoming request's headers.
"""
if recorder.dynamic_naming:
return recorder.dynamic_naming.get_name(host_name)
else:
return recorder.service | [
"def",
"calculate_segment_name",
"(",
"host_name",
",",
"recorder",
")",
":",
"if",
"recorder",
".",
"dynamic_naming",
":",
"return",
"recorder",
".",
"dynamic_naming",
".",
"get_name",
"(",
"host_name",
")",
"else",
":",
"return",
"recorder",
".",
"service"
] | Returns the segment name based on recorder configuration and
input host name. This is a helper generally used in web framework
middleware where a host name is available from incoming request's headers. | [
"Returns",
"the",
"segment",
"name",
"based",
"on",
"recorder",
"configuration",
"and",
"input",
"host",
"name",
".",
"This",
"is",
"a",
"helper",
"generally",
"used",
"in",
"web",
"framework",
"middleware",
"where",
"a",
"host",
"name",
"is",
"available",
"... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/util.py#L76-L85 | train | 213,700 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/util.py | prepare_response_header | def prepare_response_header(origin_header, segment):
"""
Prepare a trace header to be inserted into response
based on original header and the request segment.
"""
if origin_header and origin_header.sampled == '?':
new_header = TraceHeader(root=segment.trace_id,
sampled=segment.sampled)
else:
new_header = TraceHeader(root=segment.trace_id)
return new_header.to_header_str() | python | def prepare_response_header(origin_header, segment):
"""
Prepare a trace header to be inserted into response
based on original header and the request segment.
"""
if origin_header and origin_header.sampled == '?':
new_header = TraceHeader(root=segment.trace_id,
sampled=segment.sampled)
else:
new_header = TraceHeader(root=segment.trace_id)
return new_header.to_header_str() | [
"def",
"prepare_response_header",
"(",
"origin_header",
",",
"segment",
")",
":",
"if",
"origin_header",
"and",
"origin_header",
".",
"sampled",
"==",
"'?'",
":",
"new_header",
"=",
"TraceHeader",
"(",
"root",
"=",
"segment",
".",
"trace_id",
",",
"sampled",
"... | Prepare a trace header to be inserted into response
based on original header and the request segment. | [
"Prepare",
"a",
"trace",
"header",
"to",
"be",
"inserted",
"into",
"response",
"based",
"on",
"original",
"header",
"and",
"the",
"request",
"segment",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/util.py#L88-L99 | train | 213,701 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/util.py | to_snake_case | def to_snake_case(name):
"""
Convert the input string to snake-cased string.
"""
s1 = first_cap_re.sub(r'\1_\2', name)
# handle acronym words
return all_cap_re.sub(r'\1_\2', s1).lower() | python | def to_snake_case(name):
"""
Convert the input string to snake-cased string.
"""
s1 = first_cap_re.sub(r'\1_\2', name)
# handle acronym words
return all_cap_re.sub(r'\1_\2', s1).lower() | [
"def",
"to_snake_case",
"(",
"name",
")",
":",
"s1",
"=",
"first_cap_re",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"name",
")",
"# handle acronym words",
"return",
"all_cap_re",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] | Convert the input string to snake-cased string. | [
"Convert",
"the",
"input",
"string",
"to",
"snake",
"-",
"cased",
"string",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/util.py#L102-L108 | train | 213,702 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/botocore/patch.py | patch | def patch():
"""
Patch botocore client so it generates subsegments
when calling AWS services.
"""
if hasattr(botocore.client, '_xray_enabled'):
return
setattr(botocore.client, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'botocore.client',
'BaseClient._make_api_call',
_xray_traced_botocore,
)
wrapt.wrap_function_wrapper(
'botocore.endpoint',
'Endpoint.prepare_request',
inject_header,
) | python | def patch():
"""
Patch botocore client so it generates subsegments
when calling AWS services.
"""
if hasattr(botocore.client, '_xray_enabled'):
return
setattr(botocore.client, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'botocore.client',
'BaseClient._make_api_call',
_xray_traced_botocore,
)
wrapt.wrap_function_wrapper(
'botocore.endpoint',
'Endpoint.prepare_request',
inject_header,
) | [
"def",
"patch",
"(",
")",
":",
"if",
"hasattr",
"(",
"botocore",
".",
"client",
",",
"'_xray_enabled'",
")",
":",
"return",
"setattr",
"(",
"botocore",
".",
"client",
",",
"'_xray_enabled'",
",",
"True",
")",
"wrapt",
".",
"wrap_function_wrapper",
"(",
"'b... | Patch botocore client so it generates subsegments
when calling AWS services. | [
"Patch",
"botocore",
"client",
"so",
"it",
"generates",
"subsegments",
"when",
"calling",
"AWS",
"services",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/botocore/patch.py#L8-L27 | train | 213,703 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.configure | def configure(self, sampling=None, plugins=None,
context_missing=None, sampling_rules=None,
daemon_address=None, service=None,
context=None, emitter=None, streaming=None,
dynamic_naming=None, streaming_threshold=None,
max_trace_back=None, sampler=None,
stream_sql=True):
"""Configure global X-Ray recorder.
Configure needs to run before patching thrid party libraries
to avoid creating dangling subsegment.
:param bool sampling: If sampling is enabled, every time the recorder
creates a segment it decides whether to send this segment to
the X-Ray daemon. This setting is not used if the recorder
is running in AWS Lambda. The recorder always respect the incoming
sampling decisions regardless of this setting.
:param sampling_rules: Pass a set of local custom sampling rules.
Can be an absolute path of the sampling rule config json file
or a dictionary that defines those rules. This will also be the
fallback rules in case of centralized sampling opted-in while
the cetralized sampling rules are not available.
:param sampler: The sampler used to make sampling decisions. The SDK
provides two built-in samplers. One is centralized rules based and
the other is local rules based. The former is the default.
:param tuple plugins: plugins that add extra metadata to each segment.
Currently available plugins are EC2Plugin, ECS plugin and
ElasticBeanstalkPlugin.
If you want to disable all previously enabled plugins,
pass an empty tuple ``()``.
:param str context_missing: recorder behavior when it tries to mutate
a segment or add a subsegment but there is no active segment.
RUNTIME_ERROR means the recorder will raise an exception.
LOG_ERROR means the recorder will only log the error and
do nothing.
:param str daemon_address: The X-Ray daemon address where the recorder
sends data to.
:param str service: default segment name if creating a segment without
providing a name.
:param context: You can pass your own implementation of context storage
for active segment/subsegment by overriding the default
``Context`` class.
:param emitter: The emitter that sends a segment/subsegment to
the X-Ray daemon. You can override ``UDPEmitter`` class.
:param dynamic_naming: a string that defines a pattern that host names
should match. Alternatively you can pass a module which
overrides ``DefaultDynamicNaming`` module.
:param streaming: The streaming module to stream out trace documents
when they grow too large. You can override ``DefaultStreaming``
class to have your own implementation of the streaming process.
:param streaming_threshold: If breaks within a single segment it will
start streaming out children subsegments. By default it is the
maximum number of subsegments within a segment.
:param int max_trace_back: The maxinum number of stack traces recorded
by auto-capture. Lower this if a single document becomes too large.
:param bool stream_sql: Whether SQL query texts should be streamed.
Environment variables AWS_XRAY_DAEMON_ADDRESS, AWS_XRAY_CONTEXT_MISSING
and AWS_XRAY_TRACING_NAME respectively overrides arguments
daemon_address, context_missing and service.
"""
if sampling is not None:
self.sampling = sampling
if sampler:
self.sampler = sampler
if service:
self.service = os.getenv(TRACING_NAME_KEY, service)
if sampling_rules:
self._load_sampling_rules(sampling_rules)
if emitter:
self.emitter = emitter
if daemon_address:
self.emitter.set_daemon_address(os.getenv(DAEMON_ADDR_KEY, daemon_address))
if context:
self.context = context
if context_missing:
self.context.context_missing = os.getenv(CONTEXT_MISSING_KEY, context_missing)
if dynamic_naming:
self.dynamic_naming = dynamic_naming
if streaming:
self.streaming = streaming
if streaming_threshold:
self.streaming_threshold = streaming_threshold
if type(max_trace_back) == int and max_trace_back >= 0:
self.max_trace_back = max_trace_back
if stream_sql is not None:
self.stream_sql = stream_sql
if plugins:
plugin_modules = get_plugin_modules(plugins)
for plugin in plugin_modules:
plugin.initialize()
if plugin.runtime_context:
self._aws_metadata[plugin.SERVICE_NAME] = plugin.runtime_context
self._origin = plugin.ORIGIN
# handling explicitly using empty list to clean up plugins.
elif plugins is not None:
self._aws_metadata = copy.deepcopy(XRAY_META)
self._origin = None
if type(self.sampler).__name__ == 'DefaultSampler':
self.sampler.load_settings(DaemonConfig(daemon_address),
self.context, self._origin) | python | def configure(self, sampling=None, plugins=None,
context_missing=None, sampling_rules=None,
daemon_address=None, service=None,
context=None, emitter=None, streaming=None,
dynamic_naming=None, streaming_threshold=None,
max_trace_back=None, sampler=None,
stream_sql=True):
"""Configure global X-Ray recorder.
Configure needs to run before patching thrid party libraries
to avoid creating dangling subsegment.
:param bool sampling: If sampling is enabled, every time the recorder
creates a segment it decides whether to send this segment to
the X-Ray daemon. This setting is not used if the recorder
is running in AWS Lambda. The recorder always respect the incoming
sampling decisions regardless of this setting.
:param sampling_rules: Pass a set of local custom sampling rules.
Can be an absolute path of the sampling rule config json file
or a dictionary that defines those rules. This will also be the
fallback rules in case of centralized sampling opted-in while
the cetralized sampling rules are not available.
:param sampler: The sampler used to make sampling decisions. The SDK
provides two built-in samplers. One is centralized rules based and
the other is local rules based. The former is the default.
:param tuple plugins: plugins that add extra metadata to each segment.
Currently available plugins are EC2Plugin, ECS plugin and
ElasticBeanstalkPlugin.
If you want to disable all previously enabled plugins,
pass an empty tuple ``()``.
:param str context_missing: recorder behavior when it tries to mutate
a segment or add a subsegment but there is no active segment.
RUNTIME_ERROR means the recorder will raise an exception.
LOG_ERROR means the recorder will only log the error and
do nothing.
:param str daemon_address: The X-Ray daemon address where the recorder
sends data to.
:param str service: default segment name if creating a segment without
providing a name.
:param context: You can pass your own implementation of context storage
for active segment/subsegment by overriding the default
``Context`` class.
:param emitter: The emitter that sends a segment/subsegment to
the X-Ray daemon. You can override ``UDPEmitter`` class.
:param dynamic_naming: a string that defines a pattern that host names
should match. Alternatively you can pass a module which
overrides ``DefaultDynamicNaming`` module.
:param streaming: The streaming module to stream out trace documents
when they grow too large. You can override ``DefaultStreaming``
class to have your own implementation of the streaming process.
:param streaming_threshold: If breaks within a single segment it will
start streaming out children subsegments. By default it is the
maximum number of subsegments within a segment.
:param int max_trace_back: The maxinum number of stack traces recorded
by auto-capture. Lower this if a single document becomes too large.
:param bool stream_sql: Whether SQL query texts should be streamed.
Environment variables AWS_XRAY_DAEMON_ADDRESS, AWS_XRAY_CONTEXT_MISSING
and AWS_XRAY_TRACING_NAME respectively overrides arguments
daemon_address, context_missing and service.
"""
if sampling is not None:
self.sampling = sampling
if sampler:
self.sampler = sampler
if service:
self.service = os.getenv(TRACING_NAME_KEY, service)
if sampling_rules:
self._load_sampling_rules(sampling_rules)
if emitter:
self.emitter = emitter
if daemon_address:
self.emitter.set_daemon_address(os.getenv(DAEMON_ADDR_KEY, daemon_address))
if context:
self.context = context
if context_missing:
self.context.context_missing = os.getenv(CONTEXT_MISSING_KEY, context_missing)
if dynamic_naming:
self.dynamic_naming = dynamic_naming
if streaming:
self.streaming = streaming
if streaming_threshold:
self.streaming_threshold = streaming_threshold
if type(max_trace_back) == int and max_trace_back >= 0:
self.max_trace_back = max_trace_back
if stream_sql is not None:
self.stream_sql = stream_sql
if plugins:
plugin_modules = get_plugin_modules(plugins)
for plugin in plugin_modules:
plugin.initialize()
if plugin.runtime_context:
self._aws_metadata[plugin.SERVICE_NAME] = plugin.runtime_context
self._origin = plugin.ORIGIN
# handling explicitly using empty list to clean up plugins.
elif plugins is not None:
self._aws_metadata = copy.deepcopy(XRAY_META)
self._origin = None
if type(self.sampler).__name__ == 'DefaultSampler':
self.sampler.load_settings(DaemonConfig(daemon_address),
self.context, self._origin) | [
"def",
"configure",
"(",
"self",
",",
"sampling",
"=",
"None",
",",
"plugins",
"=",
"None",
",",
"context_missing",
"=",
"None",
",",
"sampling_rules",
"=",
"None",
",",
"daemon_address",
"=",
"None",
",",
"service",
"=",
"None",
",",
"context",
"=",
"No... | Configure global X-Ray recorder.
Configure needs to run before patching thrid party libraries
to avoid creating dangling subsegment.
:param bool sampling: If sampling is enabled, every time the recorder
creates a segment it decides whether to send this segment to
the X-Ray daemon. This setting is not used if the recorder
is running in AWS Lambda. The recorder always respect the incoming
sampling decisions regardless of this setting.
:param sampling_rules: Pass a set of local custom sampling rules.
Can be an absolute path of the sampling rule config json file
or a dictionary that defines those rules. This will also be the
fallback rules in case of centralized sampling opted-in while
the cetralized sampling rules are not available.
:param sampler: The sampler used to make sampling decisions. The SDK
provides two built-in samplers. One is centralized rules based and
the other is local rules based. The former is the default.
:param tuple plugins: plugins that add extra metadata to each segment.
Currently available plugins are EC2Plugin, ECS plugin and
ElasticBeanstalkPlugin.
If you want to disable all previously enabled plugins,
pass an empty tuple ``()``.
:param str context_missing: recorder behavior when it tries to mutate
a segment or add a subsegment but there is no active segment.
RUNTIME_ERROR means the recorder will raise an exception.
LOG_ERROR means the recorder will only log the error and
do nothing.
:param str daemon_address: The X-Ray daemon address where the recorder
sends data to.
:param str service: default segment name if creating a segment without
providing a name.
:param context: You can pass your own implementation of context storage
for active segment/subsegment by overriding the default
``Context`` class.
:param emitter: The emitter that sends a segment/subsegment to
the X-Ray daemon. You can override ``UDPEmitter`` class.
:param dynamic_naming: a string that defines a pattern that host names
should match. Alternatively you can pass a module which
overrides ``DefaultDynamicNaming`` module.
:param streaming: The streaming module to stream out trace documents
when they grow too large. You can override ``DefaultStreaming``
class to have your own implementation of the streaming process.
:param streaming_threshold: If breaks within a single segment it will
start streaming out children subsegments. By default it is the
maximum number of subsegments within a segment.
:param int max_trace_back: The maxinum number of stack traces recorded
by auto-capture. Lower this if a single document becomes too large.
:param bool stream_sql: Whether SQL query texts should be streamed.
Environment variables AWS_XRAY_DAEMON_ADDRESS, AWS_XRAY_CONTEXT_MISSING
and AWS_XRAY_TRACING_NAME respectively overrides arguments
daemon_address, context_missing and service. | [
"Configure",
"global",
"X",
"-",
"Ray",
"recorder",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L81-L183 | train | 213,704 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.begin_segment | def begin_segment(self, name=None, traceid=None,
parent_id=None, sampling=None):
"""
Begin a segment on the current thread and return it. The recorder
only keeps one segment at a time. Create the second one without
closing existing one will overwrite it.
:param str name: the name of the segment
:param str traceid: trace id of the segment
:param int sampling: 0 means not sampled, 1 means sampled
"""
seg_name = name or self.service
if not seg_name:
raise SegmentNameMissingException("Segment name is required.")
# Sampling decision is None if not sampled.
# In a sampled case it could be either a string or 1
# depending on if centralized or local sampling rule takes effect.
decision = True
# To disable the recorder, we set the sampling decision to always be false.
# This way, when segments are generated, they become dummy segments and are ultimately never sent.
# The call to self._sampler.should_trace() is never called either so the poller threads are never started.
if not global_sdk_config.sdk_enabled():
sampling = 0
# we respect the input sampling decision
# regardless of recorder configuration.
if sampling == 0:
decision = False
elif sampling:
decision = sampling
elif self.sampling:
decision = self._sampler.should_trace()
if not decision:
segment = DummySegment(seg_name)
else:
segment = Segment(name=seg_name, traceid=traceid,
parent_id=parent_id)
self._populate_runtime_context(segment, decision)
self.context.put_segment(segment)
return segment | python | def begin_segment(self, name=None, traceid=None,
parent_id=None, sampling=None):
"""
Begin a segment on the current thread and return it. The recorder
only keeps one segment at a time. Create the second one without
closing existing one will overwrite it.
:param str name: the name of the segment
:param str traceid: trace id of the segment
:param int sampling: 0 means not sampled, 1 means sampled
"""
seg_name = name or self.service
if not seg_name:
raise SegmentNameMissingException("Segment name is required.")
# Sampling decision is None if not sampled.
# In a sampled case it could be either a string or 1
# depending on if centralized or local sampling rule takes effect.
decision = True
# To disable the recorder, we set the sampling decision to always be false.
# This way, when segments are generated, they become dummy segments and are ultimately never sent.
# The call to self._sampler.should_trace() is never called either so the poller threads are never started.
if not global_sdk_config.sdk_enabled():
sampling = 0
# we respect the input sampling decision
# regardless of recorder configuration.
if sampling == 0:
decision = False
elif sampling:
decision = sampling
elif self.sampling:
decision = self._sampler.should_trace()
if not decision:
segment = DummySegment(seg_name)
else:
segment = Segment(name=seg_name, traceid=traceid,
parent_id=parent_id)
self._populate_runtime_context(segment, decision)
self.context.put_segment(segment)
return segment | [
"def",
"begin_segment",
"(",
"self",
",",
"name",
"=",
"None",
",",
"traceid",
"=",
"None",
",",
"parent_id",
"=",
"None",
",",
"sampling",
"=",
"None",
")",
":",
"seg_name",
"=",
"name",
"or",
"self",
".",
"service",
"if",
"not",
"seg_name",
":",
"r... | Begin a segment on the current thread and return it. The recorder
only keeps one segment at a time. Create the second one without
closing existing one will overwrite it.
:param str name: the name of the segment
:param str traceid: trace id of the segment
:param int sampling: 0 means not sampled, 1 means sampled | [
"Begin",
"a",
"segment",
"on",
"the",
"current",
"thread",
"and",
"return",
"it",
".",
"The",
"recorder",
"only",
"keeps",
"one",
"segment",
"at",
"a",
"time",
".",
"Create",
"the",
"second",
"one",
"without",
"closing",
"existing",
"one",
"will",
"overwri... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L203-L246 | train | 213,705 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.end_segment | def end_segment(self, end_time=None):
"""
End the current segment and send it to X-Ray daemon
if it is ready to send. Ready means segment and
all its subsegments are closed.
:param float end_time: segment compeletion in unix epoch in seconds.
"""
self.context.end_segment(end_time)
segment = self.current_segment()
if segment and segment.ready_to_send():
self._send_segment() | python | def end_segment(self, end_time=None):
"""
End the current segment and send it to X-Ray daemon
if it is ready to send. Ready means segment and
all its subsegments are closed.
:param float end_time: segment compeletion in unix epoch in seconds.
"""
self.context.end_segment(end_time)
segment = self.current_segment()
if segment and segment.ready_to_send():
self._send_segment() | [
"def",
"end_segment",
"(",
"self",
",",
"end_time",
"=",
"None",
")",
":",
"self",
".",
"context",
".",
"end_segment",
"(",
"end_time",
")",
"segment",
"=",
"self",
".",
"current_segment",
"(",
")",
"if",
"segment",
"and",
"segment",
".",
"ready_to_send",
... | End the current segment and send it to X-Ray daemon
if it is ready to send. Ready means segment and
all its subsegments are closed.
:param float end_time: segment compeletion in unix epoch in seconds. | [
"End",
"the",
"current",
"segment",
"and",
"send",
"it",
"to",
"X",
"-",
"Ray",
"daemon",
"if",
"it",
"is",
"ready",
"to",
"send",
".",
"Ready",
"means",
"segment",
"and",
"all",
"its",
"subsegments",
"are",
"closed",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L248-L259 | train | 213,706 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.current_segment | def current_segment(self):
"""
Return the currently active segment. In a multithreading environment,
this will make sure the segment returned is the one created by the
same thread.
"""
entity = self.get_trace_entity()
if self._is_subsegment(entity):
return entity.parent_segment
else:
return entity | python | def current_segment(self):
"""
Return the currently active segment. In a multithreading environment,
this will make sure the segment returned is the one created by the
same thread.
"""
entity = self.get_trace_entity()
if self._is_subsegment(entity):
return entity.parent_segment
else:
return entity | [
"def",
"current_segment",
"(",
"self",
")",
":",
"entity",
"=",
"self",
".",
"get_trace_entity",
"(",
")",
"if",
"self",
".",
"_is_subsegment",
"(",
"entity",
")",
":",
"return",
"entity",
".",
"parent_segment",
"else",
":",
"return",
"entity"
] | Return the currently active segment. In a multithreading environment,
this will make sure the segment returned is the one created by the
same thread. | [
"Return",
"the",
"currently",
"active",
"segment",
".",
"In",
"a",
"multithreading",
"environment",
"this",
"will",
"make",
"sure",
"the",
"segment",
"returned",
"is",
"the",
"one",
"created",
"by",
"the",
"same",
"thread",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L261-L271 | train | 213,707 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.begin_subsegment | def begin_subsegment(self, name, namespace='local'):
"""
Begin a new subsegment.
If there is open subsegment, the newly created subsegment will be the
child of latest opened subsegment.
If not, it will be the child of the current open segment.
:param str name: the name of the subsegment.
:param str namespace: currently can only be 'local', 'remote', 'aws'.
"""
segment = self.current_segment()
if not segment:
log.warning("No segment found, cannot begin subsegment %s." % name)
return None
if not segment.sampled:
subsegment = DummySubsegment(segment, name)
else:
subsegment = Subsegment(name, namespace, segment)
self.context.put_subsegment(subsegment)
return subsegment | python | def begin_subsegment(self, name, namespace='local'):
"""
Begin a new subsegment.
If there is open subsegment, the newly created subsegment will be the
child of latest opened subsegment.
If not, it will be the child of the current open segment.
:param str name: the name of the subsegment.
:param str namespace: currently can only be 'local', 'remote', 'aws'.
"""
segment = self.current_segment()
if not segment:
log.warning("No segment found, cannot begin subsegment %s." % name)
return None
if not segment.sampled:
subsegment = DummySubsegment(segment, name)
else:
subsegment = Subsegment(name, namespace, segment)
self.context.put_subsegment(subsegment)
return subsegment | [
"def",
"begin_subsegment",
"(",
"self",
",",
"name",
",",
"namespace",
"=",
"'local'",
")",
":",
"segment",
"=",
"self",
".",
"current_segment",
"(",
")",
"if",
"not",
"segment",
":",
"log",
".",
"warning",
"(",
"\"No segment found, cannot begin subsegment %s.\"... | Begin a new subsegment.
If there is open subsegment, the newly created subsegment will be the
child of latest opened subsegment.
If not, it will be the child of the current open segment.
:param str name: the name of the subsegment.
:param str namespace: currently can only be 'local', 'remote', 'aws'. | [
"Begin",
"a",
"new",
"subsegment",
".",
"If",
"there",
"is",
"open",
"subsegment",
"the",
"newly",
"created",
"subsegment",
"will",
"be",
"the",
"child",
"of",
"latest",
"opened",
"subsegment",
".",
"If",
"not",
"it",
"will",
"be",
"the",
"child",
"of",
... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L273-L296 | train | 213,708 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.end_subsegment | def end_subsegment(self, end_time=None):
"""
End the current active subsegment. If this is the last one open
under its parent segment, the entire segment will be sent.
:param float end_time: subsegment compeletion in unix epoch in seconds.
"""
if not self.context.end_subsegment(end_time):
return
# if segment is already close, we check if we can send entire segment
# otherwise we check if we need to stream some subsegments
if self.current_segment().ready_to_send():
self._send_segment()
else:
self.stream_subsegments() | python | def end_subsegment(self, end_time=None):
"""
End the current active subsegment. If this is the last one open
under its parent segment, the entire segment will be sent.
:param float end_time: subsegment compeletion in unix epoch in seconds.
"""
if not self.context.end_subsegment(end_time):
return
# if segment is already close, we check if we can send entire segment
# otherwise we check if we need to stream some subsegments
if self.current_segment().ready_to_send():
self._send_segment()
else:
self.stream_subsegments() | [
"def",
"end_subsegment",
"(",
"self",
",",
"end_time",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"context",
".",
"end_subsegment",
"(",
"end_time",
")",
":",
"return",
"# if segment is already close, we check if we can send entire segment",
"# otherwise we check ... | End the current active subsegment. If this is the last one open
under its parent segment, the entire segment will be sent.
:param float end_time: subsegment compeletion in unix epoch in seconds. | [
"End",
"the",
"current",
"active",
"subsegment",
".",
"If",
"this",
"is",
"the",
"last",
"one",
"open",
"under",
"its",
"parent",
"segment",
"the",
"entire",
"segment",
"will",
"be",
"sent",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L310-L325 | train | 213,709 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.put_annotation | def put_annotation(self, key, value):
"""
Annotate current active trace entity with a key-value pair.
Annotations will be indexed for later search query.
:param str key: annotation key
:param object value: annotation value. Any type other than
string/number/bool will be dropped
"""
entity = self.get_trace_entity()
if entity and entity.sampled:
entity.put_annotation(key, value) | python | def put_annotation(self, key, value):
"""
Annotate current active trace entity with a key-value pair.
Annotations will be indexed for later search query.
:param str key: annotation key
:param object value: annotation value. Any type other than
string/number/bool will be dropped
"""
entity = self.get_trace_entity()
if entity and entity.sampled:
entity.put_annotation(key, value) | [
"def",
"put_annotation",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"entity",
"=",
"self",
".",
"get_trace_entity",
"(",
")",
"if",
"entity",
"and",
"entity",
".",
"sampled",
":",
"entity",
".",
"put_annotation",
"(",
"key",
",",
"value",
")"
] | Annotate current active trace entity with a key-value pair.
Annotations will be indexed for later search query.
:param str key: annotation key
:param object value: annotation value. Any type other than
string/number/bool will be dropped | [
"Annotate",
"current",
"active",
"trace",
"entity",
"with",
"a",
"key",
"-",
"value",
"pair",
".",
"Annotations",
"will",
"be",
"indexed",
"for",
"later",
"search",
"query",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L327-L338 | train | 213,710 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.put_metadata | def put_metadata(self, key, value, namespace='default'):
"""
Add metadata to the current active trace entity.
Metadata is not indexed but can be later retrieved
by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string
"""
entity = self.get_trace_entity()
if entity and entity.sampled:
entity.put_metadata(key, value, namespace) | python | def put_metadata(self, key, value, namespace='default'):
"""
Add metadata to the current active trace entity.
Metadata is not indexed but can be later retrieved
by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string
"""
entity = self.get_trace_entity()
if entity and entity.sampled:
entity.put_metadata(key, value, namespace) | [
"def",
"put_metadata",
"(",
"self",
",",
"key",
",",
"value",
",",
"namespace",
"=",
"'default'",
")",
":",
"entity",
"=",
"self",
".",
"get_trace_entity",
"(",
")",
"if",
"entity",
"and",
"entity",
".",
"sampled",
":",
"entity",
".",
"put_metadata",
"("... | Add metadata to the current active trace entity.
Metadata is not indexed but can be later retrieved
by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string | [
"Add",
"metadata",
"to",
"the",
"current",
"active",
"trace",
"entity",
".",
"Metadata",
"is",
"not",
"indexed",
"but",
"can",
"be",
"later",
"retrieved",
"by",
"BatchGetTraces",
"API",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L340-L353 | train | 213,711 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder.stream_subsegments | def stream_subsegments(self):
"""
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
"""
segment = self.current_segment()
if self.streaming.is_eligible(segment):
self.streaming.stream(segment, self._stream_subsegment_out) | python | def stream_subsegments(self):
"""
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
"""
segment = self.current_segment()
if self.streaming.is_eligible(segment):
self.streaming.stream(segment, self._stream_subsegment_out) | [
"def",
"stream_subsegments",
"(",
"self",
")",
":",
"segment",
"=",
"self",
".",
"current_segment",
"(",
")",
"if",
"self",
".",
"streaming",
".",
"is_eligible",
"(",
"segment",
")",
":",
"self",
".",
"streaming",
".",
"stream",
"(",
"segment",
",",
"sel... | Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment. | [
"Stream",
"all",
"closed",
"subsegments",
"to",
"the",
"daemon",
"and",
"remove",
"reference",
"to",
"the",
"parent",
"segment",
".",
"No",
"-",
"op",
"for",
"a",
"not",
"sampled",
"segment",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L383-L392 | train | 213,712 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/recorder.py | AWSXRayRecorder._send_segment | def _send_segment(self):
"""
Send the current segment to X-Ray daemon if it is present and
sampled, then clean up context storage.
The emitter will handle failures.
"""
segment = self.current_segment()
if not segment:
return
if segment.sampled:
self.emitter.send_entity(segment)
self.clear_trace_entities() | python | def _send_segment(self):
"""
Send the current segment to X-Ray daemon if it is present and
sampled, then clean up context storage.
The emitter will handle failures.
"""
segment = self.current_segment()
if not segment:
return
if segment.sampled:
self.emitter.send_entity(segment)
self.clear_trace_entities() | [
"def",
"_send_segment",
"(",
"self",
")",
":",
"segment",
"=",
"self",
".",
"current_segment",
"(",
")",
"if",
"not",
"segment",
":",
"return",
"if",
"segment",
".",
"sampled",
":",
"self",
".",
"emitter",
".",
"send_entity",
"(",
"segment",
")",
"self",... | Send the current segment to X-Ray daemon if it is present and
sampled, then clean up context storage.
The emitter will handle failures. | [
"Send",
"the",
"current",
"segment",
"to",
"X",
"-",
"Ray",
"daemon",
"if",
"it",
"is",
"present",
"and",
"sampled",
"then",
"clean",
"up",
"context",
"storage",
".",
"The",
"emitter",
"will",
"handle",
"failures",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/recorder.py#L460-L473 | train | 213,713 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/local/sampling_rule.py | SamplingRule.applies | def applies(self, host, method, path):
"""
Determines whether or not this sampling rule applies to
the incoming request based on some of the request's parameters.
Any None parameters provided will be considered an implicit match.
"""
return (not host or wildcard_match(self.host, host)) \
and (not method or wildcard_match(self.method, method)) \
and (not path or wildcard_match(self.path, path)) | python | def applies(self, host, method, path):
"""
Determines whether or not this sampling rule applies to
the incoming request based on some of the request's parameters.
Any None parameters provided will be considered an implicit match.
"""
return (not host or wildcard_match(self.host, host)) \
and (not method or wildcard_match(self.method, method)) \
and (not path or wildcard_match(self.path, path)) | [
"def",
"applies",
"(",
"self",
",",
"host",
",",
"method",
",",
"path",
")",
":",
"return",
"(",
"not",
"host",
"or",
"wildcard_match",
"(",
"self",
".",
"host",
",",
"host",
")",
")",
"and",
"(",
"not",
"method",
"or",
"wildcard_match",
"(",
"self",... | Determines whether or not this sampling rule applies to
the incoming request based on some of the request's parameters.
Any None parameters provided will be considered an implicit match. | [
"Determines",
"whether",
"or",
"not",
"this",
"sampling",
"rule",
"applies",
"to",
"the",
"incoming",
"request",
"based",
"on",
"some",
"of",
"the",
"request",
"s",
"parameters",
".",
"Any",
"None",
"parameters",
"provided",
"will",
"be",
"considered",
"an",
... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/local/sampling_rule.py#L43-L51 | train | 213,714 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/django/apps.py | XRayConfig.ready | def ready(self):
"""
Configure global XRay recorder based on django settings
under XRAY_RECORDER namespace.
This method could be called twice during server startup
because of base command and reload command.
So this function must be idempotent
"""
if not settings.AWS_XRAY_TRACING_NAME:
raise SegmentNameMissingException('Segment name is required.')
xray_recorder.configure(
daemon_address=settings.AWS_XRAY_DAEMON_ADDRESS,
sampling=settings.SAMPLING,
sampling_rules=settings.SAMPLING_RULES,
context_missing=settings.AWS_XRAY_CONTEXT_MISSING,
plugins=settings.PLUGINS,
service=settings.AWS_XRAY_TRACING_NAME,
dynamic_naming=settings.DYNAMIC_NAMING,
streaming_threshold=settings.STREAMING_THRESHOLD,
max_trace_back=settings.MAX_TRACE_BACK,
stream_sql=settings.STREAM_SQL,
)
if settings.PATCH_MODULES:
if settings.AUTO_PATCH_PARENT_SEGMENT_NAME is not None:
with xray_recorder.in_segment(settings.AUTO_PATCH_PARENT_SEGMENT_NAME):
patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
else:
patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
# if turned on subsegment will be generated on
# built-in database and template rendering
if settings.AUTO_INSTRUMENT:
try:
patch_db()
except Exception:
log.debug('failed to patch Django built-in database')
try:
patch_template()
except Exception:
log.debug('failed to patch Django built-in template engine') | python | def ready(self):
"""
Configure global XRay recorder based on django settings
under XRAY_RECORDER namespace.
This method could be called twice during server startup
because of base command and reload command.
So this function must be idempotent
"""
if not settings.AWS_XRAY_TRACING_NAME:
raise SegmentNameMissingException('Segment name is required.')
xray_recorder.configure(
daemon_address=settings.AWS_XRAY_DAEMON_ADDRESS,
sampling=settings.SAMPLING,
sampling_rules=settings.SAMPLING_RULES,
context_missing=settings.AWS_XRAY_CONTEXT_MISSING,
plugins=settings.PLUGINS,
service=settings.AWS_XRAY_TRACING_NAME,
dynamic_naming=settings.DYNAMIC_NAMING,
streaming_threshold=settings.STREAMING_THRESHOLD,
max_trace_back=settings.MAX_TRACE_BACK,
stream_sql=settings.STREAM_SQL,
)
if settings.PATCH_MODULES:
if settings.AUTO_PATCH_PARENT_SEGMENT_NAME is not None:
with xray_recorder.in_segment(settings.AUTO_PATCH_PARENT_SEGMENT_NAME):
patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
else:
patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
# if turned on subsegment will be generated on
# built-in database and template rendering
if settings.AUTO_INSTRUMENT:
try:
patch_db()
except Exception:
log.debug('failed to patch Django built-in database')
try:
patch_template()
except Exception:
log.debug('failed to patch Django built-in template engine') | [
"def",
"ready",
"(",
"self",
")",
":",
"if",
"not",
"settings",
".",
"AWS_XRAY_TRACING_NAME",
":",
"raise",
"SegmentNameMissingException",
"(",
"'Segment name is required.'",
")",
"xray_recorder",
".",
"configure",
"(",
"daemon_address",
"=",
"settings",
".",
"AWS_X... | Configure global XRay recorder based on django settings
under XRAY_RECORDER namespace.
This method could be called twice during server startup
because of base command and reload command.
So this function must be idempotent | [
"Configure",
"global",
"XRay",
"recorder",
"based",
"on",
"django",
"settings",
"under",
"XRAY_RECORDER",
"namespace",
".",
"This",
"method",
"could",
"be",
"called",
"twice",
"during",
"server",
"startup",
"because",
"of",
"base",
"command",
"and",
"reload",
"c... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/django/apps.py#L18-L59 | train | 213,715 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/sampler.py | DefaultSampler.start | def start(self):
"""
Start rule poller and target poller once X-Ray daemon address
and context manager is in place.
"""
if not global_sdk_config.sdk_enabled():
return
with self._lock:
if not self._started:
self._rule_poller.start()
self._target_poller.start()
self._started = True | python | def start(self):
"""
Start rule poller and target poller once X-Ray daemon address
and context manager is in place.
"""
if not global_sdk_config.sdk_enabled():
return
with self._lock:
if not self._started:
self._rule_poller.start()
self._target_poller.start()
self._started = True | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"not",
"global_sdk_config",
".",
"sdk_enabled",
"(",
")",
":",
"return",
"with",
"self",
".",
"_lock",
":",
"if",
"not",
"self",
".",
"_started",
":",
"self",
".",
"_rule_poller",
".",
"start",
"(",
")",
"... | Start rule poller and target poller once X-Ray daemon address
and context manager is in place. | [
"Start",
"rule",
"poller",
"and",
"target",
"poller",
"once",
"X",
"-",
"Ray",
"daemon",
"address",
"and",
"context",
"manager",
"is",
"in",
"place",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/sampler.py#L36-L48 | train | 213,716 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/sampler.py | DefaultSampler.should_trace | def should_trace(self, sampling_req=None):
"""
Return the matched sampling rule name if the sampler finds one
and decide to sample. If no sampling rule matched, it falls back
to the local sampler's ``should_trace`` implementation.
All optional arguments are extracted from incoming requests by
X-Ray middleware to perform path based sampling.
"""
if not global_sdk_config.sdk_enabled():
return False
if not self._started:
self.start() # only front-end that actually uses the sampler spawns poller threads
now = int(time.time())
if sampling_req and not sampling_req.get('service_type', None):
sampling_req['service_type'] = self._origin
elif sampling_req is None:
sampling_req = {'service_type': self._origin}
matched_rule = self._cache.get_matched_rule(sampling_req, now)
if matched_rule:
log.debug('Rule %s is selected to make a sampling decision.', matched_rule.name)
return self._process_matched_rule(matched_rule, now)
else:
log.info('No effective centralized sampling rule match. Fallback to local rules.')
return self._local_sampler.should_trace(sampling_req) | python | def should_trace(self, sampling_req=None):
"""
Return the matched sampling rule name if the sampler finds one
and decide to sample. If no sampling rule matched, it falls back
to the local sampler's ``should_trace`` implementation.
All optional arguments are extracted from incoming requests by
X-Ray middleware to perform path based sampling.
"""
if not global_sdk_config.sdk_enabled():
return False
if not self._started:
self.start() # only front-end that actually uses the sampler spawns poller threads
now = int(time.time())
if sampling_req and not sampling_req.get('service_type', None):
sampling_req['service_type'] = self._origin
elif sampling_req is None:
sampling_req = {'service_type': self._origin}
matched_rule = self._cache.get_matched_rule(sampling_req, now)
if matched_rule:
log.debug('Rule %s is selected to make a sampling decision.', matched_rule.name)
return self._process_matched_rule(matched_rule, now)
else:
log.info('No effective centralized sampling rule match. Fallback to local rules.')
return self._local_sampler.should_trace(sampling_req) | [
"def",
"should_trace",
"(",
"self",
",",
"sampling_req",
"=",
"None",
")",
":",
"if",
"not",
"global_sdk_config",
".",
"sdk_enabled",
"(",
")",
":",
"return",
"False",
"if",
"not",
"self",
".",
"_started",
":",
"self",
".",
"start",
"(",
")",
"# only fro... | Return the matched sampling rule name if the sampler finds one
and decide to sample. If no sampling rule matched, it falls back
to the local sampler's ``should_trace`` implementation.
All optional arguments are extracted from incoming requests by
X-Ray middleware to perform path based sampling. | [
"Return",
"the",
"matched",
"sampling",
"rule",
"name",
"if",
"the",
"sampler",
"finds",
"one",
"and",
"decide",
"to",
"sample",
".",
"If",
"no",
"sampling",
"rule",
"matched",
"it",
"falls",
"back",
"to",
"the",
"local",
"sampler",
"s",
"should_trace",
"i... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/sampler.py#L50-L75 | train | 213,717 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/pynamodb/patch.py | patch | def patch():
"""Patch PynamoDB so it generates subsegements when calling DynamoDB."""
import pynamodb
if hasattr(botocore.vendored.requests.sessions, '_xray_enabled'):
return
setattr(botocore.vendored.requests.sessions, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'botocore.vendored.requests.sessions',
'Session.send',
_xray_traced_pynamodb,
) | python | def patch():
"""Patch PynamoDB so it generates subsegements when calling DynamoDB."""
import pynamodb
if hasattr(botocore.vendored.requests.sessions, '_xray_enabled'):
return
setattr(botocore.vendored.requests.sessions, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'botocore.vendored.requests.sessions',
'Session.send',
_xray_traced_pynamodb,
) | [
"def",
"patch",
"(",
")",
":",
"import",
"pynamodb",
"if",
"hasattr",
"(",
"botocore",
".",
"vendored",
".",
"requests",
".",
"sessions",
",",
"'_xray_enabled'",
")",
":",
"return",
"setattr",
"(",
"botocore",
".",
"vendored",
".",
"requests",
".",
"sessio... | Patch PynamoDB so it generates subsegements when calling DynamoDB. | [
"Patch",
"PynamoDB",
"so",
"it",
"generates",
"subsegements",
"when",
"calling",
"DynamoDB",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/pynamodb/patch.py#L10-L22 | train | 213,718 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/context.py | Context.end_segment | def end_segment(self, end_time=None):
"""
End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
"""
entity = self.get_trace_entity()
if not entity:
log.warning("No segment to end")
return
if self._is_subsegment(entity):
entity.parent_segment.close(end_time)
else:
entity.close(end_time) | python | def end_segment(self, end_time=None):
"""
End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
"""
entity = self.get_trace_entity()
if not entity:
log.warning("No segment to end")
return
if self._is_subsegment(entity):
entity.parent_segment.close(end_time)
else:
entity.close(end_time) | [
"def",
"end_segment",
"(",
"self",
",",
"end_time",
"=",
"None",
")",
":",
"entity",
"=",
"self",
".",
"get_trace_entity",
"(",
")",
"if",
"not",
"entity",
":",
"log",
".",
"warning",
"(",
"\"No segment to end\"",
")",
"return",
"if",
"self",
".",
"_is_s... | End the current active segment.
:param int end_time: epoch in seconds. If not specified the current
system time will be used. | [
"End",
"the",
"current",
"active",
"segment",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/context.py#L40-L54 | train | 213,719 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/context.py | Context.put_subsegment | def put_subsegment(self, subsegment):
"""
Store the subsegment created by ``xray_recorder`` to the context.
If you put a new subsegment while there is already an open subsegment,
the new subsegment becomes the child of the existing subsegment.
"""
entity = self.get_trace_entity()
if not entity:
log.warning("Active segment or subsegment not found. Discarded %s." % subsegment.name)
return
entity.add_subsegment(subsegment)
self._local.entities.append(subsegment) | python | def put_subsegment(self, subsegment):
"""
Store the subsegment created by ``xray_recorder`` to the context.
If you put a new subsegment while there is already an open subsegment,
the new subsegment becomes the child of the existing subsegment.
"""
entity = self.get_trace_entity()
if not entity:
log.warning("Active segment or subsegment not found. Discarded %s." % subsegment.name)
return
entity.add_subsegment(subsegment)
self._local.entities.append(subsegment) | [
"def",
"put_subsegment",
"(",
"self",
",",
"subsegment",
")",
":",
"entity",
"=",
"self",
".",
"get_trace_entity",
"(",
")",
"if",
"not",
"entity",
":",
"log",
".",
"warning",
"(",
"\"Active segment or subsegment not found. Discarded %s.\"",
"%",
"subsegment",
"."... | Store the subsegment created by ``xray_recorder`` to the context.
If you put a new subsegment while there is already an open subsegment,
the new subsegment becomes the child of the existing subsegment. | [
"Store",
"the",
"subsegment",
"created",
"by",
"xray_recorder",
"to",
"the",
"context",
".",
"If",
"you",
"put",
"a",
"new",
"subsegment",
"while",
"there",
"is",
"already",
"an",
"open",
"subsegment",
"the",
"new",
"subsegment",
"becomes",
"the",
"child",
"... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/context.py#L56-L68 | train | 213,720 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/context.py | Context.end_subsegment | def end_subsegment(self, end_time=None):
"""
End the current active segment. Return False if there is no
subsegment to end.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
"""
subsegment = self.get_trace_entity()
if self._is_subsegment(subsegment):
subsegment.close(end_time)
self._local.entities.pop()
return True
else:
log.warning("No subsegment to end.")
return False | python | def end_subsegment(self, end_time=None):
"""
End the current active segment. Return False if there is no
subsegment to end.
:param int end_time: epoch in seconds. If not specified the current
system time will be used.
"""
subsegment = self.get_trace_entity()
if self._is_subsegment(subsegment):
subsegment.close(end_time)
self._local.entities.pop()
return True
else:
log.warning("No subsegment to end.")
return False | [
"def",
"end_subsegment",
"(",
"self",
",",
"end_time",
"=",
"None",
")",
":",
"subsegment",
"=",
"self",
".",
"get_trace_entity",
"(",
")",
"if",
"self",
".",
"_is_subsegment",
"(",
"subsegment",
")",
":",
"subsegment",
".",
"close",
"(",
"end_time",
")",
... | End the current active segment. Return False if there is no
subsegment to end.
:param int end_time: epoch in seconds. If not specified the current
system time will be used. | [
"End",
"the",
"current",
"active",
"segment",
".",
"Return",
"False",
"if",
"there",
"is",
"no",
"subsegment",
"to",
"end",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/context.py#L70-L85 | train | 213,721 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/context.py | Context.handle_context_missing | def handle_context_missing(self):
"""
Called whenever there is no trace entity to access or mutate.
"""
if self.context_missing == 'RUNTIME_ERROR':
log.error(MISSING_SEGMENT_MSG)
raise SegmentNotFoundException(MISSING_SEGMENT_MSG)
else:
log.error(MISSING_SEGMENT_MSG) | python | def handle_context_missing(self):
"""
Called whenever there is no trace entity to access or mutate.
"""
if self.context_missing == 'RUNTIME_ERROR':
log.error(MISSING_SEGMENT_MSG)
raise SegmentNotFoundException(MISSING_SEGMENT_MSG)
else:
log.error(MISSING_SEGMENT_MSG) | [
"def",
"handle_context_missing",
"(",
"self",
")",
":",
"if",
"self",
".",
"context_missing",
"==",
"'RUNTIME_ERROR'",
":",
"log",
".",
"error",
"(",
"MISSING_SEGMENT_MSG",
")",
"raise",
"SegmentNotFoundException",
"(",
"MISSING_SEGMENT_MSG",
")",
"else",
":",
"lo... | Called whenever there is no trace entity to access or mutate. | [
"Called",
"whenever",
"there",
"is",
"no",
"trace",
"entity",
"to",
"access",
"or",
"mutate",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/context.py#L112-L120 | train | 213,722 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/lambda_launcher.py | check_in_lambda | def check_in_lambda():
"""
Return None if SDK is not loaded in AWS Lambda worker.
Otherwise drop a touch file and return a lambda context.
"""
if not os.getenv(LAMBDA_TASK_ROOT_KEY):
return None
try:
os.mkdir(TOUCH_FILE_DIR)
except OSError:
log.debug('directory %s already exists', TOUCH_FILE_DIR)
try:
f = open(TOUCH_FILE_PATH, 'w+')
f.close()
# utime force second parameter in python2.7
os.utime(TOUCH_FILE_PATH, None)
except (IOError, OSError):
log.warning("Unable to write to %s. Failed to signal SDK initialization." % TOUCH_FILE_PATH)
return LambdaContext() | python | def check_in_lambda():
"""
Return None if SDK is not loaded in AWS Lambda worker.
Otherwise drop a touch file and return a lambda context.
"""
if not os.getenv(LAMBDA_TASK_ROOT_KEY):
return None
try:
os.mkdir(TOUCH_FILE_DIR)
except OSError:
log.debug('directory %s already exists', TOUCH_FILE_DIR)
try:
f = open(TOUCH_FILE_PATH, 'w+')
f.close()
# utime force second parameter in python2.7
os.utime(TOUCH_FILE_PATH, None)
except (IOError, OSError):
log.warning("Unable to write to %s. Failed to signal SDK initialization." % TOUCH_FILE_PATH)
return LambdaContext() | [
"def",
"check_in_lambda",
"(",
")",
":",
"if",
"not",
"os",
".",
"getenv",
"(",
"LAMBDA_TASK_ROOT_KEY",
")",
":",
"return",
"None",
"try",
":",
"os",
".",
"mkdir",
"(",
"TOUCH_FILE_DIR",
")",
"except",
"OSError",
":",
"log",
".",
"debug",
"(",
"'director... | Return None if SDK is not loaded in AWS Lambda worker.
Otherwise drop a touch file and return a lambda context. | [
"Return",
"None",
"if",
"SDK",
"is",
"not",
"loaded",
"in",
"AWS",
"Lambda",
"worker",
".",
"Otherwise",
"drop",
"a",
"touch",
"file",
"and",
"return",
"a",
"lambda",
"context",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/lambda_launcher.py#L19-L40 | train | 213,723 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/lambda_launcher.py | LambdaContext._refresh_context | def _refresh_context(self):
"""
Get current facade segment. To prevent resource leaking in Lambda worker,
every time there is segment present, we compare its trace id to current
environment variables. If it is different we create a new facade segment
and clean up subsegments stored.
"""
header_str = os.getenv(LAMBDA_TRACE_HEADER_KEY)
trace_header = TraceHeader.from_header_str(header_str)
if not global_sdk_config.sdk_enabled():
trace_header._sampled = False
segment = getattr(self._local, 'segment', None)
if segment:
# Ensure customers don't have leaked subsegments across invocations
if not trace_header.root or trace_header.root == segment.trace_id:
return
else:
self._initialize_context(trace_header)
else:
self._initialize_context(trace_header) | python | def _refresh_context(self):
"""
Get current facade segment. To prevent resource leaking in Lambda worker,
every time there is segment present, we compare its trace id to current
environment variables. If it is different we create a new facade segment
and clean up subsegments stored.
"""
header_str = os.getenv(LAMBDA_TRACE_HEADER_KEY)
trace_header = TraceHeader.from_header_str(header_str)
if not global_sdk_config.sdk_enabled():
trace_header._sampled = False
segment = getattr(self._local, 'segment', None)
if segment:
# Ensure customers don't have leaked subsegments across invocations
if not trace_header.root or trace_header.root == segment.trace_id:
return
else:
self._initialize_context(trace_header)
else:
self._initialize_context(trace_header) | [
"def",
"_refresh_context",
"(",
"self",
")",
":",
"header_str",
"=",
"os",
".",
"getenv",
"(",
"LAMBDA_TRACE_HEADER_KEY",
")",
"trace_header",
"=",
"TraceHeader",
".",
"from_header_str",
"(",
"header_str",
")",
"if",
"not",
"global_sdk_config",
".",
"sdk_enabled",... | Get current facade segment. To prevent resource leaking in Lambda worker,
every time there is segment present, we compare its trace id to current
environment variables. If it is different we create a new facade segment
and clean up subsegments stored. | [
"Get",
"current",
"facade",
"segment",
".",
"To",
"prevent",
"resource",
"leaking",
"in",
"Lambda",
"worker",
"every",
"time",
"there",
"is",
"segment",
"present",
"we",
"compare",
"its",
"trace",
"id",
"to",
"current",
"environment",
"variables",
".",
"If",
... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/lambda_launcher.py#L88-L109 | train | 213,724 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/lambda_launcher.py | LambdaContext._initialize_context | def _initialize_context(self, trace_header):
"""
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
"""
sampled = None
if not global_sdk_config.sdk_enabled():
# Force subsequent subsegments to be disabled and turned into DummySegments.
sampled = False
elif trace_header.sampled == 0:
sampled = False
elif trace_header.sampled == 1:
sampled = True
segment = FacadeSegment(
name='facade',
traceid=trace_header.root,
entityid=trace_header.parent,
sampled=sampled,
)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', []) | python | def _initialize_context(self, trace_header):
"""
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
"""
sampled = None
if not global_sdk_config.sdk_enabled():
# Force subsequent subsegments to be disabled and turned into DummySegments.
sampled = False
elif trace_header.sampled == 0:
sampled = False
elif trace_header.sampled == 1:
sampled = True
segment = FacadeSegment(
name='facade',
traceid=trace_header.root,
entityid=trace_header.parent,
sampled=sampled,
)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', []) | [
"def",
"_initialize_context",
"(",
"self",
",",
"trace_header",
")",
":",
"sampled",
"=",
"None",
"if",
"not",
"global_sdk_config",
".",
"sdk_enabled",
"(",
")",
":",
"# Force subsequent subsegments to be disabled and turned into DummySegments.",
"sampled",
"=",
"False",
... | Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments. | [
"Create",
"a",
"facade",
"segment",
"based",
"on",
"environment",
"variables",
"set",
"by",
"AWS",
"Lambda",
"and",
"initialize",
"storage",
"for",
"subsegments",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/lambda_launcher.py#L125-L146 | train | 213,725 |
aws/aws-xray-sdk-python | aws_xray_sdk/sdk_config.py | SDKConfig.set_sdk_enabled | def set_sdk_enabled(cls, value):
"""
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set,
otherwise, set the enabled flag to be equal to the environment variable. If the
env variable is an invalid string boolean, it will default to true.
:param bool value: Flag to set whether the SDK is enabled or disabled.
Environment variables AWS_XRAY_SDK_ENABLED overrides argument value.
"""
# Environment Variables take precedence over hardcoded configurations.
if cls.XRAY_ENABLED_KEY in os.environ:
cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false'
else:
if type(value) == bool:
cls.__SDK_ENABLED = value
else:
cls.__SDK_ENABLED = True
log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...") | python | def set_sdk_enabled(cls, value):
"""
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set,
otherwise, set the enabled flag to be equal to the environment variable. If the
env variable is an invalid string boolean, it will default to true.
:param bool value: Flag to set whether the SDK is enabled or disabled.
Environment variables AWS_XRAY_SDK_ENABLED overrides argument value.
"""
# Environment Variables take precedence over hardcoded configurations.
if cls.XRAY_ENABLED_KEY in os.environ:
cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false'
else:
if type(value) == bool:
cls.__SDK_ENABLED = value
else:
cls.__SDK_ENABLED = True
log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...") | [
"def",
"set_sdk_enabled",
"(",
"cls",
",",
"value",
")",
":",
"# Environment Variables take precedence over hardcoded configurations.",
"if",
"cls",
".",
"XRAY_ENABLED_KEY",
"in",
"os",
".",
"environ",
":",
"cls",
".",
"__SDK_ENABLED",
"=",
"str",
"(",
"os",
".",
... | Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set,
otherwise, set the enabled flag to be equal to the environment variable. If the
env variable is an invalid string boolean, it will default to true.
:param bool value: Flag to set whether the SDK is enabled or disabled.
Environment variables AWS_XRAY_SDK_ENABLED overrides argument value. | [
"Modifies",
"the",
"enabled",
"flag",
"if",
"the",
"AWS_XRAY_SDK_ENABLED",
"environment",
"variable",
"is",
"not",
"set",
"otherwise",
"set",
"the",
"enabled",
"flag",
"to",
"be",
"equal",
"to",
"the",
"environment",
"variable",
".",
"If",
"the",
"env",
"varia... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/sdk_config.py#L40-L58 | train | 213,726 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/psycopg2/patch.py | _xray_register_type_fix | def _xray_register_type_fix(wrapped, instance, args, kwargs):
"""Send the actual connection or curser to register type."""
our_args = list(copy.copy(args))
if len(our_args) == 2 and isinstance(our_args[1], (XRayTracedConn, XRayTracedCursor)):
our_args[1] = our_args[1].__wrapped__
return wrapped(*our_args, **kwargs) | python | def _xray_register_type_fix(wrapped, instance, args, kwargs):
"""Send the actual connection or curser to register type."""
our_args = list(copy.copy(args))
if len(our_args) == 2 and isinstance(our_args[1], (XRayTracedConn, XRayTracedCursor)):
our_args[1] = our_args[1].__wrapped__
return wrapped(*our_args, **kwargs) | [
"def",
"_xray_register_type_fix",
"(",
"wrapped",
",",
"instance",
",",
"args",
",",
"kwargs",
")",
":",
"our_args",
"=",
"list",
"(",
"copy",
".",
"copy",
"(",
"args",
")",
")",
"if",
"len",
"(",
"our_args",
")",
"==",
"2",
"and",
"isinstance",
"(",
... | Send the actual connection or curser to register type. | [
"Send",
"the",
"actual",
"connection",
"or",
"curser",
"to",
"register",
"type",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/psycopg2/patch.py#L42-L48 | train | 213,727 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/trace_header.py | TraceHeader.from_header_str | def from_header_str(cls, header):
"""
Create a TraceHeader object from a tracing header string
extracted from a http request headers.
"""
if not header:
return cls()
try:
params = header.strip().split(HEADER_DELIMITER)
header_dict = {}
data = {}
for param in params:
entry = param.split('=')
key = entry[0]
if key in (ROOT, PARENT, SAMPLE):
header_dict[key] = entry[1]
# Ignore any "Self=" trace ids injected from ALB.
elif key != SELF:
data[key] = entry[1]
return cls(
root=header_dict.get(ROOT, None),
parent=header_dict.get(PARENT, None),
sampled=header_dict.get(SAMPLE, None),
data=data,
)
except Exception:
log.warning("malformed tracing header %s, ignore.", header)
return cls() | python | def from_header_str(cls, header):
"""
Create a TraceHeader object from a tracing header string
extracted from a http request headers.
"""
if not header:
return cls()
try:
params = header.strip().split(HEADER_DELIMITER)
header_dict = {}
data = {}
for param in params:
entry = param.split('=')
key = entry[0]
if key in (ROOT, PARENT, SAMPLE):
header_dict[key] = entry[1]
# Ignore any "Self=" trace ids injected from ALB.
elif key != SELF:
data[key] = entry[1]
return cls(
root=header_dict.get(ROOT, None),
parent=header_dict.get(PARENT, None),
sampled=header_dict.get(SAMPLE, None),
data=data,
)
except Exception:
log.warning("malformed tracing header %s, ignore.", header)
return cls() | [
"def",
"from_header_str",
"(",
"cls",
",",
"header",
")",
":",
"if",
"not",
"header",
":",
"return",
"cls",
"(",
")",
"try",
":",
"params",
"=",
"header",
".",
"strip",
"(",
")",
".",
"split",
"(",
"HEADER_DELIMITER",
")",
"header_dict",
"=",
"{",
"}... | Create a TraceHeader object from a tracing header string
extracted from a http request headers. | [
"Create",
"a",
"TraceHeader",
"object",
"from",
"a",
"tracing",
"header",
"string",
"extracted",
"from",
"a",
"http",
"request",
"headers",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/trace_header.py#L42-L73 | train | 213,728 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/trace_header.py | TraceHeader.to_header_str | def to_header_str(self):
"""
Convert to a tracing header string that can be injected to
outgoing http request headers.
"""
h_parts = []
if self.root:
h_parts.append(ROOT + '=' + self.root)
if self.parent:
h_parts.append(PARENT + '=' + self.parent)
if self.sampled is not None:
h_parts.append(SAMPLE + '=' + str(self.sampled))
if self.data:
for key in self.data:
h_parts.append(key + '=' + self.data[key])
return HEADER_DELIMITER.join(h_parts) | python | def to_header_str(self):
"""
Convert to a tracing header string that can be injected to
outgoing http request headers.
"""
h_parts = []
if self.root:
h_parts.append(ROOT + '=' + self.root)
if self.parent:
h_parts.append(PARENT + '=' + self.parent)
if self.sampled is not None:
h_parts.append(SAMPLE + '=' + str(self.sampled))
if self.data:
for key in self.data:
h_parts.append(key + '=' + self.data[key])
return HEADER_DELIMITER.join(h_parts) | [
"def",
"to_header_str",
"(",
"self",
")",
":",
"h_parts",
"=",
"[",
"]",
"if",
"self",
".",
"root",
":",
"h_parts",
".",
"append",
"(",
"ROOT",
"+",
"'='",
"+",
"self",
".",
"root",
")",
"if",
"self",
".",
"parent",
":",
"h_parts",
".",
"append",
... | Convert to a tracing header string that can be injected to
outgoing http request headers. | [
"Convert",
"to",
"a",
"tracing",
"header",
"string",
"that",
"can",
"be",
"injected",
"to",
"outgoing",
"http",
"request",
"headers",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/trace_header.py#L75-L91 | train | 213,729 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/default_dynamic_naming.py | DefaultDynamicNaming.get_name | def get_name(self, host_name):
"""
Returns the segment name based on the input host name.
"""
if wildcard_match(self._pattern, host_name):
return host_name
else:
return self._fallback | python | def get_name(self, host_name):
"""
Returns the segment name based on the input host name.
"""
if wildcard_match(self._pattern, host_name):
return host_name
else:
return self._fallback | [
"def",
"get_name",
"(",
"self",
",",
"host_name",
")",
":",
"if",
"wildcard_match",
"(",
"self",
".",
"_pattern",
",",
"host_name",
")",
":",
"return",
"host_name",
"else",
":",
"return",
"self",
".",
"_fallback"
] | Returns the segment name based on the input host name. | [
"Returns",
"the",
"segment",
"name",
"based",
"on",
"the",
"input",
"host",
"name",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/default_dynamic_naming.py#L24-L31 | train | 213,730 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/emitters/udp_emitter.py | UDPEmitter.set_daemon_address | def set_daemon_address(self, address):
"""
Set up UDP ip and port from the raw daemon address
string using ``DaemonConfig`` class utlities.
"""
if address:
daemon_config = DaemonConfig(address)
self._ip, self._port = daemon_config.udp_ip, daemon_config.udp_port | python | def set_daemon_address(self, address):
"""
Set up UDP ip and port from the raw daemon address
string using ``DaemonConfig`` class utlities.
"""
if address:
daemon_config = DaemonConfig(address)
self._ip, self._port = daemon_config.udp_ip, daemon_config.udp_port | [
"def",
"set_daemon_address",
"(",
"self",
",",
"address",
")",
":",
"if",
"address",
":",
"daemon_config",
"=",
"DaemonConfig",
"(",
"address",
")",
"self",
".",
"_ip",
",",
"self",
".",
"_port",
"=",
"daemon_config",
".",
"udp_ip",
",",
"daemon_config",
"... | Set up UDP ip and port from the raw daemon address
string using ``DaemonConfig`` class utlities. | [
"Set",
"up",
"UDP",
"ip",
"and",
"port",
"from",
"the",
"raw",
"daemon",
"address",
"string",
"using",
"DaemonConfig",
"class",
"utlities",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/emitters/udp_emitter.py#L42-L49 | train | 213,731 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/aiobotocore/patch.py | patch | def patch():
"""
Patch aiobotocore client so it generates subsegments
when calling AWS services.
"""
if hasattr(aiobotocore.client, '_xray_enabled'):
return
setattr(aiobotocore.client, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'aiobotocore.client',
'AioBaseClient._make_api_call',
_xray_traced_aiobotocore,
)
wrapt.wrap_function_wrapper(
'aiobotocore.endpoint',
'AioEndpoint.prepare_request',
inject_header,
) | python | def patch():
"""
Patch aiobotocore client so it generates subsegments
when calling AWS services.
"""
if hasattr(aiobotocore.client, '_xray_enabled'):
return
setattr(aiobotocore.client, '_xray_enabled', True)
wrapt.wrap_function_wrapper(
'aiobotocore.client',
'AioBaseClient._make_api_call',
_xray_traced_aiobotocore,
)
wrapt.wrap_function_wrapper(
'aiobotocore.endpoint',
'AioEndpoint.prepare_request',
inject_header,
) | [
"def",
"patch",
"(",
")",
":",
"if",
"hasattr",
"(",
"aiobotocore",
".",
"client",
",",
"'_xray_enabled'",
")",
":",
"return",
"setattr",
"(",
"aiobotocore",
".",
"client",
",",
"'_xray_enabled'",
",",
"True",
")",
"wrapt",
".",
"wrap_function_wrapper",
"(",... | Patch aiobotocore client so it generates subsegments
when calling AWS services. | [
"Patch",
"aiobotocore",
"client",
"so",
"it",
"generates",
"subsegments",
"when",
"calling",
"AWS",
"services",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/aiobotocore/patch.py#L8-L27 | train | 213,732 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/streaming/default_streaming.py | DefaultStreaming.is_eligible | def is_eligible(self, segment):
"""
A segment is eligible to have its children subsegments streamed
if it is sampled and it breaches streaming threshold.
"""
if not segment or not segment.sampled:
return False
return segment.get_total_subsegments_size() > self.streaming_threshold | python | def is_eligible(self, segment):
"""
A segment is eligible to have its children subsegments streamed
if it is sampled and it breaches streaming threshold.
"""
if not segment or not segment.sampled:
return False
return segment.get_total_subsegments_size() > self.streaming_threshold | [
"def",
"is_eligible",
"(",
"self",
",",
"segment",
")",
":",
"if",
"not",
"segment",
"or",
"not",
"segment",
".",
"sampled",
":",
"return",
"False",
"return",
"segment",
".",
"get_total_subsegments_size",
"(",
")",
">",
"self",
".",
"streaming_threshold"
] | A segment is eligible to have its children subsegments streamed
if it is sampled and it breaches streaming threshold. | [
"A",
"segment",
"is",
"eligible",
"to",
"have",
"its",
"children",
"subsegments",
"streamed",
"if",
"it",
"is",
"sampled",
"and",
"it",
"breaches",
"streaming",
"threshold",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/streaming/default_streaming.py#L14-L22 | train | 213,733 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/streaming/default_streaming.py | DefaultStreaming.stream | def stream(self, entity, callback):
"""
Stream out all eligible children of the input entity.
:param entity: The target entity to be streamed.
:param callback: The function that takes the node and
actually send it out.
"""
with self._lock:
self._stream(entity, callback) | python | def stream(self, entity, callback):
"""
Stream out all eligible children of the input entity.
:param entity: The target entity to be streamed.
:param callback: The function that takes the node and
actually send it out.
"""
with self._lock:
self._stream(entity, callback) | [
"def",
"stream",
"(",
"self",
",",
"entity",
",",
"callback",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_stream",
"(",
"entity",
",",
"callback",
")"
] | Stream out all eligible children of the input entity.
:param entity: The target entity to be streamed.
:param callback: The function that takes the node and
actually send it out. | [
"Stream",
"out",
"all",
"eligible",
"children",
"of",
"the",
"input",
"entity",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/streaming/default_streaming.py#L24-L33 | train | 213,734 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/sqlalchemy/util/decorators.py | parse_bind | def parse_bind(bind):
"""Parses a connection string and creates SQL trace metadata"""
if isinstance(bind, Connection):
engine = bind.engine
else:
engine = bind
m = re.match(r"Engine\((.*?)\)", str(engine))
if m is not None:
u = urlparse(m.group(1))
# Add Scheme to uses_netloc or // will be missing from url.
uses_netloc.append(u.scheme)
safe_url = ""
if u.password is None:
safe_url = u.geturl()
else:
# Strip password from URL
host_info = u.netloc.rpartition('@')[-1]
parts = u._replace(netloc='{}@{}'.format(u.username, host_info))
safe_url = parts.geturl()
sql = {}
sql['database_type'] = u.scheme
sql['url'] = safe_url
if u.username is not None:
sql['user'] = "{}".format(u.username)
return sql | python | def parse_bind(bind):
"""Parses a connection string and creates SQL trace metadata"""
if isinstance(bind, Connection):
engine = bind.engine
else:
engine = bind
m = re.match(r"Engine\((.*?)\)", str(engine))
if m is not None:
u = urlparse(m.group(1))
# Add Scheme to uses_netloc or // will be missing from url.
uses_netloc.append(u.scheme)
safe_url = ""
if u.password is None:
safe_url = u.geturl()
else:
# Strip password from URL
host_info = u.netloc.rpartition('@')[-1]
parts = u._replace(netloc='{}@{}'.format(u.username, host_info))
safe_url = parts.geturl()
sql = {}
sql['database_type'] = u.scheme
sql['url'] = safe_url
if u.username is not None:
sql['user'] = "{}".format(u.username)
return sql | [
"def",
"parse_bind",
"(",
"bind",
")",
":",
"if",
"isinstance",
"(",
"bind",
",",
"Connection",
")",
":",
"engine",
"=",
"bind",
".",
"engine",
"else",
":",
"engine",
"=",
"bind",
"m",
"=",
"re",
".",
"match",
"(",
"r\"Engine\\((.*?)\\)\"",
",",
"str",... | Parses a connection string and creates SQL trace metadata | [
"Parses",
"a",
"connection",
"string",
"and",
"creates",
"SQL",
"trace",
"metadata"
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/sqlalchemy/util/decorators.py#L89-L113 | train | 213,735 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/subsegment.py | Subsegment.add_subsegment | def add_subsegment(self, subsegment):
"""
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment.
"""
super(Subsegment, self).add_subsegment(subsegment)
self.parent_segment.increment() | python | def add_subsegment(self, subsegment):
"""
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment.
"""
super(Subsegment, self).add_subsegment(subsegment)
self.parent_segment.increment() | [
"def",
"add_subsegment",
"(",
"self",
",",
"subsegment",
")",
":",
"super",
"(",
"Subsegment",
",",
"self",
")",
".",
"add_subsegment",
"(",
"subsegment",
")",
"self",
".",
"parent_segment",
".",
"increment",
"(",
")"
] | Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter of the
parent segment. | [
"Add",
"input",
"subsegment",
"as",
"a",
"child",
"subsegment",
"and",
"increment",
"reference",
"counter",
"and",
"total",
"subsegments",
"counter",
"of",
"the",
"parent",
"segment",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/subsegment.py#L111-L118 | train | 213,736 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/subsegment.py | Subsegment.remove_subsegment | def remove_subsegment(self, subsegment):
"""
Remove input subsegment from child subsegemnts and
decrement parent segment total subsegments count.
:param Subsegment: subsegment to remove.
"""
super(Subsegment, self).remove_subsegment(subsegment)
self.parent_segment.decrement_subsegments_size() | python | def remove_subsegment(self, subsegment):
"""
Remove input subsegment from child subsegemnts and
decrement parent segment total subsegments count.
:param Subsegment: subsegment to remove.
"""
super(Subsegment, self).remove_subsegment(subsegment)
self.parent_segment.decrement_subsegments_size() | [
"def",
"remove_subsegment",
"(",
"self",
",",
"subsegment",
")",
":",
"super",
"(",
"Subsegment",
",",
"self",
")",
".",
"remove_subsegment",
"(",
"subsegment",
")",
"self",
".",
"parent_segment",
".",
"decrement_subsegments_size",
"(",
")"
] | Remove input subsegment from child subsegemnts and
decrement parent segment total subsegments count.
:param Subsegment: subsegment to remove. | [
"Remove",
"input",
"subsegment",
"from",
"child",
"subsegemnts",
"and",
"decrement",
"parent",
"segment",
"total",
"subsegments",
"count",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/subsegment.py#L120-L128 | train | 213,737 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/subsegment.py | Subsegment.close | def close(self, end_time=None):
"""
Close the trace entity by setting `end_time`
and flip the in progress flag to False. Also decrement
parent segment's ref counter by 1.
:param int end_time: Epoch in seconds. If not specified
current time will be used.
"""
super(Subsegment, self).close(end_time)
self.parent_segment.decrement_ref_counter() | python | def close(self, end_time=None):
"""
Close the trace entity by setting `end_time`
and flip the in progress flag to False. Also decrement
parent segment's ref counter by 1.
:param int end_time: Epoch in seconds. If not specified
current time will be used.
"""
super(Subsegment, self).close(end_time)
self.parent_segment.decrement_ref_counter() | [
"def",
"close",
"(",
"self",
",",
"end_time",
"=",
"None",
")",
":",
"super",
"(",
"Subsegment",
",",
"self",
")",
".",
"close",
"(",
"end_time",
")",
"self",
".",
"parent_segment",
".",
"decrement_ref_counter",
"(",
")"
] | Close the trace entity by setting `end_time`
and flip the in progress flag to False. Also decrement
parent segment's ref counter by 1.
:param int end_time: Epoch in seconds. If not specified
current time will be used. | [
"Close",
"the",
"trace",
"entity",
"by",
"setting",
"end_time",
"and",
"flip",
"the",
"in",
"progress",
"flag",
"to",
"False",
".",
"Also",
"decrement",
"parent",
"segment",
"s",
"ref",
"counter",
"by",
"1",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/subsegment.py#L130-L140 | train | 213,738 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/connector.py | ServiceConnector.fetch_sampling_rules | def fetch_sampling_rules(self):
"""
Use X-Ray botocore client to get the centralized sampling rules
from X-Ray service. The call is proxied and signed by X-Ray Daemon.
"""
new_rules = []
resp = self._xray_client.get_sampling_rules()
records = resp['SamplingRuleRecords']
for record in records:
rule_def = record['SamplingRule']
if self._is_rule_valid(rule_def):
rule = SamplingRule(name=rule_def['RuleName'],
priority=rule_def['Priority'],
rate=rule_def['FixedRate'],
reservoir_size=rule_def['ReservoirSize'],
host=rule_def['Host'],
service=rule_def['ServiceName'],
method=rule_def['HTTPMethod'],
path=rule_def['URLPath'],
service_type=rule_def['ServiceType'])
new_rules.append(rule)
return new_rules | python | def fetch_sampling_rules(self):
"""
Use X-Ray botocore client to get the centralized sampling rules
from X-Ray service. The call is proxied and signed by X-Ray Daemon.
"""
new_rules = []
resp = self._xray_client.get_sampling_rules()
records = resp['SamplingRuleRecords']
for record in records:
rule_def = record['SamplingRule']
if self._is_rule_valid(rule_def):
rule = SamplingRule(name=rule_def['RuleName'],
priority=rule_def['Priority'],
rate=rule_def['FixedRate'],
reservoir_size=rule_def['ReservoirSize'],
host=rule_def['Host'],
service=rule_def['ServiceName'],
method=rule_def['HTTPMethod'],
path=rule_def['URLPath'],
service_type=rule_def['ServiceType'])
new_rules.append(rule)
return new_rules | [
"def",
"fetch_sampling_rules",
"(",
"self",
")",
":",
"new_rules",
"=",
"[",
"]",
"resp",
"=",
"self",
".",
"_xray_client",
".",
"get_sampling_rules",
"(",
")",
"records",
"=",
"resp",
"[",
"'SamplingRuleRecords'",
"]",
"for",
"record",
"in",
"records",
":",... | Use X-Ray botocore client to get the centralized sampling rules
from X-Ray service. The call is proxied and signed by X-Ray Daemon. | [
"Use",
"X",
"-",
"Ray",
"botocore",
"client",
"to",
"get",
"the",
"centralized",
"sampling",
"rules",
"from",
"X",
"-",
"Ray",
"service",
".",
"The",
"call",
"is",
"proxied",
"and",
"signed",
"by",
"X",
"-",
"Ray",
"Daemon",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/connector.py#L49-L73 | train | 213,739 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/connector.py | ServiceConnector.setup_xray_client | def setup_xray_client(self, ip, port, client):
"""
Setup the xray client based on ip and port.
If a preset client is specified, ip and port
will be ignored.
"""
if not client:
client = self._create_xray_client(ip, port)
self._xray_client = client | python | def setup_xray_client(self, ip, port, client):
"""
Setup the xray client based on ip and port.
If a preset client is specified, ip and port
will be ignored.
"""
if not client:
client = self._create_xray_client(ip, port)
self._xray_client = client | [
"def",
"setup_xray_client",
"(",
"self",
",",
"ip",
",",
"port",
",",
"client",
")",
":",
"if",
"not",
"client",
":",
"client",
"=",
"self",
".",
"_create_xray_client",
"(",
"ip",
",",
"port",
")",
"self",
".",
"_xray_client",
"=",
"client"
] | Setup the xray client based on ip and port.
If a preset client is specified, ip and port
will be ignored. | [
"Setup",
"the",
"xray",
"client",
"based",
"on",
"ip",
"and",
"port",
".",
"If",
"a",
"preset",
"client",
"is",
"specified",
"ip",
"and",
"port",
"will",
"be",
"ignored",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/connector.py#L102-L110 | train | 213,740 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/connector.py | ServiceConnector._dt_to_epoch | def _dt_to_epoch(self, dt):
"""
Convert a offset-aware datetime to POSIX time.
"""
if PY2:
# The input datetime is from botocore unmarshalling and it is
# offset-aware so the timedelta of subtracting this time
# to 01/01/1970 using the same tzinfo gives us
# Unix Time (also known as POSIX Time).
time_delta = dt - datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo)
return int(time_delta.total_seconds())
else:
# Added in python 3.3+ and directly returns POSIX time.
return int(dt.timestamp()) | python | def _dt_to_epoch(self, dt):
"""
Convert a offset-aware datetime to POSIX time.
"""
if PY2:
# The input datetime is from botocore unmarshalling and it is
# offset-aware so the timedelta of subtracting this time
# to 01/01/1970 using the same tzinfo gives us
# Unix Time (also known as POSIX Time).
time_delta = dt - datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo)
return int(time_delta.total_seconds())
else:
# Added in python 3.3+ and directly returns POSIX time.
return int(dt.timestamp()) | [
"def",
"_dt_to_epoch",
"(",
"self",
",",
"dt",
")",
":",
"if",
"PY2",
":",
"# The input datetime is from botocore unmarshalling and it is",
"# offset-aware so the timedelta of subtracting this time",
"# to 01/01/1970 using the same tzinfo gives us",
"# Unix Time (also known as POSIX Time... | Convert a offset-aware datetime to POSIX time. | [
"Convert",
"a",
"offset",
"-",
"aware",
"datetime",
"to",
"POSIX",
"time",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/connector.py#L136-L149 | train | 213,741 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/utils/stacktrace.py | get_stacktrace | def get_stacktrace(limit=None):
"""
Get a full stacktrace for the current state of execution.
Include the current state of the stack, minus this function.
If there is an active exception, include the stacktrace information from
the exception as well.
:param int limit:
Optionally limit stack trace size results. This parmaeters has the same
meaning as the `limit` parameter in `traceback.print_stack`.
:returns:
List of stack trace objects, in the same form as
`traceback.extract_stack`.
"""
if limit is not None and limit == 0:
# Nothing to return. This is consistent with the behavior of the
# functions in the `traceback` module.
return []
stack = traceback.extract_stack()
# Remove this `get_stacktrace()` function call from the stack info.
# For what we want to report, this is superfluous information and arguably
# adds garbage to the report.
# Also drop the `traceback.extract_stack()` call above from the returned
# stack info, since this is also superfluous.
stack = stack[:-2]
_exc_type, _exc, exc_traceback = sys.exc_info()
if exc_traceback is not None:
# If and only if there is a currently triggered exception, combine the
# exception traceback information with the current stack state to get a
# complete trace.
exc_stack = traceback.extract_tb(exc_traceback)
stack += exc_stack
# Limit the stack trace size, if a limit was specified:
if limit is not None:
# Copy the behavior of `traceback` functions with a `limit` argument.
# See https://docs.python.org/3/library/traceback.html.
if limit > 0:
# limit > 0: include the last `limit` items
stack = stack[-limit:]
else:
# limit < 0: include the first `abs(limit)` items
stack = stack[:abs(limit)]
return stack | python | def get_stacktrace(limit=None):
"""
Get a full stacktrace for the current state of execution.
Include the current state of the stack, minus this function.
If there is an active exception, include the stacktrace information from
the exception as well.
:param int limit:
Optionally limit stack trace size results. This parmaeters has the same
meaning as the `limit` parameter in `traceback.print_stack`.
:returns:
List of stack trace objects, in the same form as
`traceback.extract_stack`.
"""
if limit is not None and limit == 0:
# Nothing to return. This is consistent with the behavior of the
# functions in the `traceback` module.
return []
stack = traceback.extract_stack()
# Remove this `get_stacktrace()` function call from the stack info.
# For what we want to report, this is superfluous information and arguably
# adds garbage to the report.
# Also drop the `traceback.extract_stack()` call above from the returned
# stack info, since this is also superfluous.
stack = stack[:-2]
_exc_type, _exc, exc_traceback = sys.exc_info()
if exc_traceback is not None:
# If and only if there is a currently triggered exception, combine the
# exception traceback information with the current stack state to get a
# complete trace.
exc_stack = traceback.extract_tb(exc_traceback)
stack += exc_stack
# Limit the stack trace size, if a limit was specified:
if limit is not None:
# Copy the behavior of `traceback` functions with a `limit` argument.
# See https://docs.python.org/3/library/traceback.html.
if limit > 0:
# limit > 0: include the last `limit` items
stack = stack[-limit:]
else:
# limit < 0: include the first `abs(limit)` items
stack = stack[:abs(limit)]
return stack | [
"def",
"get_stacktrace",
"(",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
"and",
"limit",
"==",
"0",
":",
"# Nothing to return. This is consistent with the behavior of the",
"# functions in the `traceback` module.",
"return",
"[",
"]",
"stack",
... | Get a full stacktrace for the current state of execution.
Include the current state of the stack, minus this function.
If there is an active exception, include the stacktrace information from
the exception as well.
:param int limit:
Optionally limit stack trace size results. This parmaeters has the same
meaning as the `limit` parameter in `traceback.print_stack`.
:returns:
List of stack trace objects, in the same form as
`traceback.extract_stack`. | [
"Get",
"a",
"full",
"stacktrace",
"for",
"the",
"current",
"state",
"of",
"execution",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/utils/stacktrace.py#L5-L51 | train | 213,742 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/local/sampler.py | LocalSampler.should_trace | def should_trace(self, sampling_req=None):
"""
Return True if the sampler decide to sample based on input
information and sampling rules. It will first check if any
custom rule should be applied, if not it falls back to the
default sampling rule.
All optional arugments are extracted from incoming requests by
X-Ray middleware to perform path based sampling.
"""
if sampling_req is None:
return self._should_trace(self._default_rule)
host = sampling_req.get('host', None)
method = sampling_req.get('method', None)
path = sampling_req.get('path', None)
for rule in self._rules:
if rule.applies(host, method, path):
return self._should_trace(rule)
return self._should_trace(self._default_rule) | python | def should_trace(self, sampling_req=None):
"""
Return True if the sampler decide to sample based on input
information and sampling rules. It will first check if any
custom rule should be applied, if not it falls back to the
default sampling rule.
All optional arugments are extracted from incoming requests by
X-Ray middleware to perform path based sampling.
"""
if sampling_req is None:
return self._should_trace(self._default_rule)
host = sampling_req.get('host', None)
method = sampling_req.get('method', None)
path = sampling_req.get('path', None)
for rule in self._rules:
if rule.applies(host, method, path):
return self._should_trace(rule)
return self._should_trace(self._default_rule) | [
"def",
"should_trace",
"(",
"self",
",",
"sampling_req",
"=",
"None",
")",
":",
"if",
"sampling_req",
"is",
"None",
":",
"return",
"self",
".",
"_should_trace",
"(",
"self",
".",
"_default_rule",
")",
"host",
"=",
"sampling_req",
".",
"get",
"(",
"'host'",... | Return True if the sampler decide to sample based on input
information and sampling rules. It will first check if any
custom rule should be applied, if not it falls back to the
default sampling rule.
All optional arugments are extracted from incoming requests by
X-Ray middleware to perform path based sampling. | [
"Return",
"True",
"if",
"the",
"sampler",
"decide",
"to",
"sample",
"based",
"on",
"input",
"information",
"and",
"sampling",
"rules",
".",
"It",
"will",
"first",
"check",
"if",
"any",
"custom",
"rule",
"should",
"be",
"applied",
"if",
"not",
"it",
"falls"... | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/local/sampler.py#L53-L74 | train | 213,743 |
aws/aws-xray-sdk-python | aws_xray_sdk/ext/django/middleware.py | XRayMiddleware.process_exception | def process_exception(self, request, exception):
"""
Add exception information and fault flag to the
current segment.
"""
if self.in_lambda_ctx:
segment = xray_recorder.current_subsegment()
else:
segment = xray_recorder.current_segment()
segment.put_http_meta(http.STATUS, 500)
stack = stacktrace.get_stacktrace(limit=xray_recorder._max_trace_back)
segment.add_exception(exception, stack) | python | def process_exception(self, request, exception):
"""
Add exception information and fault flag to the
current segment.
"""
if self.in_lambda_ctx:
segment = xray_recorder.current_subsegment()
else:
segment = xray_recorder.current_segment()
segment.put_http_meta(http.STATUS, 500)
stack = stacktrace.get_stacktrace(limit=xray_recorder._max_trace_back)
segment.add_exception(exception, stack) | [
"def",
"process_exception",
"(",
"self",
",",
"request",
",",
"exception",
")",
":",
"if",
"self",
".",
"in_lambda_ctx",
":",
"segment",
"=",
"xray_recorder",
".",
"current_subsegment",
"(",
")",
"else",
":",
"segment",
"=",
"xray_recorder",
".",
"current_segm... | Add exception information and fault flag to the
current segment. | [
"Add",
"exception",
"information",
"and",
"fault",
"flag",
"to",
"the",
"current",
"segment",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/ext/django/middleware.py#L93-L105 | train | 213,744 |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/local/reservoir.py | Reservoir.take | def take(self):
"""
Returns True if there are segments left within the
current second, otherwise return False.
"""
with self._lock:
now = int(time.time())
if now != self.this_sec:
self.used_this_sec = 0
self.this_sec = now
if self.used_this_sec >= self.traces_per_sec:
return False
self.used_this_sec = self.used_this_sec + 1
return True | python | def take(self):
"""
Returns True if there are segments left within the
current second, otherwise return False.
"""
with self._lock:
now = int(time.time())
if now != self.this_sec:
self.used_this_sec = 0
self.this_sec = now
if self.used_this_sec >= self.traces_per_sec:
return False
self.used_this_sec = self.used_this_sec + 1
return True | [
"def",
"take",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock",
":",
"now",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"if",
"now",
"!=",
"self",
".",
"this_sec",
":",
"self",
".",
"used_this_sec",
"=",
"0",
"self",
".",
"this_sec",... | Returns True if there are segments left within the
current second, otherwise return False. | [
"Returns",
"True",
"if",
"there",
"are",
"segments",
"left",
"within",
"the",
"current",
"second",
"otherwise",
"return",
"False",
"."
] | 707358cd3a516d51f2ebf71cf34f00e8d906a667 | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/local/reservoir.py#L21-L37 | train | 213,745 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/__init__.py | get_default_connection | def get_default_connection():
"""Returns the default datastore connection.
Defaults endpoint to helper.get_project_endpoint_from_env() and
credentials to helper.get_credentials_from_env().
Use set_options to override defaults.
"""
tid = id(threading.current_thread())
conn = _conn_holder.get(tid)
if not conn:
with(_rlock):
# No other thread would insert a value in our slot, so no need
# to recheck existence inside the lock.
if 'project_endpoint' not in _options and 'project_id' not in _options:
_options['project_endpoint'] = helper.get_project_endpoint_from_env()
if 'credentials' not in _options:
_options['credentials'] = helper.get_credentials_from_env()
# We still need the lock when caching the thread local connection so we
# don't race with _conn_holder.clear() in set_options().
_conn_holder[tid] = conn = connection.Datastore(**_options)
return conn | python | def get_default_connection():
"""Returns the default datastore connection.
Defaults endpoint to helper.get_project_endpoint_from_env() and
credentials to helper.get_credentials_from_env().
Use set_options to override defaults.
"""
tid = id(threading.current_thread())
conn = _conn_holder.get(tid)
if not conn:
with(_rlock):
# No other thread would insert a value in our slot, so no need
# to recheck existence inside the lock.
if 'project_endpoint' not in _options and 'project_id' not in _options:
_options['project_endpoint'] = helper.get_project_endpoint_from_env()
if 'credentials' not in _options:
_options['credentials'] = helper.get_credentials_from_env()
# We still need the lock when caching the thread local connection so we
# don't race with _conn_holder.clear() in set_options().
_conn_holder[tid] = conn = connection.Datastore(**_options)
return conn | [
"def",
"get_default_connection",
"(",
")",
":",
"tid",
"=",
"id",
"(",
"threading",
".",
"current_thread",
"(",
")",
")",
"conn",
"=",
"_conn_holder",
".",
"get",
"(",
"tid",
")",
"if",
"not",
"conn",
":",
"with",
"(",
"_rlock",
")",
":",
"# No other t... | Returns the default datastore connection.
Defaults endpoint to helper.get_project_endpoint_from_env() and
credentials to helper.get_credentials_from_env().
Use set_options to override defaults. | [
"Returns",
"the",
"default",
"datastore",
"connection",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/__init__.py#L79-L100 | train | 213,746 |
GoogleCloudPlatform/google-cloud-datastore | python/demos/todos/todos.py | Todo.get_all | def get_all(cls):
"""Query for all Todo items ordered by creation date.
This method is eventually consistent to avoid the need for an extra index.
"""
req = datastore.RunQueryRequest()
q = req.query
set_kind(q, kind='Todo')
add_property_orders(q, 'created')
resp = datastore.run_query(req)
todos = [Todo.from_proto(r.entity) for r in resp.batch.entity_results]
return todos | python | def get_all(cls):
"""Query for all Todo items ordered by creation date.
This method is eventually consistent to avoid the need for an extra index.
"""
req = datastore.RunQueryRequest()
q = req.query
set_kind(q, kind='Todo')
add_property_orders(q, 'created')
resp = datastore.run_query(req)
todos = [Todo.from_proto(r.entity) for r in resp.batch.entity_results]
return todos | [
"def",
"get_all",
"(",
"cls",
")",
":",
"req",
"=",
"datastore",
".",
"RunQueryRequest",
"(",
")",
"q",
"=",
"req",
".",
"query",
"set_kind",
"(",
"q",
",",
"kind",
"=",
"'Todo'",
")",
"add_property_orders",
"(",
"q",
",",
"'created'",
")",
"resp",
"... | Query for all Todo items ordered by creation date.
This method is eventually consistent to avoid the need for an extra index. | [
"Query",
"for",
"all",
"Todo",
"items",
"ordered",
"by",
"creation",
"date",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/demos/todos/todos.py#L131-L143 | train | 213,747 |
GoogleCloudPlatform/google-cloud-datastore | python/demos/todos/todos.py | Todo.archive | def archive(cls):
"""Delete all Todo items that are done."""
req = datastore.BeginTransactionRequest()
resp = datastore.begin_transaction(req)
tx = resp.transaction
req = datastore.RunQueryRequest()
req.read_options.transaction = tx
q = req.query
set_kind(q, kind='Todo')
add_projection(q, '__key__')
set_composite_filter(q.filter,
datastore.CompositeFilter.AND,
set_property_filter(
datastore.Filter(),
'done', datastore.PropertyFilter.EQUAL, True),
set_property_filter(
datastore.Filter(),
'__key__', datastore.PropertyFilter.HAS_ANCESTOR,
default_todo_list.key))
resp = datastore.run_query(req)
req = datastore.CommitRequest()
req.transaction = tx
for result in resp.batch.entity_results:
req.mutations.add().delete.CopyFrom(result.entity.key)
resp = datastore.commit(req)
return '' | python | def archive(cls):
"""Delete all Todo items that are done."""
req = datastore.BeginTransactionRequest()
resp = datastore.begin_transaction(req)
tx = resp.transaction
req = datastore.RunQueryRequest()
req.read_options.transaction = tx
q = req.query
set_kind(q, kind='Todo')
add_projection(q, '__key__')
set_composite_filter(q.filter,
datastore.CompositeFilter.AND,
set_property_filter(
datastore.Filter(),
'done', datastore.PropertyFilter.EQUAL, True),
set_property_filter(
datastore.Filter(),
'__key__', datastore.PropertyFilter.HAS_ANCESTOR,
default_todo_list.key))
resp = datastore.run_query(req)
req = datastore.CommitRequest()
req.transaction = tx
for result in resp.batch.entity_results:
req.mutations.add().delete.CopyFrom(result.entity.key)
resp = datastore.commit(req)
return '' | [
"def",
"archive",
"(",
"cls",
")",
":",
"req",
"=",
"datastore",
".",
"BeginTransactionRequest",
"(",
")",
"resp",
"=",
"datastore",
".",
"begin_transaction",
"(",
"req",
")",
"tx",
"=",
"resp",
".",
"transaction",
"req",
"=",
"datastore",
".",
"RunQueryRe... | Delete all Todo items that are done. | [
"Delete",
"all",
"Todo",
"items",
"that",
"are",
"done",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/demos/todos/todos.py#L146-L171 | train | 213,748 |
GoogleCloudPlatform/google-cloud-datastore | python/demos/todos/todos.py | Todo.save | def save(self):
"""Update or insert a Todo item."""
req = datastore.CommitRequest()
req.mode = datastore.CommitRequest.NON_TRANSACTIONAL
req.mutations.add().upsert.CopyFrom(self.to_proto())
resp = datastore.commit(req)
if not self.id:
self.id = resp.mutation_results[0].key.path[-1].id
return self | python | def save(self):
"""Update or insert a Todo item."""
req = datastore.CommitRequest()
req.mode = datastore.CommitRequest.NON_TRANSACTIONAL
req.mutations.add().upsert.CopyFrom(self.to_proto())
resp = datastore.commit(req)
if not self.id:
self.id = resp.mutation_results[0].key.path[-1].id
return self | [
"def",
"save",
"(",
"self",
")",
":",
"req",
"=",
"datastore",
".",
"CommitRequest",
"(",
")",
"req",
".",
"mode",
"=",
"datastore",
".",
"CommitRequest",
".",
"NON_TRANSACTIONAL",
"req",
".",
"mutations",
".",
"add",
"(",
")",
".",
"upsert",
".",
"Cop... | Update or insert a Todo item. | [
"Update",
"or",
"insert",
"a",
"Todo",
"item",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/demos/todos/todos.py#L173-L181 | train | 213,749 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/datastore_emulator.py | DatastoreEmulatorFactory.Get | def Get(self, project_id):
"""Returns an existing emulator instance for the provided project_id.
If an emulator instance doesn't yet exist, it creates one.
Args:
project_id: project ID
Returns:
a DatastoreEmulator
"""
if project_id in self._emulators:
return self._emulators[project_id]
emulator = self.Create(project_id)
self._emulators[project_id] = emulator
return emulator | python | def Get(self, project_id):
"""Returns an existing emulator instance for the provided project_id.
If an emulator instance doesn't yet exist, it creates one.
Args:
project_id: project ID
Returns:
a DatastoreEmulator
"""
if project_id in self._emulators:
return self._emulators[project_id]
emulator = self.Create(project_id)
self._emulators[project_id] = emulator
return emulator | [
"def",
"Get",
"(",
"self",
",",
"project_id",
")",
":",
"if",
"project_id",
"in",
"self",
".",
"_emulators",
":",
"return",
"self",
".",
"_emulators",
"[",
"project_id",
"]",
"emulator",
"=",
"self",
".",
"Create",
"(",
"project_id",
")",
"self",
".",
... | Returns an existing emulator instance for the provided project_id.
If an emulator instance doesn't yet exist, it creates one.
Args:
project_id: project ID
Returns:
a DatastoreEmulator | [
"Returns",
"an",
"existing",
"emulator",
"instance",
"for",
"the",
"provided",
"project_id",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/datastore_emulator.py#L71-L87 | train | 213,750 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/datastore_emulator.py | DatastoreEmulatorFactory.Create | def Create(self, project_id, start_options=None, deadline=10):
"""Creates an emulator instance.
This method will wait for up to 'deadline' seconds for the emulator to
start.
Args:
project_id: project ID
start_options: a list of additional command-line options to pass to the
emulator 'start' command
deadline: number of seconds to wait for the datastore to respond
Returns:
a DatastoreEmulator
Raises:
IOError: if the emulator could not be started within the deadline
"""
return DatastoreEmulator(self._emulator_cmd, self._working_directory,
project_id, deadline, start_options) | python | def Create(self, project_id, start_options=None, deadline=10):
"""Creates an emulator instance.
This method will wait for up to 'deadline' seconds for the emulator to
start.
Args:
project_id: project ID
start_options: a list of additional command-line options to pass to the
emulator 'start' command
deadline: number of seconds to wait for the datastore to respond
Returns:
a DatastoreEmulator
Raises:
IOError: if the emulator could not be started within the deadline
"""
return DatastoreEmulator(self._emulator_cmd, self._working_directory,
project_id, deadline, start_options) | [
"def",
"Create",
"(",
"self",
",",
"project_id",
",",
"start_options",
"=",
"None",
",",
"deadline",
"=",
"10",
")",
":",
"return",
"DatastoreEmulator",
"(",
"self",
".",
"_emulator_cmd",
",",
"self",
".",
"_working_directory",
",",
"project_id",
",",
"deadl... | Creates an emulator instance.
This method will wait for up to 'deadline' seconds for the emulator to
start.
Args:
project_id: project ID
start_options: a list of additional command-line options to pass to the
emulator 'start' command
deadline: number of seconds to wait for the datastore to respond
Returns:
a DatastoreEmulator
Raises:
IOError: if the emulator could not be started within the deadline | [
"Creates",
"an",
"emulator",
"instance",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/datastore_emulator.py#L89-L108 | train | 213,751 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/datastore_emulator.py | DatastoreEmulator._WaitForStartup | def _WaitForStartup(self, deadline):
"""Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise.
"""
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
logging.info('emulator responded after %f seconds', Elapsed())
return True
except (socket.error, httplib.ResponseNotReady):
pass
if Elapsed() >= deadline:
# Out of time; give up.
return False
else:
time.sleep(sleep)
sleep *= 2 | python | def _WaitForStartup(self, deadline):
"""Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise.
"""
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
logging.info('emulator responded after %f seconds', Elapsed())
return True
except (socket.error, httplib.ResponseNotReady):
pass
if Elapsed() >= deadline:
# Out of time; give up.
return False
else:
time.sleep(sleep)
sleep *= 2 | [
"def",
"_WaitForStartup",
"(",
"self",
",",
"deadline",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"sleep",
"=",
"0.05",
"def",
"Elapsed",
"(",
")",
":",
"return",
"time",
".",
"time",
"(",
")",
"-",
"start",
"while",
"True",
":",
"try"... | Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise. | [
"Waits",
"for",
"the",
"emulator",
"to",
"start",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/datastore_emulator.py#L170-L198 | train | 213,752 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/datastore_emulator.py | DatastoreEmulator.Clear | def Clear(self):
"""Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise.
"""
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/reset' % self._host, method='POST',
headers=headers)
if response.status == 200:
return True
else:
logging.warning('failed to clear emulator; response was: %s', response) | python | def Clear(self):
"""Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise.
"""
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/reset' % self._host, method='POST',
headers=headers)
if response.status == 200:
return True
else:
logging.warning('failed to clear emulator; response was: %s', response) | [
"def",
"Clear",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"'Content-length'",
":",
"'0'",
"}",
"response",
",",
"_",
"=",
"self",
".",
"_http",
".",
"request",
"(",
"'%s/reset'",
"%",
"self",
".",
"_host",
",",
"method",
"=",
"'POST'",
",",
"header... | Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise. | [
"Clears",
"all",
"data",
"from",
"the",
"emulator",
"instance",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/datastore_emulator.py#L200-L212 | train | 213,753 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/datastore_emulator.py | DatastoreEmulator.Stop | def Stop(self):
"""Stops the emulator instance."""
if not self.__running:
return
logging.info('shutting down the emulator running at %s', self._host)
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/shutdown' % self._host,
method='POST', headers=headers)
if response.status != 200:
logging.warning('failed to shut down emulator; response: %s', response)
self.__running = False
# Delete temp files.
shutil.rmtree(self._tmp_dir) | python | def Stop(self):
"""Stops the emulator instance."""
if not self.__running:
return
logging.info('shutting down the emulator running at %s', self._host)
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/shutdown' % self._host,
method='POST', headers=headers)
if response.status != 200:
logging.warning('failed to shut down emulator; response: %s', response)
self.__running = False
# Delete temp files.
shutil.rmtree(self._tmp_dir) | [
"def",
"Stop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__running",
":",
"return",
"logging",
".",
"info",
"(",
"'shutting down the emulator running at %s'",
",",
"self",
".",
"_host",
")",
"headers",
"=",
"{",
"'Content-length'",
":",
"'0'",
"}",
... | Stops the emulator instance. | [
"Stops",
"the",
"emulator",
"instance",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/datastore_emulator.py#L214-L227 | train | 213,754 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/connection.py | Datastore._call_method | def _call_method(self, method, req, resp_class):
"""_call_method call the given RPC method over HTTP.
It uses the given protobuf message request as the payload and
returns the deserialized protobuf message response.
Args:
method: RPC method name to be called.
req: protobuf message for the RPC request.
resp_class: protobuf message class for the RPC response.
Returns:
Deserialized resp_class protobuf message instance.
Raises:
RPCError: The rpc method call failed.
"""
payload = req.SerializeToString()
headers = {
'Content-Type': 'application/x-protobuf',
'Content-Length': str(len(payload)),
'X-Goog-Api-Format-Version': '2'
}
response, content = self._http.request(
'%s:%s' % (self._url, method),
method='POST', body=payload, headers=headers)
if response.status != 200:
raise _make_rpc_error(method, response, content)
resp = resp_class()
resp.ParseFromString(content)
return resp | python | def _call_method(self, method, req, resp_class):
"""_call_method call the given RPC method over HTTP.
It uses the given protobuf message request as the payload and
returns the deserialized protobuf message response.
Args:
method: RPC method name to be called.
req: protobuf message for the RPC request.
resp_class: protobuf message class for the RPC response.
Returns:
Deserialized resp_class protobuf message instance.
Raises:
RPCError: The rpc method call failed.
"""
payload = req.SerializeToString()
headers = {
'Content-Type': 'application/x-protobuf',
'Content-Length': str(len(payload)),
'X-Goog-Api-Format-Version': '2'
}
response, content = self._http.request(
'%s:%s' % (self._url, method),
method='POST', body=payload, headers=headers)
if response.status != 200:
raise _make_rpc_error(method, response, content)
resp = resp_class()
resp.ParseFromString(content)
return resp | [
"def",
"_call_method",
"(",
"self",
",",
"method",
",",
"req",
",",
"resp_class",
")",
":",
"payload",
"=",
"req",
".",
"SerializeToString",
"(",
")",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/x-protobuf'",
",",
"'Content-Length'",
":",
"str",
... | _call_method call the given RPC method over HTTP.
It uses the given protobuf message request as the payload and
returns the deserialized protobuf message response.
Args:
method: RPC method name to be called.
req: protobuf message for the RPC request.
resp_class: protobuf message class for the RPC response.
Returns:
Deserialized resp_class protobuf message instance.
Raises:
RPCError: The rpc method call failed. | [
"_call_method",
"call",
"the",
"given",
"RPC",
"method",
"over",
"HTTP",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/connection.py#L174-L204 | train | 213,755 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | get_credentials_from_env | def get_credentials_from_env():
"""Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
environments variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
"""
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
return None
if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_EMULATOR_HOST_ENV)
return None
if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
key = f.read()
credentials = client.SignedJwtAssertionCredentials(
os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
logging.info('connecting using private key file.')
return credentials
try:
credentials = client.GoogleCredentials.get_application_default()
credentials = credentials.create_scoped(SCOPE)
logging.info('connecting using Google Application Default Credentials.')
return credentials
except client.ApplicationDefaultCredentialsError, e:
logging.error('Unable to find any credentials to use. '
'If you are running locally, make sure to set the '
'%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
raise e | python | def get_credentials_from_env():
"""Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
environments variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
"""
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
return None
if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_EMULATOR_HOST_ENV)
return None
if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
key = f.read()
credentials = client.SignedJwtAssertionCredentials(
os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
logging.info('connecting using private key file.')
return credentials
try:
credentials = client.GoogleCredentials.get_application_default()
credentials = credentials.create_scoped(SCOPE)
logging.info('connecting using Google Application Default Credentials.')
return credentials
except client.ApplicationDefaultCredentialsError, e:
logging.error('Unable to find any credentials to use. '
'If you are running locally, make sure to set the '
'%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
raise e | [
"def",
"get_credentials_from_env",
"(",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV",
")",
":",
"logging",
".",
"info",
"(",
"'connecting without credentials because %s is set.'",
",",
"_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV",
"... | Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
environments variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None. | [
"Get",
"credentials",
"from",
"environment",
"variables",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L71-L111 | train | 213,756 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | get_project_endpoint_from_env | def get_project_endpoint_from_env(project_id=None, host=None):
"""Get Datastore project endpoint from environment variables.
Args:
project_id: The Cloud project, defaults to the environment
variable DATASTORE_PROJECT_ID.
host: The Cloud Datastore API host to use.
Returns:
the endpoint to use, for example
https://datastore.googleapis.com/v1/projects/my-project
Raises:
ValueError: if the wrong environment variable was set or a project_id was
not provided.
"""
project_id = project_id or os.getenv(_DATASTORE_PROJECT_ID_ENV)
if not project_id:
raise ValueError('project_id was not provided. Either pass it in '
'directly or set DATASTORE_PROJECT_ID.')
# DATASTORE_HOST is deprecated.
if os.getenv(_DATASTORE_HOST_ENV):
logging.warning('Ignoring value of environment variable DATASTORE_HOST. '
'To point datastore to a host running locally, use the '
'environment variable DATASTORE_EMULATOR_HOST')
url_override = os.getenv(_DATASTORE_URL_OVERRIDE_ENV)
if url_override:
return '%s/projects/%s' % (url_override, project_id)
localhost = os.getenv(_DATASTORE_EMULATOR_HOST_ENV)
if localhost:
return ('http://%s/%s/projects/%s'
% (localhost, API_VERSION, project_id))
host = host or GOOGLEAPIS_HOST
return 'https://%s/%s/projects/%s' % (host, API_VERSION, project_id) | python | def get_project_endpoint_from_env(project_id=None, host=None):
"""Get Datastore project endpoint from environment variables.
Args:
project_id: The Cloud project, defaults to the environment
variable DATASTORE_PROJECT_ID.
host: The Cloud Datastore API host to use.
Returns:
the endpoint to use, for example
https://datastore.googleapis.com/v1/projects/my-project
Raises:
ValueError: if the wrong environment variable was set or a project_id was
not provided.
"""
project_id = project_id or os.getenv(_DATASTORE_PROJECT_ID_ENV)
if not project_id:
raise ValueError('project_id was not provided. Either pass it in '
'directly or set DATASTORE_PROJECT_ID.')
# DATASTORE_HOST is deprecated.
if os.getenv(_DATASTORE_HOST_ENV):
logging.warning('Ignoring value of environment variable DATASTORE_HOST. '
'To point datastore to a host running locally, use the '
'environment variable DATASTORE_EMULATOR_HOST')
url_override = os.getenv(_DATASTORE_URL_OVERRIDE_ENV)
if url_override:
return '%s/projects/%s' % (url_override, project_id)
localhost = os.getenv(_DATASTORE_EMULATOR_HOST_ENV)
if localhost:
return ('http://%s/%s/projects/%s'
% (localhost, API_VERSION, project_id))
host = host or GOOGLEAPIS_HOST
return 'https://%s/%s/projects/%s' % (host, API_VERSION, project_id) | [
"def",
"get_project_endpoint_from_env",
"(",
"project_id",
"=",
"None",
",",
"host",
"=",
"None",
")",
":",
"project_id",
"=",
"project_id",
"or",
"os",
".",
"getenv",
"(",
"_DATASTORE_PROJECT_ID_ENV",
")",
"if",
"not",
"project_id",
":",
"raise",
"ValueError",
... | Get Datastore project endpoint from environment variables.
Args:
project_id: The Cloud project, defaults to the environment
variable DATASTORE_PROJECT_ID.
host: The Cloud Datastore API host to use.
Returns:
the endpoint to use, for example
https://datastore.googleapis.com/v1/projects/my-project
Raises:
ValueError: if the wrong environment variable was set or a project_id was
not provided. | [
"Get",
"Datastore",
"project",
"endpoint",
"from",
"environment",
"variables",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L114-L150 | train | 213,757 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | add_key_path | def add_key_path(key_proto, *path_elements):
"""Add path elements to the given datastore.Key proto message.
Args:
key_proto: datastore.Key proto message.
*path_elements: list of ancestors to add to the key.
(kind1, id1/name1, ..., kindN, idN/nameN), the last 2 elements
represent the entity key, if no terminating id/name: they key
will be an incomplete key.
Raises:
TypeError: the given id or name has the wrong type.
Returns:
the same datastore.Key.
Usage:
>>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete
datastore.Key(...)
"""
for i in range(0, len(path_elements), 2):
pair = path_elements[i:i+2]
elem = key_proto.path.add()
elem.kind = pair[0]
if len(pair) == 1:
return # incomplete key
id_or_name = pair[1]
if isinstance(id_or_name, (int, long)):
elem.id = id_or_name
elif isinstance(id_or_name, basestring):
elem.name = id_or_name
else:
raise TypeError(
'Expected an integer id or string name as argument %d; '
'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name)))
return key_proto | python | def add_key_path(key_proto, *path_elements):
"""Add path elements to the given datastore.Key proto message.
Args:
key_proto: datastore.Key proto message.
*path_elements: list of ancestors to add to the key.
(kind1, id1/name1, ..., kindN, idN/nameN), the last 2 elements
represent the entity key, if no terminating id/name: they key
will be an incomplete key.
Raises:
TypeError: the given id or name has the wrong type.
Returns:
the same datastore.Key.
Usage:
>>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete
datastore.Key(...)
"""
for i in range(0, len(path_elements), 2):
pair = path_elements[i:i+2]
elem = key_proto.path.add()
elem.kind = pair[0]
if len(pair) == 1:
return # incomplete key
id_or_name = pair[1]
if isinstance(id_or_name, (int, long)):
elem.id = id_or_name
elif isinstance(id_or_name, basestring):
elem.name = id_or_name
else:
raise TypeError(
'Expected an integer id or string name as argument %d; '
'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name)))
return key_proto | [
"def",
"add_key_path",
"(",
"key_proto",
",",
"*",
"path_elements",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"path_elements",
")",
",",
"2",
")",
":",
"pair",
"=",
"path_elements",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"elem",
... | Add path elements to the given datastore.Key proto message.
Args:
key_proto: datastore.Key proto message.
*path_elements: list of ancestors to add to the key.
(kind1, id1/name1, ..., kindN, idN/nameN), the last 2 elements
represent the entity key, if no terminating id/name: they key
will be an incomplete key.
Raises:
TypeError: the given id or name has the wrong type.
Returns:
the same datastore.Key.
Usage:
>>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete
datastore.Key(...) | [
"Add",
"path",
"elements",
"to",
"the",
"given",
"datastore",
".",
"Key",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L153-L194 | train | 213,758 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | add_properties | def add_properties(entity_proto, property_dict, exclude_from_indexes=None):
"""Add values to the given datastore.Entity proto message.
Args:
entity_proto: datastore.Entity proto message.
property_dict: a dictionary from property name to either a python object or
datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value
message).
Usage:
>>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]})
Raises:
TypeError: if a given property value type is not supported.
"""
for name, value in property_dict.iteritems():
set_property(entity_proto.properties, name, value, exclude_from_indexes) | python | def add_properties(entity_proto, property_dict, exclude_from_indexes=None):
"""Add values to the given datastore.Entity proto message.
Args:
entity_proto: datastore.Entity proto message.
property_dict: a dictionary from property name to either a python object or
datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value
message).
Usage:
>>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]})
Raises:
TypeError: if a given property value type is not supported.
"""
for name, value in property_dict.iteritems():
set_property(entity_proto.properties, name, value, exclude_from_indexes) | [
"def",
"add_properties",
"(",
"entity_proto",
",",
"property_dict",
",",
"exclude_from_indexes",
"=",
"None",
")",
":",
"for",
"name",
",",
"value",
"in",
"property_dict",
".",
"iteritems",
"(",
")",
":",
"set_property",
"(",
"entity_proto",
".",
"properties",
... | Add values to the given datastore.Entity proto message.
Args:
entity_proto: datastore.Entity proto message.
property_dict: a dictionary from property name to either a python object or
datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value
message).
Usage:
>>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]})
Raises:
TypeError: if a given property value type is not supported. | [
"Add",
"values",
"to",
"the",
"given",
"datastore",
".",
"Entity",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L197-L215 | train | 213,759 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | set_property | def set_property(property_map, name, value, exclude_from_indexes=None):
"""Set property value in the given datastore.Property proto message.
Args:
property_map: a string->datastore.Value protobuf map.
name: name of the property.
value: python object or datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value message).
Usage:
>>> set_property(property_proto, 'foo', u'a')
Raises:
TypeError: if the given value type is not supported.
"""
set_value(property_map[name], value, exclude_from_indexes) | python | def set_property(property_map, name, value, exclude_from_indexes=None):
"""Set property value in the given datastore.Property proto message.
Args:
property_map: a string->datastore.Value protobuf map.
name: name of the property.
value: python object or datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value message).
Usage:
>>> set_property(property_proto, 'foo', u'a')
Raises:
TypeError: if the given value type is not supported.
"""
set_value(property_map[name], value, exclude_from_indexes) | [
"def",
"set_property",
"(",
"property_map",
",",
"name",
",",
"value",
",",
"exclude_from_indexes",
"=",
"None",
")",
":",
"set_value",
"(",
"property_map",
"[",
"name",
"]",
",",
"value",
",",
"exclude_from_indexes",
")"
] | Set property value in the given datastore.Property proto message.
Args:
property_map: a string->datastore.Value protobuf map.
name: name of the property.
value: python object or datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value message).
Usage:
>>> set_property(property_proto, 'foo', u'a')
Raises:
TypeError: if the given value type is not supported. | [
"Set",
"property",
"value",
"in",
"the",
"given",
"datastore",
".",
"Property",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L218-L234 | train | 213,760 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | set_value | def set_value(value_proto, value, exclude_from_indexes=None):
"""Set the corresponding datastore.Value _value field for the given arg.
Args:
value_proto: datastore.Value proto message.
value: python object or datastore.Value. (unicode value will set a
datastore string value, str value will set a blob string value).
Undefined behavior if value is/contains value_proto.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value
message).
Raises:
TypeError: if the given value type is not supported.
"""
value_proto.Clear()
if isinstance(value, (list, tuple)):
for sub_value in value:
set_value(value_proto.array_value.values.add(), sub_value,
exclude_from_indexes)
return # do not set indexed for a list property.
if isinstance(value, entity_pb2.Value):
value_proto.MergeFrom(value)
elif isinstance(value, unicode):
value_proto.string_value = value
elif isinstance(value, str):
value_proto.blob_value = value
elif isinstance(value, bool):
value_proto.boolean_value = value
elif isinstance(value, (int, long)):
value_proto.integer_value = value
elif isinstance(value, float):
value_proto.double_value = value
elif isinstance(value, datetime.datetime):
to_timestamp(value, value_proto.timestamp_value)
elif isinstance(value, entity_pb2.Key):
value_proto.key_value.CopyFrom(value)
elif isinstance(value, entity_pb2.Entity):
value_proto.entity_value.CopyFrom(value)
else:
raise TypeError('value type: %r not supported' % (value,))
if exclude_from_indexes is not None:
value_proto.exclude_from_indexes = exclude_from_indexes | python | def set_value(value_proto, value, exclude_from_indexes=None):
"""Set the corresponding datastore.Value _value field for the given arg.
Args:
value_proto: datastore.Value proto message.
value: python object or datastore.Value. (unicode value will set a
datastore string value, str value will set a blob string value).
Undefined behavior if value is/contains value_proto.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value
message).
Raises:
TypeError: if the given value type is not supported.
"""
value_proto.Clear()
if isinstance(value, (list, tuple)):
for sub_value in value:
set_value(value_proto.array_value.values.add(), sub_value,
exclude_from_indexes)
return # do not set indexed for a list property.
if isinstance(value, entity_pb2.Value):
value_proto.MergeFrom(value)
elif isinstance(value, unicode):
value_proto.string_value = value
elif isinstance(value, str):
value_proto.blob_value = value
elif isinstance(value, bool):
value_proto.boolean_value = value
elif isinstance(value, (int, long)):
value_proto.integer_value = value
elif isinstance(value, float):
value_proto.double_value = value
elif isinstance(value, datetime.datetime):
to_timestamp(value, value_proto.timestamp_value)
elif isinstance(value, entity_pb2.Key):
value_proto.key_value.CopyFrom(value)
elif isinstance(value, entity_pb2.Entity):
value_proto.entity_value.CopyFrom(value)
else:
raise TypeError('value type: %r not supported' % (value,))
if exclude_from_indexes is not None:
value_proto.exclude_from_indexes = exclude_from_indexes | [
"def",
"set_value",
"(",
"value_proto",
",",
"value",
",",
"exclude_from_indexes",
"=",
"None",
")",
":",
"value_proto",
".",
"Clear",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"sub_value",
"in",
... | Set the corresponding datastore.Value _value field for the given arg.
Args:
value_proto: datastore.Value proto message.
value: python object or datastore.Value. (unicode value will set a
datastore string value, str value will set a blob string value).
Undefined behavior if value is/contains value_proto.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value
message).
Raises:
TypeError: if the given value type is not supported. | [
"Set",
"the",
"corresponding",
"datastore",
".",
"Value",
"_value",
"field",
"for",
"the",
"given",
"arg",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L237-L282 | train | 213,761 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | get_value | def get_value(value_proto):
"""Gets the python object equivalent for the given value proto.
Args:
value_proto: datastore.Value proto message.
Returns:
the corresponding python object value. timestamps are converted to
datetime, and datastore.Value is returned for blob_key_value.
"""
field = value_proto.WhichOneof('value_type')
if field in __native_value_types:
return getattr(value_proto, field)
if field == 'timestamp_value':
return from_timestamp(value_proto.timestamp_value)
if field == 'array_value':
return [get_value(sub_value)
for sub_value in value_proto.array_value.values]
return None | python | def get_value(value_proto):
"""Gets the python object equivalent for the given value proto.
Args:
value_proto: datastore.Value proto message.
Returns:
the corresponding python object value. timestamps are converted to
datetime, and datastore.Value is returned for blob_key_value.
"""
field = value_proto.WhichOneof('value_type')
if field in __native_value_types:
return getattr(value_proto, field)
if field == 'timestamp_value':
return from_timestamp(value_proto.timestamp_value)
if field == 'array_value':
return [get_value(sub_value)
for sub_value in value_proto.array_value.values]
return None | [
"def",
"get_value",
"(",
"value_proto",
")",
":",
"field",
"=",
"value_proto",
".",
"WhichOneof",
"(",
"'value_type'",
")",
"if",
"field",
"in",
"__native_value_types",
":",
"return",
"getattr",
"(",
"value_proto",
",",
"field",
")",
"if",
"field",
"==",
"'t... | Gets the python object equivalent for the given value proto.
Args:
value_proto: datastore.Value proto message.
Returns:
the corresponding python object value. timestamps are converted to
datetime, and datastore.Value is returned for blob_key_value. | [
"Gets",
"the",
"python",
"object",
"equivalent",
"for",
"the",
"given",
"value",
"proto",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L285-L303 | train | 213,762 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | get_property_dict | def get_property_dict(entity_proto):
"""Convert datastore.Entity to a dict of property name -> datastore.Value.
Args:
entity_proto: datastore.Entity proto message.
Usage:
>>> get_property_dict(entity_proto)
{'foo': {string_value='a'}, 'bar': {integer_value=2}}
Returns:
dict of entity properties.
"""
return dict((p.key, p.value) for p in entity_proto.property) | python | def get_property_dict(entity_proto):
"""Convert datastore.Entity to a dict of property name -> datastore.Value.
Args:
entity_proto: datastore.Entity proto message.
Usage:
>>> get_property_dict(entity_proto)
{'foo': {string_value='a'}, 'bar': {integer_value=2}}
Returns:
dict of entity properties.
"""
return dict((p.key, p.value) for p in entity_proto.property) | [
"def",
"get_property_dict",
"(",
"entity_proto",
")",
":",
"return",
"dict",
"(",
"(",
"p",
".",
"key",
",",
"p",
".",
"value",
")",
"for",
"p",
"in",
"entity_proto",
".",
"property",
")"
] | Convert datastore.Entity to a dict of property name -> datastore.Value.
Args:
entity_proto: datastore.Entity proto message.
Usage:
>>> get_property_dict(entity_proto)
{'foo': {string_value='a'}, 'bar': {integer_value=2}}
Returns:
dict of entity properties. | [
"Convert",
"datastore",
".",
"Entity",
"to",
"a",
"dict",
"of",
"property",
"name",
"-",
">",
"datastore",
".",
"Value",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L306-L319 | train | 213,763 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | set_kind | def set_kind(query_proto, kind):
"""Set the kind constraint for the given datastore.Query proto message."""
del query_proto.kind[:]
query_proto.kind.add().name = kind | python | def set_kind(query_proto, kind):
"""Set the kind constraint for the given datastore.Query proto message."""
del query_proto.kind[:]
query_proto.kind.add().name = kind | [
"def",
"set_kind",
"(",
"query_proto",
",",
"kind",
")",
":",
"del",
"query_proto",
".",
"kind",
"[",
":",
"]",
"query_proto",
".",
"kind",
".",
"add",
"(",
")",
".",
"name",
"=",
"kind"
] | Set the kind constraint for the given datastore.Query proto message. | [
"Set",
"the",
"kind",
"constraint",
"for",
"the",
"given",
"datastore",
".",
"Query",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L322-L325 | train | 213,764 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | add_property_orders | def add_property_orders(query_proto, *orders):
"""Add ordering constraint for the given datastore.Query proto message.
Args:
query_proto: datastore.Query proto message.
orders: list of propertype name string, default to ascending
order and set descending if prefixed by '-'.
Usage:
>>> add_property_orders(query_proto, 'foo') # sort by foo asc
>>> add_property_orders(query_proto, '-bar') # sort by bar desc
"""
for order in orders:
proto = query_proto.order.add()
if order[0] == '-':
order = order[1:]
proto.direction = query_pb2.PropertyOrder.DESCENDING
else:
proto.direction = query_pb2.PropertyOrder.ASCENDING
proto.property.name = order | python | def add_property_orders(query_proto, *orders):
"""Add ordering constraint for the given datastore.Query proto message.
Args:
query_proto: datastore.Query proto message.
orders: list of propertype name string, default to ascending
order and set descending if prefixed by '-'.
Usage:
>>> add_property_orders(query_proto, 'foo') # sort by foo asc
>>> add_property_orders(query_proto, '-bar') # sort by bar desc
"""
for order in orders:
proto = query_proto.order.add()
if order[0] == '-':
order = order[1:]
proto.direction = query_pb2.PropertyOrder.DESCENDING
else:
proto.direction = query_pb2.PropertyOrder.ASCENDING
proto.property.name = order | [
"def",
"add_property_orders",
"(",
"query_proto",
",",
"*",
"orders",
")",
":",
"for",
"order",
"in",
"orders",
":",
"proto",
"=",
"query_proto",
".",
"order",
".",
"add",
"(",
")",
"if",
"order",
"[",
"0",
"]",
"==",
"'-'",
":",
"order",
"=",
"order... | Add ordering constraint for the given datastore.Query proto message.
Args:
query_proto: datastore.Query proto message.
orders: list of propertype name string, default to ascending
order and set descending if prefixed by '-'.
Usage:
>>> add_property_orders(query_proto, 'foo') # sort by foo asc
>>> add_property_orders(query_proto, '-bar') # sort by bar desc | [
"Add",
"ordering",
"constraint",
"for",
"the",
"given",
"datastore",
".",
"Query",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L328-L347 | train | 213,765 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | add_projection | def add_projection(query_proto, *projection):
"""Add projection properties to the given datatstore.Query proto message."""
for p in projection:
proto = query_proto.projection.add()
proto.property.name = p | python | def add_projection(query_proto, *projection):
"""Add projection properties to the given datatstore.Query proto message."""
for p in projection:
proto = query_proto.projection.add()
proto.property.name = p | [
"def",
"add_projection",
"(",
"query_proto",
",",
"*",
"projection",
")",
":",
"for",
"p",
"in",
"projection",
":",
"proto",
"=",
"query_proto",
".",
"projection",
".",
"add",
"(",
")",
"proto",
".",
"property",
".",
"name",
"=",
"p"
] | Add projection properties to the given datatstore.Query proto message. | [
"Add",
"projection",
"properties",
"to",
"the",
"given",
"datatstore",
".",
"Query",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L350-L354 | train | 213,766 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | set_property_filter | def set_property_filter(filter_proto, name, op, value):
"""Set property filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
name: property name
op: datastore.PropertyFilter.Operation
value: property value
Returns:
the same datastore.Filter.
Usage:
>>> set_property_filter(filter_proto, 'foo',
... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
"""
filter_proto.Clear()
pf = filter_proto.property_filter
pf.property.name = name
pf.op = op
set_value(pf.value, value)
return filter_proto | python | def set_property_filter(filter_proto, name, op, value):
"""Set property filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
name: property name
op: datastore.PropertyFilter.Operation
value: property value
Returns:
the same datastore.Filter.
Usage:
>>> set_property_filter(filter_proto, 'foo',
... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
"""
filter_proto.Clear()
pf = filter_proto.property_filter
pf.property.name = name
pf.op = op
set_value(pf.value, value)
return filter_proto | [
"def",
"set_property_filter",
"(",
"filter_proto",
",",
"name",
",",
"op",
",",
"value",
")",
":",
"filter_proto",
".",
"Clear",
"(",
")",
"pf",
"=",
"filter_proto",
".",
"property_filter",
"pf",
".",
"property",
".",
"name",
"=",
"name",
"pf",
".",
"op"... | Set property filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
name: property name
op: datastore.PropertyFilter.Operation
value: property value
Returns:
the same datastore.Filter.
Usage:
>>> set_property_filter(filter_proto, 'foo',
... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a' | [
"Set",
"property",
"filter",
"contraint",
"in",
"the",
"given",
"datastore",
".",
"Filter",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L357-L378 | train | 213,767 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | set_composite_filter | def set_composite_filter(filter_proto, op, *filters):
"""Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
"""
filter_proto.Clear()
cf = filter_proto.composite_filter
cf.op = op
for f in filters:
cf.filters.add().CopyFrom(f)
return filter_proto | python | def set_composite_filter(filter_proto, op, *filters):
"""Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
"""
filter_proto.Clear()
cf = filter_proto.composite_filter
cf.op = op
for f in filters:
cf.filters.add().CopyFrom(f)
return filter_proto | [
"def",
"set_composite_filter",
"(",
"filter_proto",
",",
"op",
",",
"*",
"filters",
")",
":",
"filter_proto",
".",
"Clear",
"(",
")",
"cf",
"=",
"filter_proto",
".",
"composite_filter",
"cf",
".",
"op",
"=",
"op",
"for",
"f",
"in",
"filters",
":",
"cf",
... | Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ... | [
"Set",
"composite",
"filter",
"contraint",
"in",
"the",
"given",
"datastore",
".",
"Filter",
"proto",
"message",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L381-L402 | train | 213,768 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | micros_to_timestamp | def micros_to_timestamp(micros, timestamp):
"""Convert microseconds from utc epoch to google.protobuf.timestamp.
Args:
micros: a long, number of microseconds since utc epoch.
timestamp: a google.protobuf.timestamp.Timestamp to populate.
"""
seconds = long(micros / _MICROS_PER_SECOND)
micro_remainder = micros % _MICROS_PER_SECOND
timestamp.seconds = seconds
timestamp.nanos = micro_remainder * _NANOS_PER_MICRO | python | def micros_to_timestamp(micros, timestamp):
"""Convert microseconds from utc epoch to google.protobuf.timestamp.
Args:
micros: a long, number of microseconds since utc epoch.
timestamp: a google.protobuf.timestamp.Timestamp to populate.
"""
seconds = long(micros / _MICROS_PER_SECOND)
micro_remainder = micros % _MICROS_PER_SECOND
timestamp.seconds = seconds
timestamp.nanos = micro_remainder * _NANOS_PER_MICRO | [
"def",
"micros_to_timestamp",
"(",
"micros",
",",
"timestamp",
")",
":",
"seconds",
"=",
"long",
"(",
"micros",
"/",
"_MICROS_PER_SECOND",
")",
"micro_remainder",
"=",
"micros",
"%",
"_MICROS_PER_SECOND",
"timestamp",
".",
"seconds",
"=",
"seconds",
"timestamp",
... | Convert microseconds from utc epoch to google.protobuf.timestamp.
Args:
micros: a long, number of microseconds since utc epoch.
timestamp: a google.protobuf.timestamp.Timestamp to populate. | [
"Convert",
"microseconds",
"from",
"utc",
"epoch",
"to",
"google",
".",
"protobuf",
".",
"timestamp",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L422-L432 | train | 213,769 |
GoogleCloudPlatform/google-cloud-datastore | python/googledatastore/helper.py | to_timestamp | def to_timestamp(dt, timestamp):
"""Convert datetime to google.protobuf.Timestamp.
Args:
dt: a timezone naive datetime.
timestamp: a google.protobuf.Timestamp to populate.
Raises:
TypeError: if a timezone aware datetime was provided.
"""
if dt.tzinfo:
# this is an "aware" datetime with an explicit timezone. Throw an error.
raise TypeError('Cannot store a timezone aware datetime. '
'Convert to UTC and store the naive datetime.')
timestamp.seconds = calendar.timegm(dt.timetuple())
timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO | python | def to_timestamp(dt, timestamp):
"""Convert datetime to google.protobuf.Timestamp.
Args:
dt: a timezone naive datetime.
timestamp: a google.protobuf.Timestamp to populate.
Raises:
TypeError: if a timezone aware datetime was provided.
"""
if dt.tzinfo:
# this is an "aware" datetime with an explicit timezone. Throw an error.
raise TypeError('Cannot store a timezone aware datetime. '
'Convert to UTC and store the naive datetime.')
timestamp.seconds = calendar.timegm(dt.timetuple())
timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO | [
"def",
"to_timestamp",
"(",
"dt",
",",
"timestamp",
")",
":",
"if",
"dt",
".",
"tzinfo",
":",
"# this is an \"aware\" datetime with an explicit timezone. Throw an error.",
"raise",
"TypeError",
"(",
"'Cannot store a timezone aware datetime. '",
"'Convert to UTC and store the naiv... | Convert datetime to google.protobuf.Timestamp.
Args:
dt: a timezone naive datetime.
timestamp: a google.protobuf.Timestamp to populate.
Raises:
TypeError: if a timezone aware datetime was provided. | [
"Convert",
"datetime",
"to",
"google",
".",
"protobuf",
".",
"Timestamp",
"."
] | a23940d0634d7f537faf01ad9e60598046bcb40a | https://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/a23940d0634d7f537faf01ad9e60598046bcb40a/python/googledatastore/helper.py#L435-L450 | train | 213,770 |
HDI-Project/MLBlocks | mlblocks/mlblock.py | MLBlock._extract_params | def _extract_params(self, kwargs, hyperparameters):
"""Extract init, fit and produce params from kwargs.
The `init_params`, `fit_params` and `produce_params` are extracted
from the passed `kwargs` taking the metadata hyperparameters as a
reference.
During this extraction, make sure that all the required hyperparameters
have been given and that nothing unexpected exists in the input.
Args:
kwargs (dict): dict containing the Keyword arguments that have
been passed to the `__init__` method upon
initialization.
hyperparameters (dict): hyperparameters dictionary, as found in
the JSON annotation.
Raises:
TypeError: A `TypeError` is raised if a required argument is not
found in the `kwargs` dict, or if an unexpected
argument has been given.
"""
init_params = dict()
fit_params = dict()
produce_params = dict()
for name, param in hyperparameters.get('fixed', dict()).items():
if name in kwargs:
value = kwargs.pop(name)
elif 'default' in param:
value = param['default']
else:
raise TypeError("{} required argument '{}' not found".format(self.name, name))
init_params[name] = value
for name, param in hyperparameters.get('tunable', dict()).items():
if name in kwargs:
init_params[name] = kwargs.pop(name)
fit_args = [arg['name'] for arg in self.fit_args]
produce_args = [arg['name'] for arg in self.produce_args]
for name in list(kwargs.keys()):
if name in fit_args:
fit_params[name] = kwargs.pop(name)
elif name in produce_args:
produce_params[name] = kwargs.pop(name)
if kwargs:
error = "Unexpected hyperparameters '{}'".format(', '.join(kwargs.keys()))
raise TypeError(error)
return init_params, fit_params, produce_params | python | def _extract_params(self, kwargs, hyperparameters):
"""Extract init, fit and produce params from kwargs.
The `init_params`, `fit_params` and `produce_params` are extracted
from the passed `kwargs` taking the metadata hyperparameters as a
reference.
During this extraction, make sure that all the required hyperparameters
have been given and that nothing unexpected exists in the input.
Args:
kwargs (dict): dict containing the Keyword arguments that have
been passed to the `__init__` method upon
initialization.
hyperparameters (dict): hyperparameters dictionary, as found in
the JSON annotation.
Raises:
TypeError: A `TypeError` is raised if a required argument is not
found in the `kwargs` dict, or if an unexpected
argument has been given.
"""
init_params = dict()
fit_params = dict()
produce_params = dict()
for name, param in hyperparameters.get('fixed', dict()).items():
if name in kwargs:
value = kwargs.pop(name)
elif 'default' in param:
value = param['default']
else:
raise TypeError("{} required argument '{}' not found".format(self.name, name))
init_params[name] = value
for name, param in hyperparameters.get('tunable', dict()).items():
if name in kwargs:
init_params[name] = kwargs.pop(name)
fit_args = [arg['name'] for arg in self.fit_args]
produce_args = [arg['name'] for arg in self.produce_args]
for name in list(kwargs.keys()):
if name in fit_args:
fit_params[name] = kwargs.pop(name)
elif name in produce_args:
produce_params[name] = kwargs.pop(name)
if kwargs:
error = "Unexpected hyperparameters '{}'".format(', '.join(kwargs.keys()))
raise TypeError(error)
return init_params, fit_params, produce_params | [
"def",
"_extract_params",
"(",
"self",
",",
"kwargs",
",",
"hyperparameters",
")",
":",
"init_params",
"=",
"dict",
"(",
")",
"fit_params",
"=",
"dict",
"(",
")",
"produce_params",
"=",
"dict",
"(",
")",
"for",
"name",
",",
"param",
"in",
"hyperparameters"... | Extract init, fit and produce params from kwargs.
The `init_params`, `fit_params` and `produce_params` are extracted
from the passed `kwargs` taking the metadata hyperparameters as a
reference.
During this extraction, make sure that all the required hyperparameters
have been given and that nothing unexpected exists in the input.
Args:
kwargs (dict): dict containing the Keyword arguments that have
been passed to the `__init__` method upon
initialization.
hyperparameters (dict): hyperparameters dictionary, as found in
the JSON annotation.
Raises:
TypeError: A `TypeError` is raised if a required argument is not
found in the `kwargs` dict, or if an unexpected
argument has been given. | [
"Extract",
"init",
"fit",
"and",
"produce",
"params",
"from",
"kwargs",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlblock.py#L55-L111 | train | 213,771 |
def set_hyperparameters(self, hyperparameters):
    """Update this block's hyperparameters.

    Only the given hyperparameters are overwritten; any other value
    previously set is kept. When the primitive is a class, a fresh
    instance is built with the merged hyperparameters.

    Args:
        hyperparameters (dict): hyperparameter names mapped to the
            values to use.
    """
    self._hyperparameters.update(hyperparameters)

    if not self._class:
        return

    # Class-based primitives need to be re-instantiated so the new
    # hyperparameters take effect.
    LOGGER.debug('Creating a new primitive instance for %s', self.name)
    self.instance = self.primitive(**self._hyperparameters)
"""Set new hyperparameters.
Only the specified hyperparameters are modified, so any other
hyperparameter keeps the value that had been previously given.
If necessary, a new instance of the primitive is created.
Args:
hyperparameters (dict): Dictionary containing as keys the name
of the hyperparameters and as values
the values to be used.
"""
self._hyperparameters.update(hyperparameters)
if self._class:
LOGGER.debug('Creating a new primitive instance for %s', self.name)
self.instance = self.primitive(**self._hyperparameters) | [
"def",
"set_hyperparameters",
"(",
"self",
",",
"hyperparameters",
")",
":",
"self",
".",
"_hyperparameters",
".",
"update",
"(",
"hyperparameters",
")",
"if",
"self",
".",
"_class",
":",
"LOGGER",
".",
"debug",
"(",
"'Creating a new primitive instance for %s'",
"... | Set new hyperparameters.
Only the specified hyperparameters are modified, so any other
hyperparameter keeps the value that had been previously given.
If necessary, a new instance of the primitive is created.
Args:
hyperparameters (dict): Dictionary containing as keys the name
of the hyperparameters and as values
the values to be used. | [
"Set",
"new",
"hyperparameters",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlblock.py#L204-L221 | train | 213,772 |
def fit(self, **kwargs):
    """Invoke the primitive's fit method, if it has one.

    The keyword arguments are forwarded to the fit method of the
    primitive instance, merged with any fit arguments captured during
    initialization. If no fit method was declared in the JSON
    annotation (or the primitive is a plain function) this is a no-op.

    Args:
        **kwargs: keyword arguments forwarded to the primitive fit
            method.

    Raises:
        TypeError: may be raised if an argument not expected by the
            primitive fit method is given.
    """
    if self.fit_method is None:
        return

    merged_args = dict(self._fit_params)
    merged_args.update(kwargs)
    getattr(self.instance, self.fit_method)(**merged_args)
"""Call the fit method of the primitive.
The given keyword arguments will be passed directly to the `fit`
method of the primitive instance specified in the JSON annotation.
If any of the arguments expected by the produce method had been
given during the MLBlock initialization, they will be passed as well.
If the fit method was not specified in the JSON annotation, or if
the primitive is a simple function, this will be a noop.
Args:
**kwargs: Any given keyword argument will be directly passed
to the primitive fit method.
Raises:
TypeError: A `TypeError` might be raised if any argument not
expected by the primitive fit method is given.
"""
if self.fit_method is not None:
fit_args = self._fit_params.copy()
fit_args.update(kwargs)
getattr(self.instance, self.fit_method)(**fit_args) | [
"def",
"fit",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"fit_method",
"is",
"not",
"None",
":",
"fit_args",
"=",
"self",
".",
"_fit_params",
".",
"copy",
"(",
")",
"fit_args",
".",
"update",
"(",
"kwargs",
")",
"getattr",
"... | Call the fit method of the primitive.
The given keyword arguments will be passed directly to the `fit`
method of the primitive instance specified in the JSON annotation.
If any of the arguments expected by the produce method had been
given during the MLBlock initialization, they will be passed as well.
If the fit method was not specified in the JSON annotation, or if
the primitive is a simple function, this will be a noop.
Args:
**kwargs: Any given keyword argument will be directly passed
to the primitive fit method.
Raises:
TypeError: A `TypeError` might be raised if any argument not
expected by the primitive fit method is given. | [
"Call",
"the",
"fit",
"method",
"of",
"the",
"primitive",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlblock.py#L223-L246 | train | 213,773 |
def produce(self, **kwargs):
    """Run the primitive and return its output.

    For class primitives, the produce method of the stored instance is
    called; for function primitives, the function itself is called with
    the hyperparameters merged in. In both cases the produce arguments
    captured during initialization are combined with ``kwargs``.

    Args:
        **kwargs: keyword arguments forwarded to the primitive.

    Returns:
        The output of the primitive function or produce method.
    """
    call_args = self._produce_params.copy()
    call_args.update(kwargs)

    if self._class:
        method = getattr(self.instance, self.produce_method)
        return method(**call_args)

    # Function primitives receive the hyperparameters directly.
    call_args.update(self._hyperparameters)
    return self.primitive(**call_args)
"""Call the primitive function, or the predict method of the primitive.
The given keyword arguments will be passed directly to the primitive,
if it is a simple function, or to the `produce` method of the
primitive instance specified in the JSON annotation, if it is a class.
If any of the arguments expected by the fit method had been given
during the MLBlock initialization, they will be passed as well.
Returns:
The output of the call to the primitive function or primitive
produce method.
"""
produce_args = self._produce_params.copy()
produce_args.update(kwargs)
if self._class:
return getattr(self.instance, self.produce_method)(**produce_args)
produce_args.update(self._hyperparameters)
return self.primitive(**produce_args) | [
"def",
"produce",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"produce_args",
"=",
"self",
".",
"_produce_params",
".",
"copy",
"(",
")",
"produce_args",
".",
"update",
"(",
"kwargs",
")",
"if",
"self",
".",
"_class",
":",
"return",
"getattr",
"(",... | Call the primitive function, or the predict method of the primitive.
The given keyword arguments will be passed directly to the primitive,
if it is a simple function, or to the `produce` method of the
primitive instance specified in the JSON annotation, if it is a class.
If any of the arguments expected by the fit method had been given
during the MLBlock initialization, they will be passed as well.
Returns:
The output of the call to the primitive function or primitive
produce method. | [
"Call",
"the",
"primitive",
"function",
"or",
"the",
"predict",
"method",
"of",
"the",
"primitive",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlblock.py#L248-L268 | train | 213,774 |
def get_hyperparameters(self):
    """Collect the current hyperparameters of every block.

    Returns:
        dict: block names mapped to that block's current
            hyperparameters dict.
    """
    return {
        block_name: block.get_hyperparameters()
        for block_name, block in self.blocks.items()
    }
"""Get the current hyperparamters of each block.
Returns:
dict:
A dictionary containing the block names as keys and
the current block hyperparameters dictionary as values.
"""
hyperparameters = {}
for block_name, block in self.blocks.items():
hyperparameters[block_name] = block.get_hyperparameters()
return hyperparameters | [
"def",
"get_hyperparameters",
"(",
"self",
")",
":",
"hyperparameters",
"=",
"{",
"}",
"for",
"block_name",
",",
"block",
"in",
"self",
".",
"blocks",
".",
"items",
"(",
")",
":",
"hyperparameters",
"[",
"block_name",
"]",
"=",
"block",
".",
"get_hyperpara... | Get the current hyperparamters of each block.
Returns:
dict:
A dictionary containing the block names as keys and
the current block hyperparameters dictionary as values. | [
"Get",
"the",
"current",
"hyperparamters",
"of",
"each",
"block",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L117-L129 | train | 213,775 |
def set_hyperparameters(self, hyperparameters):
    """Set new hyperparameter values on selected blocks.

    Args:
        hyperparameters (dict): block names mapped to the new
            hyperparameters dict for that block. Blocks not mentioned
            are left untouched.
    """
    for block_name, block_params in hyperparameters.items():
        self.blocks[block_name].set_hyperparameters(block_params)
"""Set new hyperparameter values for some blocks.
Args:
hyperparameters (dict): A dictionary containing the block names as
keys and the new hyperparameters dictionary
as values.
"""
for block_name, block_hyperparams in hyperparameters.items():
self.blocks[block_name].set_hyperparameters(block_hyperparams) | [
"def",
"set_hyperparameters",
"(",
"self",
",",
"hyperparameters",
")",
":",
"for",
"block_name",
",",
"block_hyperparams",
"in",
"hyperparameters",
".",
"items",
"(",
")",
":",
"self",
".",
"blocks",
"[",
"block_name",
"]",
".",
"set_hyperparameters",
"(",
"b... | Set new hyperparameter values for some blocks.
Args:
hyperparameters (dict): A dictionary containing the block names as
keys and the new hyperparameters dictionary
as values. | [
"Set",
"new",
"hyperparameter",
"values",
"for",
"some",
"blocks",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L131-L140 | train | 213,776 |
def fit(self, X=None, y=None, **kwargs):
    """Fit every block of the pipeline in sequence.

    Each block's ``fit`` is called, then (for every block except the
    last) its ``produce`` output is captured into a shared context
    dict, from which the arguments for the following blocks' ``fit``
    and ``produce`` calls are resolved.

    Args:
        X: Fit data, which the pipeline will learn from.
        y: Fit data labels.
        **kwargs: extra keyword arguments added to the context dict and
            made available to the blocks.

    Raises:
        Exception: whatever a block's fit or produce raises is logged
            and re-raised unchanged.
    """
    context = dict(kwargs)
    context['X'] = X
    context['y'] = y

    # IndexError here if the pipeline has no blocks (same as before).
    last_block_name = list(self.blocks)[-1]

    for block_name, block in self.blocks.items():
        LOGGER.debug("Fitting block %s", block_name)
        try:
            block.fit(**self._get_block_args(block_name, block.fit_args, context))
        except Exception:
            LOGGER.exception("Exception caught fitting MLBlock %s", block_name)
            raise

        if block_name == last_block_name:
            # The last block's output is never needed during fit.
            continue

        LOGGER.debug("Producing block %s", block_name)
        try:
            produce_args = self._get_block_args(block_name, block.produce_args, context)
            outputs = block.produce(**produce_args)
            context.update(self._get_outputs(block_name, outputs, block.produce_output))
        except Exception:
            LOGGER.exception("Exception caught producing MLBlock %s", block_name)
            raise
"""Fit the blocks of this pipeline.
Sequentially call the `fit` and the `produce` methods of each block,
capturing the outputs each `produce` method before calling the `fit`
method of the next one.
During the whole process a context dictionary is built, where both the
passed arguments and the captured outputs of the `produce` methods
are stored, and from which the arguments for the next `fit` and
`produce` calls will be taken.
Args:
X: Fit Data, which the pipeline will learn from.
y: Fit Data labels, which the pipeline will use to learn how to
behave.
**kwargs: Any additional keyword arguments will be directly added
to the context dictionary and available for the blocks.
"""
context = {
'X': X,
'y': y
}
context.update(kwargs)
last_block_name = list(self.blocks.keys())[-1]
for block_name, block in self.blocks.items():
LOGGER.debug("Fitting block %s", block_name)
try:
fit_args = self._get_block_args(block_name, block.fit_args, context)
block.fit(**fit_args)
except Exception:
LOGGER.exception("Exception caught fitting MLBlock %s", block_name)
raise
if block_name != last_block_name:
LOGGER.debug("Producing block %s", block_name)
try:
produce_args = self._get_block_args(block_name, block.produce_args, context)
outputs = block.produce(**produce_args)
output_dict = self._get_outputs(block_name, outputs, block.produce_output)
context.update(output_dict)
except Exception:
LOGGER.exception("Exception caught producing MLBlock %s", block_name)
raise | [
"def",
"fit",
"(",
"self",
",",
"X",
"=",
"None",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"{",
"'X'",
":",
"X",
",",
"'y'",
":",
"y",
"}",
"context",
".",
"update",
"(",
"kwargs",
")",
"last_block_name",
"=",
... | Fit the blocks of this pipeline.
Sequentially call the `fit` and the `produce` methods of each block,
capturing the outputs each `produce` method before calling the `fit`
method of the next one.
During the whole process a context dictionary is built, where both the
passed arguments and the captured outputs of the `produce` methods
are stored, and from which the arguments for the next `fit` and
`produce` calls will be taken.
Args:
X: Fit Data, which the pipeline will learn from.
y: Fit Data labels, which the pipeline will use to learn how to
behave.
**kwargs: Any additional keyword arguments will be directly added
to the context dictionary and available for the blocks. | [
"Fit",
"the",
"blocks",
"of",
"this",
"pipeline",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L191-L236 | train | 213,777 |
def predict(self, X=None, **kwargs):
    """Run every block's produce method in sequence and return the result.

    Each block's ``produce`` output (except the last one's) is captured
    into a shared context dict, from which the arguments for the
    following ``produce`` calls are resolved.

    Args:
        X: Data which the pipeline will use to make predictions.
        **kwargs: extra keyword arguments added to the context dict and
            made available to the blocks.

    Returns:
        The output of the final block's produce call.

    Raises:
        Exception: whatever a block's produce raises is logged and
            re-raised unchanged.
    """
    context = dict(kwargs)
    context['X'] = X

    # IndexError here if the pipeline has no blocks (same as before).
    last_block_name = list(self.blocks)[-1]

    for block_name, block in self.blocks.items():
        LOGGER.debug("Producing block %s", block_name)
        try:
            produce_args = self._get_block_args(block_name, block.produce_args, context)
            outputs = block.produce(**produce_args)
            if block_name != last_block_name:
                context.update(self._get_outputs(block_name, outputs, block.produce_output))
        except Exception:
            LOGGER.exception("Exception caught producing MLBlock %s", block_name)
            raise

    return outputs
"""Produce predictions using the blocks of this pipeline.
Sequentially call the `produce` method of each block, capturing the
outputs before calling the next one.
During the whole process a context dictionary is built, where both the
passed arguments and the captured outputs of the `produce` methods
are stored, and from which the arguments for the next `produce` calls
will be taken.
Args:
X: Data which the pipeline will use to make predictions.
**kwargs: Any additional keyword arguments will be directly added
to the context dictionary and available for the blocks.
"""
context = {
'X': X
}
context.update(kwargs)
last_block_name = list(self.blocks.keys())[-1]
for block_name, block in self.blocks.items():
LOGGER.debug("Producing block %s", block_name)
try:
produce_args = self._get_block_args(block_name, block.produce_args, context)
outputs = block.produce(**produce_args)
if block_name != last_block_name:
output_dict = self._get_outputs(block_name, outputs, block.produce_output)
context.update(output_dict)
except Exception:
LOGGER.exception("Exception caught producing MLBlock %s", block_name)
raise
return outputs | [
"def",
"predict",
"(",
"self",
",",
"X",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"{",
"'X'",
":",
"X",
"}",
"context",
".",
"update",
"(",
"kwargs",
")",
"last_block_name",
"=",
"list",
"(",
"self",
".",
"blocks",
".",
"ke... | Produce predictions using the blocks of this pipeline.
Sequentially call the `produce` method of each block, capturing the
outputs before calling the next one.
During the whole process a context dictionary is built, where both the
passed arguments and the captured outputs of the `produce` methods
are stored, and from which the arguments for the next `produce` calls
will be taken.
Args:
X: Data which the pipeline will use to make predictions.
**kwargs: Any additional keyword arguments will be directly added
to the context dictionary and available for the blocks. | [
"Produce",
"predictions",
"using",
"the",
"blocks",
"of",
"this",
"pipeline",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L238-L274 | train | 213,778 |
def to_dict(self):
    """Serialize this MLPipeline into a plain dict.

    The dict carries all the ``__init__`` arguments of the pipeline
    (primitives, init_params, input/output names) together with the
    current hyperparameter values and the tunable hyperparameters
    specification, e.g.::

        {
            "primitives": ["a_primitive", "another_primitive"],
            "init_params": {"a_primitive": {"an_argument": "a_value"}},
            "hyperparameters": {
                "a_primitive#1": {"an_argument": "a_value"}
            },
            "tunable_hyperparameters": {
                "another_primitive#1": {
                    "yet_another_argument": {
                        "type": "str",
                        "default": "a_default_value",
                        "values": ["a_default_value", "yet_another_value"]
                    }
                }
            }
        }

    Returns:
        dict: the full pipeline specification.
    """
    return dict(
        primitives=self.primitives,
        init_params=self.init_params,
        input_names=self.input_names,
        output_names=self.output_names,
        hyperparameters=self.get_hyperparameters(),
        tunable_hyperparameters=self._tunable_hyperparameters,
    )
"""Return all the details of this MLPipeline in a dict.
The dict structure contains all the `__init__` arguments of the
MLPipeline, as well as the current hyperparameter values and the
specification of the tunable_hyperparameters::
{
"primitives": [
"a_primitive",
"another_primitive"
],
"init_params": {
"a_primitive": {
"an_argument": "a_value"
}
},
"hyperparameters": {
"a_primitive#1": {
"an_argument": "a_value",
"another_argument": "another_value",
},
"another_primitive#1": {
"yet_another_argument": "yet_another_value"
}
},
"tunable_hyperparameters": {
"another_primitive#1": {
"yet_another_argument": {
"type": "str",
"default": "a_default_value",
"values": [
"a_default_value",
"yet_another_value"
]
}
}
}
}
"""
return {
'primitives': self.primitives,
'init_params': self.init_params,
'input_names': self.input_names,
'output_names': self.output_names,
'hyperparameters': self.get_hyperparameters(),
'tunable_hyperparameters': self._tunable_hyperparameters
} | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"'primitives'",
":",
"self",
".",
"primitives",
",",
"'init_params'",
":",
"self",
".",
"init_params",
",",
"'input_names'",
":",
"self",
".",
"input_names",
",",
"'output_names'",
":",
"self",
".",
"... | Return all the details of this MLPipeline in a dict.
The dict structure contains all the `__init__` arguments of the
MLPipeline, as well as the current hyperparameter values and the
specification of the tunable_hyperparameters::
{
"primitives": [
"a_primitive",
"another_primitive"
],
"init_params": {
"a_primitive": {
"an_argument": "a_value"
}
},
"hyperparameters": {
"a_primitive#1": {
"an_argument": "a_value",
"another_argument": "another_value",
},
"another_primitive#1": {
"yet_another_argument": "yet_another_value"
}
},
"tunable_hyperparameters": {
"another_primitive#1": {
"yet_another_argument": {
"type": "str",
"default": "a_default_value",
"values": [
"a_default_value",
"yet_another_value"
]
}
}
}
} | [
"Return",
"all",
"the",
"details",
"of",
"this",
"MLPipeline",
"in",
"a",
"dict",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L276-L323 | train | 213,779 |
def save(self, path):
    """Write this MLPipeline's specification to a JSON file.

    The file content is the dict returned by ``to_dict``.

    Args:
        path (str): Path of the JSON file to write.
    """
    specification = self.to_dict()
    with open(path, 'w') as out_file:
        json.dump(specification, out_file, indent=4)
"""Save the specification of this MLPipeline in a JSON file.
The content of the JSON file is the dict returned by the `to_dict` method.
Args:
path (str): Path to the JSON file to write.
"""
with open(path, 'w') as out_file:
json.dump(self.to_dict(), out_file, indent=4) | [
"def",
"save",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"out_file",
":",
"json",
".",
"dump",
"(",
"self",
".",
"to_dict",
"(",
")",
",",
"out_file",
",",
"indent",
"=",
"4",
")"
] | Save the specification of this MLPipeline in a JSON file.
The content of the JSON file is the dict returned by the `to_dict` method.
Args:
path (str): Path to the JSON file to write. | [
"Save",
"the",
"specification",
"of",
"this",
"MLPipeline",
"in",
"a",
"JSON",
"file",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L325-L334 | train | 213,780 |
def from_dict(cls, metadata):
    """Build a new MLPipeline from a dict specification.

    The expected structure is the one produced by ``to_dict``.

    Args:
        metadata (dict): pipeline specification.

    Returns:
        MLPipeline: a new instance configured as described by
            ``metadata``.
    """
    pipeline = cls(
        metadata['primitives'],
        metadata.get('init_params'),
        metadata.get('input_names'),
        metadata.get('output_names'),
    )

    # Restore the saved hyperparameter values, if any.
    hyperparameters = metadata.get('hyperparameters')
    if hyperparameters:
        pipeline.set_hyperparameters(hyperparameters)

    # An explicitly-saved tunable spec overrides the one derived
    # from the primitives.
    tunable = metadata.get('tunable_hyperparameters')
    if tunable is not None:
        pipeline._tunable_hyperparameters = tunable

    return pipeline
"""Create a new MLPipeline from a dict specification.
The dict structure is the same as the one created by the `to_dict` method.
Args:
metadata (dict): Dictionary containing the pipeline specification.
Returns:
MLPipeline:
A new MLPipeline instance with the details found in the
given specification dictionary.
"""
hyperparameters = metadata.get('hyperparameters')
tunable = metadata.get('tunable_hyperparameters')
pipeline = cls(
metadata['primitives'],
metadata.get('init_params'),
metadata.get('input_names'),
metadata.get('output_names'),
)
if hyperparameters:
pipeline.set_hyperparameters(hyperparameters)
if tunable is not None:
pipeline._tunable_hyperparameters = tunable
return pipeline | [
"def",
"from_dict",
"(",
"cls",
",",
"metadata",
")",
":",
"hyperparameters",
"=",
"metadata",
".",
"get",
"(",
"'hyperparameters'",
")",
"tunable",
"=",
"metadata",
".",
"get",
"(",
"'tunable_hyperparameters'",
")",
"pipeline",
"=",
"cls",
"(",
"metadata",
... | Create a new MLPipeline from a dict specification.
The dict structure is the same as the one created by the `to_dict` method.
Args:
metadata (dict): Dictionary containing the pipeline specification.
Returns:
MLPipeline:
A new MLPipeline instance with the details found in the
given specification dictionary. | [
"Create",
"a",
"new",
"MLPipeline",
"from",
"a",
"dict",
"specification",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L337-L366 | train | 213,781 |
def load(cls, path):
    """Build a new MLPipeline from a JSON specification file.

    The expected file format is the one produced by ``to_dict``.

    Args:
        path (str): Path of the JSON file to load.

    Returns:
        MLPipeline: a new instance configured as described by the file.
    """
    with open(path, 'r') as in_file:
        return cls.from_dict(json.load(in_file))
"""Create a new MLPipeline from a JSON specification.
The JSON file format is the same as the one created by the `to_dict` method.
Args:
path (str): Path of the JSON file to load.
Returns:
MLPipeline:
A new MLPipeline instance with the specification found
in the JSON file.
"""
with open(path, 'r') as in_file:
metadata = json.load(in_file)
return cls.from_dict(metadata) | [
"def",
"load",
"(",
"cls",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"in_file",
":",
"metadata",
"=",
"json",
".",
"load",
"(",
"in_file",
")",
"return",
"cls",
".",
"from_dict",
"(",
"metadata",
")"
] | Create a new MLPipeline from a JSON specification.
The JSON file format is the same as the one created by the `to_dict` method.
Args:
path (str): Path of the JSON file to load.
Returns:
MLPipeline:
A new MLPipeline instance with the specification found
in the JSON file. | [
"Create",
"a",
"new",
"MLPipeline",
"from",
"a",
"JSON",
"specification",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/mlpipeline.py#L369-L385 | train | 213,782 |
def add_primitives_path(path):
    """Register a new folder to look for primitives in.

    The path is prepended to the search list, so primitives found in it
    take precedence over any same-named primitive already known.
    Adding a path that is already registered is a no-op.

    Args:
        path (str): path to add.

    Raises:
        ValueError: if the path is not an existing directory.
    """
    if path in _PRIMITIVES_PATHS:
        return

    if not os.path.isdir(path):
        raise ValueError('Invalid path: {}'.format(path))

    LOGGER.debug('Adding new primitives path %s', path)
    _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
"""Add a new path to look for primitives.
The new path will be inserted in the first place of the list,
so any primitive found in this new folder will take precedence
over any other primitive with the same name that existed in the
system before.
Args:
path (str): path to add
Raises:
ValueError: A `ValueError` will be raised if the path is not valid.
"""
if path not in _PRIMITIVES_PATHS:
if not os.path.isdir(path):
raise ValueError('Invalid path: {}'.format(path))
LOGGER.debug('Adding new primitives path %s', path)
_PRIMITIVES_PATHS.insert(0, os.path.abspath(path)) | [
"def",
"add_primitives_path",
"(",
"path",
")",
":",
"if",
"path",
"not",
"in",
"_PRIMITIVES_PATHS",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid path: {}'",
".",
"format",
"(",
"path",
"... | Add a new path to look for primitives.
The new path will be inserted in the first place of the list,
so any primitive found in this new folder will take precedence
over any other primitive with the same name that existed in the
system before.
Args:
path (str): path to add
Raises:
ValueError: A `ValueError` will be raised if the path is not valid. | [
"Add",
"a",
"new",
"path",
"to",
"look",
"for",
"primitives",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/primitives.py#L27-L46 | train | 213,783 |
def get_primitives_paths():
    """Return the list of folders where primitives are looked for.

    Besides the registered paths, the result includes the value of any
    ``entry_point`` named ``jsons_path`` published under the name
    ``mlprimitives``, e.g.::

        entry_points = {
            'mlprimitives': [
                'jsons_path=some_module:SOME_VARIABLE'
            ]
        }

    where ``some_module`` contains something like::

        SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')

    Returns:
        list: the folders to scan, registered paths first.
    """
    entry_point_paths = [
        entry_point.load()
        for entry_point in pkg_resources.iter_entry_points('mlprimitives')
        if entry_point.name == 'jsons_path'
    ]
    return _PRIMITIVES_PATHS + entry_point_paths
"""Get the list of folders where the primitives will be looked for.
This list will include the value of any `entry_point` named `jsons_path` published under
the name `mlprimitives`.
An example of such an entry point would be::
entry_points = {
'mlprimitives': [
'jsons_path=some_module:SOME_VARIABLE'
]
}
where the module `some_module` contains a variable such as::
SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')
Returns:
list:
The list of folders.
"""
primitives_paths = list()
entry_points = pkg_resources.iter_entry_points('mlprimitives')
for entry_point in entry_points:
if entry_point.name == 'jsons_path':
path = entry_point.load()
primitives_paths.append(path)
return _PRIMITIVES_PATHS + primitives_paths | [
"def",
"get_primitives_paths",
"(",
")",
":",
"primitives_paths",
"=",
"list",
"(",
")",
"entry_points",
"=",
"pkg_resources",
".",
"iter_entry_points",
"(",
"'mlprimitives'",
")",
"for",
"entry_point",
"in",
"entry_points",
":",
"if",
"entry_point",
".",
"name",
... | Get the list of folders where the primitives will be looked for.
This list will include the value of any `entry_point` named `jsons_path` published under
the name `mlprimitives`.
An example of such an entry point would be::
entry_points = {
'mlprimitives': [
'jsons_path=some_module:SOME_VARIABLE'
]
}
where the module `some_module` contains a variable such as::
SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')
Returns:
list:
The list of folders. | [
"Get",
"the",
"list",
"of",
"folders",
"where",
"the",
"primitives",
"will",
"be",
"looked",
"for",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/primitives.py#L49-L79 | train | 213,784 |
HDI-Project/MLBlocks | mlblocks/primitives.py | load_primitive | def load_primitive(name):
"""Locate and load the JSON annotation of the given primitive.
All the paths found in PRIMTIVE_PATHS will be scanned to find a JSON file
with the given name, and as soon as a JSON with the given name is found it
is returned.
Args:
name (str): name of the primitive to look for. The name should
correspond to the primitive, not to the filename, as the
`.json` extension will be added dynamically.
Returns:
dict:
The content of the JSON annotation file loaded into a dict.
Raises:
ValueError: A `ValueError` will be raised if the primitive cannot be
found.
"""
for base_path in get_primitives_paths():
parts = name.split('.')
number_of_parts = len(parts)
for folder_parts in range(number_of_parts):
folder = os.path.join(base_path, *parts[:folder_parts])
filename = '.'.join(parts[folder_parts:]) + '.json'
json_path = os.path.join(folder, filename)
if os.path.isfile(json_path):
with open(json_path, 'r') as json_file:
LOGGER.debug('Loading primitive %s from %s', name, json_path)
return json.load(json_file)
raise ValueError("Unknown primitive: {}".format(name)) | python | def load_primitive(name):
"""Locate and load the JSON annotation of the given primitive.
All the paths found in PRIMTIVE_PATHS will be scanned to find a JSON file
with the given name, and as soon as a JSON with the given name is found it
is returned.
Args:
name (str): name of the primitive to look for. The name should
correspond to the primitive, not to the filename, as the
`.json` extension will be added dynamically.
Returns:
dict:
The content of the JSON annotation file loaded into a dict.
Raises:
ValueError: A `ValueError` will be raised if the primitive cannot be
found.
"""
for base_path in get_primitives_paths():
parts = name.split('.')
number_of_parts = len(parts)
for folder_parts in range(number_of_parts):
folder = os.path.join(base_path, *parts[:folder_parts])
filename = '.'.join(parts[folder_parts:]) + '.json'
json_path = os.path.join(folder, filename)
if os.path.isfile(json_path):
with open(json_path, 'r') as json_file:
LOGGER.debug('Loading primitive %s from %s', name, json_path)
return json.load(json_file)
raise ValueError("Unknown primitive: {}".format(name)) | [
"def",
"load_primitive",
"(",
"name",
")",
":",
"for",
"base_path",
"in",
"get_primitives_paths",
"(",
")",
":",
"parts",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"number_of_parts",
"=",
"len",
"(",
"parts",
")",
"for",
"folder_parts",
"in",
"range",
... | Locate and load the JSON annotation of the given primitive.
All the paths found in PRIMTIVE_PATHS will be scanned to find a JSON file
with the given name, and as soon as a JSON with the given name is found it
is returned.
Args:
name (str): name of the primitive to look for. The name should
correspond to the primitive, not to the filename, as the
`.json` extension will be added dynamically.
Returns:
dict:
The content of the JSON annotation file loaded into a dict.
Raises:
ValueError: A `ValueError` will be raised if the primitive cannot be
found. | [
"Locate",
"and",
"load",
"the",
"JSON",
"annotation",
"of",
"the",
"given",
"primitive",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/primitives.py#L82-L117 | train | 213,785 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_usps | def load_usps():
"""USPs Digits Dataset.
The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
containing 9298 224x224 RGB photos of handwritten digits, and the target is
a 1d numpy integer array containing the label of the digit represented in
the image.
"""
dataset_path = _load('usps')
df = _load_csv(dataset_path, 'data')
X = _load_images(os.path.join(dataset_path, 'images'), df.image)
y = df.label.values
return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True) | python | def load_usps():
"""USPs Digits Dataset.
The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
containing 9298 224x224 RGB photos of handwritten digits, and the target is
a 1d numpy integer array containing the label of the digit represented in
the image.
"""
dataset_path = _load('usps')
df = _load_csv(dataset_path, 'data')
X = _load_images(os.path.join(dataset_path, 'images'), df.image)
y = df.label.values
return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True) | [
"def",
"load_usps",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'usps'",
")",
"df",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"X",
"=",
"_load_images",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataset_path",
",",
"'images'",
"... | USPs Digits Dataset.
The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
containing 9298 224x224 RGB photos of handwritten digits, and the target is
a 1d numpy integer array containing the label of the digit represented in
the image. | [
"USPs",
"Digits",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L237-L251 | train | 213,786 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_handgeometry | def load_handgeometry():
"""Hand Geometry Dataset.
The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
containing 112 224x224 RGB photos of hands, and the target is a 1d numpy
float array containing the width of the wrist in centimeters.
"""
dataset_path = _load('handgeometry')
df = _load_csv(dataset_path, 'data')
X = _load_images(os.path.join(dataset_path, 'images'), df.image)
y = df.target.values
return Dataset(load_handgeometry.__doc__, X, y, r2_score) | python | def load_handgeometry():
"""Hand Geometry Dataset.
The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
containing 112 224x224 RGB photos of hands, and the target is a 1d numpy
float array containing the width of the wrist in centimeters.
"""
dataset_path = _load('handgeometry')
df = _load_csv(dataset_path, 'data')
X = _load_images(os.path.join(dataset_path, 'images'), df.image)
y = df.target.values
return Dataset(load_handgeometry.__doc__, X, y, r2_score) | [
"def",
"load_handgeometry",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'handgeometry'",
")",
"df",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"X",
"=",
"_load_images",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataset_path",
",",
... | Hand Geometry Dataset.
The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
containing 112 224x224 RGB photos of hands, and the target is a 1d numpy
float array containing the width of the wrist in centimeters. | [
"Hand",
"Geometry",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L254-L267 | train | 213,787 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_personae | def load_personae():
"""Personae Dataset.
The data of this dataset is a 2d numpy array vector containing 145 entries
that include texts written by Dutch users in Twitter, with some additional
information about the author, and the target is a 1d numpy binary integer
array indicating whether the author was extrovert or not.
"""
dataset_path = _load('personae')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
return Dataset(load_personae.__doc__, X, y, accuracy_score, stratify=True) | python | def load_personae():
"""Personae Dataset.
The data of this dataset is a 2d numpy array vector containing 145 entries
that include texts written by Dutch users in Twitter, with some additional
information about the author, and the target is a 1d numpy binary integer
array indicating whether the author was extrovert or not.
"""
dataset_path = _load('personae')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
return Dataset(load_personae.__doc__, X, y, accuracy_score, stratify=True) | [
"def",
"load_personae",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'personae'",
")",
"X",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"y",
"=",
"X",
".",
"pop",
"(",
"'label'",
")",
".",
"values",
"return",
"Dataset",
"(",
"load_... | Personae Dataset.
The data of this dataset is a 2d numpy array vector containing 145 entries
that include texts written by Dutch users in Twitter, with some additional
information about the author, and the target is a 1d numpy binary integer
array indicating whether the author was extrovert or not. | [
"Personae",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L270-L283 | train | 213,788 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_umls | def load_umls():
"""UMLs Dataset.
The data consists of information about a 135 Graph and the relations between
their nodes given as a DataFrame with three columns, source, target and type,
indicating which nodes are related and with which type of link. The target is
a 1d numpy binary integer array indicating whether the indicated link exists
or not.
"""
dataset_path = _load('umls')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
return Dataset(load_umls.__doc__, X, y, accuracy_score, stratify=True, graph=graph) | python | def load_umls():
"""UMLs Dataset.
The data consists of information about a 135 Graph and the relations between
their nodes given as a DataFrame with three columns, source, target and type,
indicating which nodes are related and with which type of link. The target is
a 1d numpy binary integer array indicating whether the indicated link exists
or not.
"""
dataset_path = _load('umls')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
return Dataset(load_umls.__doc__, X, y, accuracy_score, stratify=True, graph=graph) | [
"def",
"load_umls",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'umls'",
")",
"X",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"y",
"=",
"X",
".",
"pop",
"(",
"'label'",
")",
".",
"values",
"graph",
"=",
"nx",
".",
"Graph",
"(... | UMLs Dataset.
The data consists of information about a 135 Graph and the relations between
their nodes given as a DataFrame with three columns, source, target and type,
indicating which nodes are related and with which type of link. The target is
a 1d numpy binary integer array indicating whether the indicated link exists
or not. | [
"UMLs",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L286-L302 | train | 213,789 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_dic28 | def load_dic28():
"""DIC28 Dataset from Pajek.
This network represents connections among English words in a dictionary.
It was generated from Knuth's dictionary. Two words are connected by an
edge if we can reach one from the other by
- changing a single character (e. g., work - word)
- adding / removing a single character (e. g., ever - fever).
There exist 52,652 words (vertices in a network) having 2 up to 8 characters
in the dictionary. The obtained network has 89038 edges.
"""
dataset_path = _load('dic28')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml')))
graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml')))
graph = graph1.copy()
graph.add_nodes_from(graph2.nodes(data=True))
graph.add_edges_from(graph2.edges)
graph.add_edges_from(X[['graph1', 'graph2']].values)
graphs = {
'graph1': graph1,
'graph2': graph2,
}
return Dataset(load_dic28.__doc__, X, y, accuracy_score,
stratify=True, graph=graph, graphs=graphs) | python | def load_dic28():
"""DIC28 Dataset from Pajek.
This network represents connections among English words in a dictionary.
It was generated from Knuth's dictionary. Two words are connected by an
edge if we can reach one from the other by
- changing a single character (e. g., work - word)
- adding / removing a single character (e. g., ever - fever).
There exist 52,652 words (vertices in a network) having 2 up to 8 characters
in the dictionary. The obtained network has 89038 edges.
"""
dataset_path = _load('dic28')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml')))
graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml')))
graph = graph1.copy()
graph.add_nodes_from(graph2.nodes(data=True))
graph.add_edges_from(graph2.edges)
graph.add_edges_from(X[['graph1', 'graph2']].values)
graphs = {
'graph1': graph1,
'graph2': graph2,
}
return Dataset(load_dic28.__doc__, X, y, accuracy_score,
stratify=True, graph=graph, graphs=graphs) | [
"def",
"load_dic28",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'dic28'",
")",
"X",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"y",
"=",
"X",
".",
"pop",
"(",
"'label'",
")",
".",
"values",
"graph1",
"=",
"nx",
".",
"Graph",
... | DIC28 Dataset from Pajek.
This network represents connections among English words in a dictionary.
It was generated from Knuth's dictionary. Two words are connected by an
edge if we can reach one from the other by
- changing a single character (e. g., work - word)
- adding / removing a single character (e. g., ever - fever).
There exist 52,652 words (vertices in a network) having 2 up to 8 characters
in the dictionary. The obtained network has 89038 edges. | [
"DIC28",
"Dataset",
"from",
"Pajek",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L305-L337 | train | 213,790 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_amazon | def load_amazon():
"""Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community.
"""
dataset_path = _load('amazon')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph) | python | def load_amazon():
"""Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community.
"""
dataset_path = _load('amazon')
X = _load_csv(dataset_path, 'data')
y = X.pop('label').values
graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph) | [
"def",
"load_amazon",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'amazon'",
")",
"X",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"y",
"=",
"X",
".",
"pop",
"(",
"'label'",
")",
".",
"values",
"graph",
"=",
"nx",
".",
"Graph",
... | Amazon product co-purchasing network and ground-truth communities.
Network was collected by crawling Amazon website. It is based on Customers Who Bought
This Item Also Bought feature of the Amazon website. If a product i is frequently
co-purchased with product j, the graph contains an undirected edge from i to j.
Each product category provided by Amazon defines each ground-truth community. | [
"Amazon",
"product",
"co",
"-",
"purchasing",
"network",
"and",
"ground",
"-",
"truth",
"communities",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L357-L373 | train | 213,791 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_jester | def load_jester():
"""Ratings from the Jester Online Joke Recommender System.
This dataset consists of over 1.7 million instances of (user_id, item_id, rating)
triples, which is split 50-50 into train and test data.
source: "University of California Berkeley, CA"
sourceURI: "http://eigentaste.berkeley.edu/dataset/"
"""
dataset_path = _load('jester')
X = _load_csv(dataset_path, 'data')
y = X.pop('rating').values
return Dataset(load_jester.__doc__, X, y, r2_score) | python | def load_jester():
"""Ratings from the Jester Online Joke Recommender System.
This dataset consists of over 1.7 million instances of (user_id, item_id, rating)
triples, which is split 50-50 into train and test data.
source: "University of California Berkeley, CA"
sourceURI: "http://eigentaste.berkeley.edu/dataset/"
"""
dataset_path = _load('jester')
X = _load_csv(dataset_path, 'data')
y = X.pop('rating').values
return Dataset(load_jester.__doc__, X, y, r2_score) | [
"def",
"load_jester",
"(",
")",
":",
"dataset_path",
"=",
"_load",
"(",
"'jester'",
")",
"X",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
")",
"y",
"=",
"X",
".",
"pop",
"(",
"'rating'",
")",
".",
"values",
"return",
"Dataset",
"(",
"load_jes... | Ratings from the Jester Online Joke Recommender System.
This dataset consists of over 1.7 million instances of (user_id, item_id, rating)
triples, which is split 50-50 into train and test data.
source: "University of California Berkeley, CA"
sourceURI: "http://eigentaste.berkeley.edu/dataset/" | [
"Ratings",
"from",
"the",
"Jester",
"Online",
"Joke",
"Recommender",
"System",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L376-L391 | train | 213,792 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_wikiqa | def load_wikiqa():
"""A Challenge Dataset for Open-Domain Question Answering.
WikiQA dataset is a publicly available set of question and sentence (QS) pairs,
collected and annotated for research on open-domain question answering.
source: "Microsoft"
sourceURI: "https://www.microsoft.com/en-us/research/publication/wikiqa-a-challenge-dataset-for-open-domain-question-answering/#"
""" # noqa
dataset_path = _load('wikiqa')
data = _load_csv(dataset_path, 'data', set_index=True)
questions = _load_csv(dataset_path, 'questions', set_index=True)
sentences = _load_csv(dataset_path, 'sentences', set_index=True)
vocabulary = _load_csv(dataset_path, 'vocabulary', set_index=True)
entities = {
'data': (data, 'd3mIndex', None),
'questions': (questions, 'qIndex', None),
'sentences': (sentences, 'sIndex', None),
'vocabulary': (vocabulary, 'index', None)
}
relationships = [
('questions', 'qIndex', 'data', 'qIndex'),
('sentences', 'sIndex', 'data', 'sIndex')
]
target = data.pop('isAnswer').values
return Dataset(load_wikiqa.__doc__, data, target, accuracy_score, startify=True,
entities=entities, relationships=relationships) | python | def load_wikiqa():
"""A Challenge Dataset for Open-Domain Question Answering.
WikiQA dataset is a publicly available set of question and sentence (QS) pairs,
collected and annotated for research on open-domain question answering.
source: "Microsoft"
sourceURI: "https://www.microsoft.com/en-us/research/publication/wikiqa-a-challenge-dataset-for-open-domain-question-answering/#"
""" # noqa
dataset_path = _load('wikiqa')
data = _load_csv(dataset_path, 'data', set_index=True)
questions = _load_csv(dataset_path, 'questions', set_index=True)
sentences = _load_csv(dataset_path, 'sentences', set_index=True)
vocabulary = _load_csv(dataset_path, 'vocabulary', set_index=True)
entities = {
'data': (data, 'd3mIndex', None),
'questions': (questions, 'qIndex', None),
'sentences': (sentences, 'sIndex', None),
'vocabulary': (vocabulary, 'index', None)
}
relationships = [
('questions', 'qIndex', 'data', 'qIndex'),
('sentences', 'sIndex', 'data', 'sIndex')
]
target = data.pop('isAnswer').values
return Dataset(load_wikiqa.__doc__, data, target, accuracy_score, startify=True,
entities=entities, relationships=relationships) | [
"def",
"load_wikiqa",
"(",
")",
":",
"# noqa",
"dataset_path",
"=",
"_load",
"(",
"'wikiqa'",
")",
"data",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'data'",
",",
"set_index",
"=",
"True",
")",
"questions",
"=",
"_load_csv",
"(",
"dataset_path",
",",
"'... | A Challenge Dataset for Open-Domain Question Answering.
WikiQA dataset is a publicly available set of question and sentence (QS) pairs,
collected and annotated for research on open-domain question answering.
source: "Microsoft"
sourceURI: "https://www.microsoft.com/en-us/research/publication/wikiqa-a-challenge-dataset-for-open-domain-question-answering/#" | [
"A",
"Challenge",
"Dataset",
"for",
"Open",
"-",
"Domain",
"Question",
"Answering",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L394-L425 | train | 213,793 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_newsgroups | def load_newsgroups():
"""20 News Groups Dataset.
The data of this dataset is a 1d numpy array vector containing the texts
from 11314 newsgroups posts, and the target is a 1d numpy integer array
containing the label of one of the 20 topics that they are about.
"""
dataset = datasets.fetch_20newsgroups()
return Dataset(load_newsgroups.__doc__, np.array(dataset.data), dataset.target,
accuracy_score, stratify=True) | python | def load_newsgroups():
"""20 News Groups Dataset.
The data of this dataset is a 1d numpy array vector containing the texts
from 11314 newsgroups posts, and the target is a 1d numpy integer array
containing the label of one of the 20 topics that they are about.
"""
dataset = datasets.fetch_20newsgroups()
return Dataset(load_newsgroups.__doc__, np.array(dataset.data), dataset.target,
accuracy_score, stratify=True) | [
"def",
"load_newsgroups",
"(",
")",
":",
"dataset",
"=",
"datasets",
".",
"fetch_20newsgroups",
"(",
")",
"return",
"Dataset",
"(",
"load_newsgroups",
".",
"__doc__",
",",
"np",
".",
"array",
"(",
"dataset",
".",
"data",
")",
",",
"dataset",
".",
"target",... | 20 News Groups Dataset.
The data of this dataset is a 1d numpy array vector containing the texts
from 11314 newsgroups posts, and the target is a 1d numpy integer array
containing the label of one of the 20 topics that they are about. | [
"20",
"News",
"Groups",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L428-L437 | train | 213,794 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_iris | def load_iris():
"""Iris Dataset."""
dataset = datasets.load_iris()
return Dataset(load_iris.__doc__, dataset.data, dataset.target,
accuracy_score, stratify=True) | python | def load_iris():
"""Iris Dataset."""
dataset = datasets.load_iris()
return Dataset(load_iris.__doc__, dataset.data, dataset.target,
accuracy_score, stratify=True) | [
"def",
"load_iris",
"(",
")",
":",
"dataset",
"=",
"datasets",
".",
"load_iris",
"(",
")",
"return",
"Dataset",
"(",
"load_iris",
".",
"__doc__",
",",
"dataset",
".",
"data",
",",
"dataset",
".",
"target",
",",
"accuracy_score",
",",
"stratify",
"=",
"Tr... | Iris Dataset. | [
"Iris",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L440-L444 | train | 213,795 |
HDI-Project/MLBlocks | mlblocks/datasets.py | load_boston | def load_boston():
"""Boston House Prices Dataset."""
dataset = datasets.load_boston()
return Dataset(load_boston.__doc__, dataset.data, dataset.target, r2_score) | python | def load_boston():
"""Boston House Prices Dataset."""
dataset = datasets.load_boston()
return Dataset(load_boston.__doc__, dataset.data, dataset.target, r2_score) | [
"def",
"load_boston",
"(",
")",
":",
"dataset",
"=",
"datasets",
".",
"load_boston",
"(",
")",
"return",
"Dataset",
"(",
"load_boston",
".",
"__doc__",
",",
"dataset",
".",
"data",
",",
"dataset",
".",
"target",
",",
"r2_score",
")"
] | Boston House Prices Dataset. | [
"Boston",
"House",
"Prices",
"Dataset",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L447-L450 | train | 213,796 |
HDI-Project/MLBlocks | mlblocks/datasets.py | Dataset.get_splits | def get_splits(self, n_splits=1):
"""Return splits of this dataset ready for Cross Validation.
If n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
Args:
n_splits (int): Number of times that the data needs to be splitted.
Returns:
tuple or list:
if n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
"""
if n_splits == 1:
stratify = self.target if self._stratify else None
return train_test_split(
self.data,
self.target,
shuffle=self._shuffle,
stratify=stratify
)
else:
cv_class = StratifiedKFold if self._stratify else KFold
cv = cv_class(n_splits=n_splits, shuffle=self._shuffle)
splits = list()
for train, test in cv.split(self.data, self.target):
X_train = self._get_split(self.data, train)
y_train = self._get_split(self.target, train)
X_test = self._get_split(self.data, test)
y_test = self._get_split(self.target, test)
splits.append((X_train, X_test, y_train, y_test))
return splits | python | def get_splits(self, n_splits=1):
"""Return splits of this dataset ready for Cross Validation.
If n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
Args:
n_splits (int): Number of times that the data needs to be splitted.
Returns:
tuple or list:
if n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
"""
if n_splits == 1:
stratify = self.target if self._stratify else None
return train_test_split(
self.data,
self.target,
shuffle=self._shuffle,
stratify=stratify
)
else:
cv_class = StratifiedKFold if self._stratify else KFold
cv = cv_class(n_splits=n_splits, shuffle=self._shuffle)
splits = list()
for train, test in cv.split(self.data, self.target):
X_train = self._get_split(self.data, train)
y_train = self._get_split(self.target, train)
X_test = self._get_split(self.data, test)
y_test = self._get_split(self.target, test)
splits.append((X_train, X_test, y_train, y_test))
return splits | [
"def",
"get_splits",
"(",
"self",
",",
"n_splits",
"=",
"1",
")",
":",
"if",
"n_splits",
"==",
"1",
":",
"stratify",
"=",
"self",
".",
"target",
"if",
"self",
".",
"_stratify",
"else",
"None",
"return",
"train_test_split",
"(",
"self",
".",
"data",
","... | Return splits of this dataset ready for Cross Validation.
If n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
Args:
n_splits (int): Number of times that the data needs to be splitted.
Returns:
tuple or list:
if n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split. | [
"Return",
"splits",
"of",
"this",
"dataset",
"ready",
"for",
"Cross",
"Validation",
"."
] | e1ca77bce3c4537c0800a4c1395e1b6bbde5465d | https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L144-L184 | train | 213,797 |
scoutapp/scout_apm_python | src/scout_apm/api/context.py | Context.add | def add(key, value):
"""Adds context to the currently executing request.
:key: Any String identifying the request context.
Example: "user_ip", "plan", "alert_count"
:value: Any json-serializable type.
Example: "1.1.1.1", "free", 100
:returns: nothing.
"""
tr = TrackedRequest.instance()
tr.tag(key, value) | python | def add(key, value):
"""Adds context to the currently executing request.
:key: Any String identifying the request context.
Example: "user_ip", "plan", "alert_count"
:value: Any json-serializable type.
Example: "1.1.1.1", "free", 100
:returns: nothing.
"""
tr = TrackedRequest.instance()
tr.tag(key, value) | [
"def",
"add",
"(",
"key",
",",
"value",
")",
":",
"tr",
"=",
"TrackedRequest",
".",
"instance",
"(",
")",
"tr",
".",
"tag",
"(",
"key",
",",
"value",
")"
] | Adds context to the currently executing request.
:key: Any String identifying the request context.
Example: "user_ip", "plan", "alert_count"
:value: Any json-serializable type.
Example: "1.1.1.1", "free", 100
:returns: nothing. | [
"Adds",
"context",
"to",
"the",
"currently",
"executing",
"request",
"."
] | e5539ee23b8129be9b75d5007c88b6158b51294f | https://github.com/scoutapp/scout_apm_python/blob/e5539ee23b8129be9b75d5007c88b6158b51294f/src/scout_apm/api/context.py#L8-L18 | train | 213,798 |
scoutapp/scout_apm_python | src/scout_apm/core/platform_detection.py | PlatformDetection.libc | def libc(cls):
"""
Alpine linux uses a non glibc version of the standard library, it uses
the stripped down musl instead. The core agent can be built against it,
but which one is running must be detected. Shelling out to `ldd`
appears to be the most reliable way to do this.
"""
try:
output = subprocess.check_output(
["ldd", "--version"], stderr=subprocess.STDOUT
)
except (OSError, subprocess.CalledProcessError):
return "gnu"
else:
if b"musl" in output:
return "musl"
else:
return "gnu" | python | def libc(cls):
"""
Alpine linux uses a non glibc version of the standard library, it uses
the stripped down musl instead. The core agent can be built against it,
but which one is running must be detected. Shelling out to `ldd`
appears to be the most reliable way to do this.
"""
try:
output = subprocess.check_output(
["ldd", "--version"], stderr=subprocess.STDOUT
)
except (OSError, subprocess.CalledProcessError):
return "gnu"
else:
if b"musl" in output:
return "musl"
else:
return "gnu" | [
"def",
"libc",
"(",
"cls",
")",
":",
"try",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"ldd\"",
",",
"\"--version\"",
"]",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"except",
"(",
"OSError",
",",
"subprocess",
".",
"... | Alpine linux uses a non glibc version of the standard library, it uses
the stripped down musl instead. The core agent can be built against it,
but which one is running must be detected. Shelling out to `ldd`
appears to be the most reliable way to do this. | [
"Alpine",
"linux",
"uses",
"a",
"non",
"glibc",
"version",
"of",
"the",
"standard",
"library",
"it",
"uses",
"the",
"stripped",
"down",
"musl",
"instead",
".",
"The",
"core",
"agent",
"can",
"be",
"built",
"against",
"it",
"but",
"which",
"one",
"is",
"r... | e5539ee23b8129be9b75d5007c88b6158b51294f | https://github.com/scoutapp/scout_apm_python/blob/e5539ee23b8129be9b75d5007c88b6158b51294f/src/scout_apm/core/platform_detection.py#L45-L62 | train | 213,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.