repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
census-instrumentation/opencensus-python | contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py | FlaskMiddleware._after_request | def _after_request(self, response):
"""A function to be run after each request.
See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(flask.request.url, self.blacklist_paths):
return response
try:
tracer = execution_context.get_opencensus_tracer()
tracer.add_attribute_to_current_span(
HTTP_STATUS_CODE,
str(response.status_code))
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
finally:
return response | python | def _after_request(self, response):
"""A function to be run after each request.
See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(flask.request.url, self.blacklist_paths):
return response
try:
tracer = execution_context.get_opencensus_tracer()
tracer.add_attribute_to_current_span(
HTTP_STATUS_CODE,
str(response.status_code))
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
finally:
return response | [
"def",
"_after_request",
"(",
"self",
",",
"response",
")",
":",
"# Do not trace if the url is blacklisted",
"if",
"utils",
".",
"disable_tracing_url",
"(",
"flask",
".",
"request",
".",
"url",
",",
"self",
".",
"blacklist_paths",
")",
":",
"return",
"response",
... | A function to be run after each request.
See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request | [
"A",
"function",
"to",
"be",
"run",
"after",
"each",
"request",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py#L156-L173 | train | 221,100 |
census-instrumentation/opencensus-python | opencensus/trace/utils.py | get_func_name | def get_func_name(func):
"""Return a name which includes the module name and function name."""
func_name = getattr(func, '__name__', func.__class__.__name__)
module_name = func.__module__
if module_name is not None:
module_name = func.__module__
return '{}.{}'.format(module_name, func_name)
return func_name | python | def get_func_name(func):
"""Return a name which includes the module name and function name."""
func_name = getattr(func, '__name__', func.__class__.__name__)
module_name = func.__module__
if module_name is not None:
module_name = func.__module__
return '{}.{}'.format(module_name, func_name)
return func_name | [
"def",
"get_func_name",
"(",
"func",
")",
":",
"func_name",
"=",
"getattr",
"(",
"func",
",",
"'__name__'",
",",
"func",
".",
"__class__",
".",
"__name__",
")",
"module_name",
"=",
"func",
".",
"__module__",
"if",
"module_name",
"is",
"not",
"None",
":",
... | Return a name which includes the module name and function name. | [
"Return",
"a",
"name",
"which",
"includes",
"the",
"module",
"name",
"and",
"function",
"name",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/utils.py#L30-L39 | train | 221,101 |
census-instrumentation/opencensus-python | opencensus/trace/utils.py | disable_tracing_url | def disable_tracing_url(url, blacklist_paths=None):
"""Disable tracing on the provided blacklist paths, by default not tracing
the health check request.
If the url path starts with the blacklisted path, return True.
:type blacklist_paths: list
:param blacklist_paths: Paths that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_paths is None:
blacklist_paths = DEFAULT_BLACKLIST_PATHS
# Remove the 'https?|ftp://' if exists
url = re.sub(URL_PATTERN, '', url)
# Split the url by the first '/' and get the path part
url_path = url.split('/', 1)[1]
for path in blacklist_paths:
if url_path.startswith(path):
return True
return False | python | def disable_tracing_url(url, blacklist_paths=None):
"""Disable tracing on the provided blacklist paths, by default not tracing
the health check request.
If the url path starts with the blacklisted path, return True.
:type blacklist_paths: list
:param blacklist_paths: Paths that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_paths is None:
blacklist_paths = DEFAULT_BLACKLIST_PATHS
# Remove the 'https?|ftp://' if exists
url = re.sub(URL_PATTERN, '', url)
# Split the url by the first '/' and get the path part
url_path = url.split('/', 1)[1]
for path in blacklist_paths:
if url_path.startswith(path):
return True
return False | [
"def",
"disable_tracing_url",
"(",
"url",
",",
"blacklist_paths",
"=",
"None",
")",
":",
"if",
"blacklist_paths",
"is",
"None",
":",
"blacklist_paths",
"=",
"DEFAULT_BLACKLIST_PATHS",
"# Remove the 'https?|ftp://' if exists",
"url",
"=",
"re",
".",
"sub",
"(",
"URL_... | Disable tracing on the provided blacklist paths, by default not tracing
the health check request.
If the url path starts with the blacklisted path, return True.
:type blacklist_paths: list
:param blacklist_paths: Paths that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing. | [
"Disable",
"tracing",
"on",
"the",
"provided",
"blacklist",
"paths",
"by",
"default",
"not",
"tracing",
"the",
"health",
"check",
"request",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/utils.py#L42-L67 | train | 221,102 |
census-instrumentation/opencensus-python | opencensus/trace/utils.py | disable_tracing_hostname | def disable_tracing_hostname(url, blacklist_hostnames=None):
"""Disable tracing for the provided blacklist URLs, by default not tracing
the exporter url.
If the url path starts with the blacklisted path, return True.
:type blacklist_hostnames: list
:param blacklist_hostnames: URL that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_hostnames is None:
# Exporter host_name are not traced by default
_tracer = execution_context.get_opencensus_tracer()
try:
blacklist_hostnames = [
'{}:{}'.format(
_tracer.exporter.host_name,
_tracer.exporter.port
)
]
except(AttributeError):
blacklist_hostnames = []
return url in blacklist_hostnames | python | def disable_tracing_hostname(url, blacklist_hostnames=None):
"""Disable tracing for the provided blacklist URLs, by default not tracing
the exporter url.
If the url path starts with the blacklisted path, return True.
:type blacklist_hostnames: list
:param blacklist_hostnames: URL that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_hostnames is None:
# Exporter host_name are not traced by default
_tracer = execution_context.get_opencensus_tracer()
try:
blacklist_hostnames = [
'{}:{}'.format(
_tracer.exporter.host_name,
_tracer.exporter.port
)
]
except(AttributeError):
blacklist_hostnames = []
return url in blacklist_hostnames | [
"def",
"disable_tracing_hostname",
"(",
"url",
",",
"blacklist_hostnames",
"=",
"None",
")",
":",
"if",
"blacklist_hostnames",
"is",
"None",
":",
"# Exporter host_name are not traced by default",
"_tracer",
"=",
"execution_context",
".",
"get_opencensus_tracer",
"(",
")",... | Disable tracing for the provided blacklist URLs, by default not tracing
the exporter url.
If the url path starts with the blacklisted path, return True.
:type blacklist_hostnames: list
:param blacklist_hostnames: URL that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing. | [
"Disable",
"tracing",
"for",
"the",
"provided",
"blacklist",
"URLs",
"by",
"default",
"not",
"tracing",
"the",
"exporter",
"url",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/utils.py#L70-L95 | train | 221,103 |
census-instrumentation/opencensus-python | opencensus/trace/trace_options.py | TraceOptions.set_enabled | def set_enabled(self, enabled):
"""Update the last bit of the trace options byte str.
:type enabled: bool
:param enabled: Whether enable tracing in this span context or not.
"""
enabled_bit = '1' if enabled else '0'
self.trace_options_byte = str(
self.trace_options_byte)[:-1] + enabled_bit
self.enabled = self.get_enabled | python | def set_enabled(self, enabled):
"""Update the last bit of the trace options byte str.
:type enabled: bool
:param enabled: Whether enable tracing in this span context or not.
"""
enabled_bit = '1' if enabled else '0'
self.trace_options_byte = str(
self.trace_options_byte)[:-1] + enabled_bit
self.enabled = self.get_enabled | [
"def",
"set_enabled",
"(",
"self",
",",
"enabled",
")",
":",
"enabled_bit",
"=",
"'1'",
"if",
"enabled",
"else",
"'0'",
"self",
".",
"trace_options_byte",
"=",
"str",
"(",
"self",
".",
"trace_options_byte",
")",
"[",
":",
"-",
"1",
"]",
"+",
"enabled_bit... | Update the last bit of the trace options byte str.
:type enabled: bool
:param enabled: Whether enable tracing in this span context or not. | [
"Update",
"the",
"last",
"bit",
"of",
"the",
"trace",
"options",
"byte",
"str",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/trace_options.py#L70-L79 | train | 221,104 |
census-instrumentation/opencensus-python | opencensus/common/monitored_resource/k8s_utils.py | get_k8s_metadata | def get_k8s_metadata():
"""Get kubernetes container metadata, as on GCP GKE."""
k8s_metadata = {}
gcp_cluster = (gcp_metadata_config.GcpMetadataConfig
.get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY))
if gcp_cluster is not None:
k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster
for attribute_key, attribute_env in _K8S_ENV_ATTRIBUTES.items():
attribute_value = os.environ.get(attribute_env)
if attribute_value is not None:
k8s_metadata[attribute_key] = attribute_value
return k8s_metadata | python | def get_k8s_metadata():
"""Get kubernetes container metadata, as on GCP GKE."""
k8s_metadata = {}
gcp_cluster = (gcp_metadata_config.GcpMetadataConfig
.get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY))
if gcp_cluster is not None:
k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster
for attribute_key, attribute_env in _K8S_ENV_ATTRIBUTES.items():
attribute_value = os.environ.get(attribute_env)
if attribute_value is not None:
k8s_metadata[attribute_key] = attribute_value
return k8s_metadata | [
"def",
"get_k8s_metadata",
"(",
")",
":",
"k8s_metadata",
"=",
"{",
"}",
"gcp_cluster",
"=",
"(",
"gcp_metadata_config",
".",
"GcpMetadataConfig",
".",
"get_attribute",
"(",
"gcp_metadata_config",
".",
"CLUSTER_NAME_KEY",
")",
")",
"if",
"gcp_cluster",
"is",
"not"... | Get kubernetes container metadata, as on GCP GKE. | [
"Get",
"kubernetes",
"container",
"metadata",
"as",
"on",
"GCP",
"GKE",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/monitored_resource/k8s_utils.py#L50-L64 | train | 221,105 |
census-instrumentation/opencensus-python | opencensus/tags/tag_map.py | TagMap.insert | def insert(self, key, value):
"""Inserts a key and value in the map if the map does not already
contain the key.
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: a tag key to insert into the map
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: a tag value that is associated with the tag key and
the value to insert into the tag map
"""
if key in self.map:
return
try:
tag_key = TagKey(key)
tag_val = TagValue(value)
self.map[tag_key] = tag_val
except ValueError:
raise | python | def insert(self, key, value):
"""Inserts a key and value in the map if the map does not already
contain the key.
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: a tag key to insert into the map
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: a tag value that is associated with the tag key and
the value to insert into the tag map
"""
if key in self.map:
return
try:
tag_key = TagKey(key)
tag_val = TagValue(value)
self.map[tag_key] = tag_val
except ValueError:
raise | [
"def",
"insert",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"in",
"self",
".",
"map",
":",
"return",
"try",
":",
"tag_key",
"=",
"TagKey",
"(",
"key",
")",
"tag_val",
"=",
"TagValue",
"(",
"value",
")",
"self",
".",
"map",
"[",... | Inserts a key and value in the map if the map does not already
contain the key.
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: a tag key to insert into the map
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: a tag value that is associated with the tag key and
the value to insert into the tag map | [
"Inserts",
"a",
"key",
"and",
"value",
"in",
"the",
"map",
"if",
"the",
"map",
"does",
"not",
"already",
"contain",
"the",
"key",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/tags/tag_map.py#L35-L55 | train | 221,106 |
census-instrumentation/opencensus-python | opencensus/tags/tag_map.py | TagMap.update | def update(self, key, value):
"""Updates the map by updating the value of a key
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: A tag key to be updated
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: The value to update the key to in the map
"""
if key in self.map:
self.map[key] = value | python | def update(self, key, value):
"""Updates the map by updating the value of a key
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: A tag key to be updated
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: The value to update the key to in the map
"""
if key in self.map:
self.map[key] = value | [
"def",
"update",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"in",
"self",
".",
"map",
":",
"self",
".",
"map",
"[",
"key",
"]",
"=",
"value"
] | Updates the map by updating the value of a key
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: A tag key to be updated
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: The value to update the key to in the map | [
"Updates",
"the",
"map",
"by",
"updating",
"the",
"value",
"of",
"a",
"key"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/tags/tag_map.py#L68-L79 | train | 221,107 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py | trace_integration | def trace_integration(tracer=None):
"""Wrap threading functions to trace."""
log.info("Integrated module: {}".format(MODULE_NAME))
# Wrap the threading start function
start_func = getattr(threading.Thread, "start")
setattr(
threading.Thread, start_func.__name__, wrap_threading_start(start_func)
)
# Wrap the threading run function
run_func = getattr(threading.Thread, "run")
setattr(threading.Thread, run_func.__name__, wrap_threading_run(run_func))
# Wrap the threading run function
apply_async_func = getattr(pool.Pool, "apply_async")
setattr(
pool.Pool,
apply_async_func.__name__,
wrap_apply_async(apply_async_func),
)
# Wrap the threading run function
submit_func = getattr(futures.ThreadPoolExecutor, "submit")
setattr(
futures.ThreadPoolExecutor,
submit_func.__name__,
wrap_submit(submit_func),
) | python | def trace_integration(tracer=None):
"""Wrap threading functions to trace."""
log.info("Integrated module: {}".format(MODULE_NAME))
# Wrap the threading start function
start_func = getattr(threading.Thread, "start")
setattr(
threading.Thread, start_func.__name__, wrap_threading_start(start_func)
)
# Wrap the threading run function
run_func = getattr(threading.Thread, "run")
setattr(threading.Thread, run_func.__name__, wrap_threading_run(run_func))
# Wrap the threading run function
apply_async_func = getattr(pool.Pool, "apply_async")
setattr(
pool.Pool,
apply_async_func.__name__,
wrap_apply_async(apply_async_func),
)
# Wrap the threading run function
submit_func = getattr(futures.ThreadPoolExecutor, "submit")
setattr(
futures.ThreadPoolExecutor,
submit_func.__name__,
wrap_submit(submit_func),
) | [
"def",
"trace_integration",
"(",
"tracer",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"\"Integrated module: {}\"",
".",
"format",
"(",
"MODULE_NAME",
")",
")",
"# Wrap the threading start function",
"start_func",
"=",
"getattr",
"(",
"threading",
".",
"Thread... | Wrap threading functions to trace. | [
"Wrap",
"threading",
"functions",
"to",
"trace",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py#L29-L56 | train | 221,108 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py | wrap_threading_start | def wrap_threading_start(start_func):
"""Wrap the start function from thread. Put the tracer informations in the
threading object.
"""
def call(self):
self._opencensus_context = (
execution_context.get_opencensus_full_context()
)
return start_func(self)
return call | python | def wrap_threading_start(start_func):
"""Wrap the start function from thread. Put the tracer informations in the
threading object.
"""
def call(self):
self._opencensus_context = (
execution_context.get_opencensus_full_context()
)
return start_func(self)
return call | [
"def",
"wrap_threading_start",
"(",
"start_func",
")",
":",
"def",
"call",
"(",
"self",
")",
":",
"self",
".",
"_opencensus_context",
"=",
"(",
"execution_context",
".",
"get_opencensus_full_context",
"(",
")",
")",
"return",
"start_func",
"(",
"self",
")",
"r... | Wrap the start function from thread. Put the tracer informations in the
threading object. | [
"Wrap",
"the",
"start",
"function",
"from",
"thread",
".",
"Put",
"the",
"tracer",
"informations",
"in",
"the",
"threading",
"object",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py#L59-L70 | train | 221,109 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py | wrap_threading_run | def wrap_threading_run(run_func):
"""Wrap the run function from thread. Get the tracer informations from the
threading object and set it as current tracer.
"""
def call(self):
execution_context.set_opencensus_full_context(
*self._opencensus_context
)
return run_func(self)
return call | python | def wrap_threading_run(run_func):
"""Wrap the run function from thread. Get the tracer informations from the
threading object and set it as current tracer.
"""
def call(self):
execution_context.set_opencensus_full_context(
*self._opencensus_context
)
return run_func(self)
return call | [
"def",
"wrap_threading_run",
"(",
"run_func",
")",
":",
"def",
"call",
"(",
"self",
")",
":",
"execution_context",
".",
"set_opencensus_full_context",
"(",
"*",
"self",
".",
"_opencensus_context",
")",
"return",
"run_func",
"(",
"self",
")",
"return",
"call"
] | Wrap the run function from thread. Get the tracer informations from the
threading object and set it as current tracer. | [
"Wrap",
"the",
"run",
"function",
"from",
"thread",
".",
"Get",
"the",
"tracer",
"informations",
"from",
"the",
"threading",
"object",
"and",
"set",
"it",
"as",
"current",
"tracer",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py#L73-L84 | train | 221,110 |
census-instrumentation/opencensus-python | opencensus/trace/attributes.py | Attributes.format_attributes_json | def format_attributes_json(self):
"""Convert the Attributes object to json format."""
attributes_json = {}
for key, value in self.attributes.items():
key = utils.check_str_length(key)[0]
value = _format_attribute_value(value)
if value is not None:
attributes_json[key] = value
result = {
'attributeMap': attributes_json
}
return result | python | def format_attributes_json(self):
"""Convert the Attributes object to json format."""
attributes_json = {}
for key, value in self.attributes.items():
key = utils.check_str_length(key)[0]
value = _format_attribute_value(value)
if value is not None:
attributes_json[key] = value
result = {
'attributeMap': attributes_json
}
return result | [
"def",
"format_attributes_json",
"(",
"self",
")",
":",
"attributes_json",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"attributes",
".",
"items",
"(",
")",
":",
"key",
"=",
"utils",
".",
"check_str_length",
"(",
"key",
")",
"[",
"0",... | Convert the Attributes object to json format. | [
"Convert",
"the",
"Attributes",
"object",
"to",
"json",
"format",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/attributes.py#L59-L74 | train | 221,111 |
census-instrumentation/opencensus-python | opencensus/stats/aggregation_data.py | DistributionAggregationData.add_sample | def add_sample(self, value, timestamp, attachments):
"""Adding a sample to Distribution Aggregation Data"""
self._count_data += 1
bucket = self.increment_bucket_count(value)
if attachments is not None and self.exemplars is not None:
self.exemplars[bucket] = Exemplar(value, timestamp, attachments)
if self.count_data == 1:
self._mean_data = value
return
old_mean = self._mean_data
self._mean_data = self._mean_data + (
(value - self._mean_data) / self._count_data)
self._sum_of_sqd_deviations = self._sum_of_sqd_deviations + (
(value - old_mean) * (value - self._mean_data)) | python | def add_sample(self, value, timestamp, attachments):
"""Adding a sample to Distribution Aggregation Data"""
self._count_data += 1
bucket = self.increment_bucket_count(value)
if attachments is not None and self.exemplars is not None:
self.exemplars[bucket] = Exemplar(value, timestamp, attachments)
if self.count_data == 1:
self._mean_data = value
return
old_mean = self._mean_data
self._mean_data = self._mean_data + (
(value - self._mean_data) / self._count_data)
self._sum_of_sqd_deviations = self._sum_of_sqd_deviations + (
(value - old_mean) * (value - self._mean_data)) | [
"def",
"add_sample",
"(",
"self",
",",
"value",
",",
"timestamp",
",",
"attachments",
")",
":",
"self",
".",
"_count_data",
"+=",
"1",
"bucket",
"=",
"self",
".",
"increment_bucket_count",
"(",
"value",
")",
"if",
"attachments",
"is",
"not",
"None",
"and",... | Adding a sample to Distribution Aggregation Data | [
"Adding",
"a",
"sample",
"to",
"Distribution",
"Aggregation",
"Data"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/aggregation_data.py#L250-L265 | train | 221,112 |
census-instrumentation/opencensus-python | opencensus/stats/aggregation_data.py | DistributionAggregationData.increment_bucket_count | def increment_bucket_count(self, value):
"""Increment the bucket count based on a given value from the user"""
if len(self._bounds) == 0:
self._counts_per_bucket[0] += 1
return 0
for ii, bb in enumerate(self._bounds):
if value < bb:
self._counts_per_bucket[ii] += 1
return ii
else:
last_bucket_index = len(self._bounds)
self._counts_per_bucket[last_bucket_index] += 1
return last_bucket_index | python | def increment_bucket_count(self, value):
"""Increment the bucket count based on a given value from the user"""
if len(self._bounds) == 0:
self._counts_per_bucket[0] += 1
return 0
for ii, bb in enumerate(self._bounds):
if value < bb:
self._counts_per_bucket[ii] += 1
return ii
else:
last_bucket_index = len(self._bounds)
self._counts_per_bucket[last_bucket_index] += 1
return last_bucket_index | [
"def",
"increment_bucket_count",
"(",
"self",
",",
"value",
")",
":",
"if",
"len",
"(",
"self",
".",
"_bounds",
")",
"==",
"0",
":",
"self",
".",
"_counts_per_bucket",
"[",
"0",
"]",
"+=",
"1",
"return",
"0",
"for",
"ii",
",",
"bb",
"in",
"enumerate"... | Increment the bucket count based on a given value from the user | [
"Increment",
"the",
"bucket",
"count",
"based",
"on",
"a",
"given",
"value",
"from",
"the",
"user"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/aggregation_data.py#L267-L280 | train | 221,113 |
fhamborg/news-please | newsplease/single_crawler.py | SingleCrawler.load_crawler | def load_crawler(self, crawler, url, ignore_regex):
"""
Loads the given crawler with the given url.
:param class crawler: class of the crawler to load
:param str url: url to start the crawler with
:param regex ignore_regex: to be able to ignore urls that match this
regex code
"""
self.process = CrawlerProcess(self.cfg.get_scrapy_options())
self.process.crawl(
crawler,
self.helper,
url=url,
config=self.cfg,
ignore_regex=ignore_regex) | python | def load_crawler(self, crawler, url, ignore_regex):
"""
Loads the given crawler with the given url.
:param class crawler: class of the crawler to load
:param str url: url to start the crawler with
:param regex ignore_regex: to be able to ignore urls that match this
regex code
"""
self.process = CrawlerProcess(self.cfg.get_scrapy_options())
self.process.crawl(
crawler,
self.helper,
url=url,
config=self.cfg,
ignore_regex=ignore_regex) | [
"def",
"load_crawler",
"(",
"self",
",",
"crawler",
",",
"url",
",",
"ignore_regex",
")",
":",
"self",
".",
"process",
"=",
"CrawlerProcess",
"(",
"self",
".",
"cfg",
".",
"get_scrapy_options",
"(",
")",
")",
"self",
".",
"process",
".",
"crawl",
"(",
... | Loads the given crawler with the given url.
:param class crawler: class of the crawler to load
:param str url: url to start the crawler with
:param regex ignore_regex: to be able to ignore urls that match this
regex code | [
"Loads",
"the",
"given",
"crawler",
"with",
"the",
"given",
"url",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/single_crawler.py#L224-L239 | train | 221,114 |
fhamborg/news-please | newsplease/pipeline/extractor/extractors/lang_detect_extractor.py | LangExtractor._language | def _language(self, item):
"""Returns the language of the extracted article by analyzing metatags and inspecting the visible text
with langdetect"""
response = item['spider_response'].body
root = html.fromstring(response)
# Check for lang-attributes
lang = root.get('lang')
if lang is None:
lang = root.get('xml:lang')
# Check for general meta tags
if lang is None:
meta = root.cssselect('meta[name="language"]')
if len(meta) > 0:
lang = meta[0].get('content')
# Check for open graph tags
if lang is None:
meta = root.cssselect('meta[property="og:locale"]')
if len(meta) > 0:
lang = meta[0].get('content')
# Look for <article> elements and inspect the one with the largest payload with langdetect
if lang is None:
article_list = []
for article in root.xpath('//article'):
article_list.append(re.sub(r'\s+', ' ', article.text_content().strip()))
if len(article_list) > 0:
lang = detect(max(article_list))
# Analyze the whole body with langdetect
if lang is None:
try:
lang = detect(root.text_content().strip())
except LangDetectException:
pass
# Try to normalize output
if lang is not None:
# First search for suitable locale in the original output
matches = self.langcode_pattern.search(lang)
if matches is not None:
lang = matches.group(0)
else:
# If no match was found, normalize the original output and search again
normalized = locale.normalize(re.split(r'\s|;|,', lang.strip())[0])
matches = self.langcode_pattern.search(normalized)
if matches is not None:
lang = matches.group(0)
return lang | python | def _language(self, item):
"""Returns the language of the extracted article by analyzing metatags and inspecting the visible text
with langdetect"""
response = item['spider_response'].body
root = html.fromstring(response)
# Check for lang-attributes
lang = root.get('lang')
if lang is None:
lang = root.get('xml:lang')
# Check for general meta tags
if lang is None:
meta = root.cssselect('meta[name="language"]')
if len(meta) > 0:
lang = meta[0].get('content')
# Check for open graph tags
if lang is None:
meta = root.cssselect('meta[property="og:locale"]')
if len(meta) > 0:
lang = meta[0].get('content')
# Look for <article> elements and inspect the one with the largest payload with langdetect
if lang is None:
article_list = []
for article in root.xpath('//article'):
article_list.append(re.sub(r'\s+', ' ', article.text_content().strip()))
if len(article_list) > 0:
lang = detect(max(article_list))
# Analyze the whole body with langdetect
if lang is None:
try:
lang = detect(root.text_content().strip())
except LangDetectException:
pass
# Try to normalize output
if lang is not None:
# First search for suitable locale in the original output
matches = self.langcode_pattern.search(lang)
if matches is not None:
lang = matches.group(0)
else:
# If no match was found, normalize the original output and search again
normalized = locale.normalize(re.split(r'\s|;|,', lang.strip())[0])
matches = self.langcode_pattern.search(normalized)
if matches is not None:
lang = matches.group(0)
return lang | [
"def",
"_language",
"(",
"self",
",",
"item",
")",
":",
"response",
"=",
"item",
"[",
"'spider_response'",
"]",
".",
"body",
"root",
"=",
"html",
".",
"fromstring",
"(",
"response",
")",
"# Check for lang-attributes",
"lang",
"=",
"root",
".",
"get",
"(",
... | Returns the language of the extracted article by analyzing metatags and inspecting the visible text
with langdetect | [
"Returns",
"the",
"language",
"of",
"the",
"extracted",
"article",
"by",
"analyzing",
"metatags",
"and",
"inspecting",
"the",
"visible",
"text",
"with",
"langdetect"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/lang_detect_extractor.py#L21-L74 | train | 221,115 |
fhamborg/news-please | newsplease/crawler/spiders/download_crawler.py | Download.parse | def parse(self, response):
"""
Passes the response to the pipeline.
:param obj response: The scrapy response
"""
if not self.helper.parse_crawler.content_type(response):
return
yield self.helper.parse_crawler.pass_to_pipeline(
response,
self.helper.url_extractor.get_allowed_domain(response.url)
) | python | def parse(self, response):
"""
Passes the response to the pipeline.
:param obj response: The scrapy response
"""
if not self.helper.parse_crawler.content_type(response):
return
yield self.helper.parse_crawler.pass_to_pipeline(
response,
self.helper.url_extractor.get_allowed_domain(response.url)
) | [
"def",
"parse",
"(",
"self",
",",
"response",
")",
":",
"if",
"not",
"self",
".",
"helper",
".",
"parse_crawler",
".",
"content_type",
"(",
"response",
")",
":",
"return",
"yield",
"self",
".",
"helper",
".",
"parse_crawler",
".",
"pass_to_pipeline",
"(",
... | Passes the response to the pipeline.
:param obj response: The scrapy response | [
"Passes",
"the",
"response",
"to",
"the",
"pipeline",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/download_crawler.py#L28-L40 | train | 221,116 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_Language.py | ComparerLanguage.extract | def extract(self, item, list_article_candidate):
"""Compares how often any language was detected.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the language which was most frequently detected
"""
# Save extracted languages in list
languages_extracted = []
# Save the extracted language of newspaper in extra variable, because newspaper extract meta-language
# which is very accurate.
language_newspaper = None
for article_candidate in list_article_candidate:
if article_candidate.language is not None:
languages_extracted.append(article_candidate.language)
if article_candidate.extractor == "newspaper":
language_newspaper = article_candidate.language
if not languages_extracted:
return None
# Create a set of the extracted languages, so every lang appears once
languages_extracted_set = set(languages_extracted)
# Count how often every language has been extracted
languages_extracted_number = []
for language in languages_extracted_set:
languages_extracted_number.append((languages_extracted.count(language), language))
if not (languages_extracted_number):
return None
# If there is no favorite language, return the language extracted by newspaper
if max(languages_extracted_number)[0] == min(languages_extracted_number)[0] and language_newspaper is not None:
return language_newspaper
if languages_extracted_number:
return (max(languages_extracted_number))[1]
else:
return None | python | def extract(self, item, list_article_candidate):
"""Compares how often any language was detected.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the language which was most frequently detected
"""
# Save extracted languages in list
languages_extracted = []
# Save the extracted language of newspaper in extra variable, because newspaper extract meta-language
# which is very accurate.
language_newspaper = None
for article_candidate in list_article_candidate:
if article_candidate.language is not None:
languages_extracted.append(article_candidate.language)
if article_candidate.extractor == "newspaper":
language_newspaper = article_candidate.language
if not languages_extracted:
return None
# Create a set of the extracted languages, so every lang appears once
languages_extracted_set = set(languages_extracted)
# Count how often every language has been extracted
languages_extracted_number = []
for language in languages_extracted_set:
languages_extracted_number.append((languages_extracted.count(language), language))
if not (languages_extracted_number):
return None
# If there is no favorite language, return the language extracted by newspaper
if max(languages_extracted_number)[0] == min(languages_extracted_number)[0] and language_newspaper is not None:
return language_newspaper
if languages_extracted_number:
return (max(languages_extracted_number))[1]
else:
return None | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"# Save extracted languages in list",
"languages_extracted",
"=",
"[",
"]",
"# Save the extracted language of newspaper in extra variable, because newspaper extract meta-language",
"# which is very a... | Compares how often any language was detected.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the language which was most frequently detected | [
"Compares",
"how",
"often",
"any",
"language",
"was",
"detected",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_Language.py#L4-L49 | train | 221,117 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_date.py | ComparerDate.extract | def extract(self, item, list_article_candidate):
"""Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date
"""
list_publish_date = []
for article_candidate in list_article_candidate:
if article_candidate.publish_date != None:
list_publish_date.append((article_candidate.publish_date, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_publish_date) == 0:
return None
# If there are more options than one, return the result from date_extractor.
list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"]
if len(list_date_extractor) == 0:
# If there is no date extracted by date_extractor, return the first result of list_publish_date.
return list_publish_date[0][0]
else:
return list_date_extractor[0][0] | python | def extract(self, item, list_article_candidate):
"""Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date
"""
list_publish_date = []
for article_candidate in list_article_candidate:
if article_candidate.publish_date != None:
list_publish_date.append((article_candidate.publish_date, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_publish_date) == 0:
return None
# If there are more options than one, return the result from date_extractor.
list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"]
if len(list_date_extractor) == 0:
# If there is no date extracted by date_extractor, return the first result of list_publish_date.
return list_publish_date[0][0]
else:
return list_date_extractor[0][0] | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"list_publish_date",
"=",
"[",
"]",
"for",
"article_candidate",
"in",
"list_article_candidate",
":",
"if",
"article_candidate",
".",
"publish_date",
"!=",
"None",
":",
"list_publi... | Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date | [
"Compares",
"the",
"extracted",
"publish",
"dates",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_date.py#L4-L28 | train | 221,118 |
fhamborg/news-please | newsplease/helper_classes/heuristics.py | Heuristics.meta_contains_article_keyword | def meta_contains_article_keyword(self, response, site_dict):
"""
Determines wether the response's meta data contains the keyword
'article'
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines wether the reponse's meta data contains the
keyword 'article'
"""
contains_meta = response.xpath('//meta') \
.re('(= ?["\'][^"\']*article[^"\']*["\'])')
if not contains_meta:
return False
return True | python | def meta_contains_article_keyword(self, response, site_dict):
"""
Determines wether the response's meta data contains the keyword
'article'
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines wether the reponse's meta data contains the
keyword 'article'
"""
contains_meta = response.xpath('//meta') \
.re('(= ?["\'][^"\']*article[^"\']*["\'])')
if not contains_meta:
return False
return True | [
"def",
"meta_contains_article_keyword",
"(",
"self",
",",
"response",
",",
"site_dict",
")",
":",
"contains_meta",
"=",
"response",
".",
"xpath",
"(",
"'//meta'",
")",
".",
"re",
"(",
"'(= ?[\"\\'][^\"\\']*article[^\"\\']*[\"\\'])'",
")",
"if",
"not",
"contains_meta... | Determines wether the response's meta data contains the keyword
'article'
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines wether the reponse's meta data contains the
keyword 'article' | [
"Determines",
"wether",
"the",
"response",
"s",
"meta",
"data",
"contains",
"the",
"keyword",
"article"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L36-L52 | train | 221,119 |
fhamborg/news-please | newsplease/helper_classes/heuristics.py | Heuristics.linked_headlines | def linked_headlines(self, response, site_dict, check_self=False):
"""
Checks how many of the headlines on the site contain links.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:param bool check_self: Check headlines/
headlines_containing_link_to_same_domain
instead of headline/headline_containing_link
:return float: ratio headlines/headlines_containing_link
"""
h_all = 0
h_linked = 0
domain = UrlExtractor.get_allowed_domain(site_dict["url"], False)
# This regex checks, if a link containing site_domain as domain
# is contained in a string.
site_regex = r"href=[\"'][^\/]*\/\/(?:[^\"']*\.|)%s[\"'\/]" % domain
for i in range(1, 7):
for headline in response.xpath('//h%s' % i).extract():
h_all += 1
if "href" in headline and (
not check_self or re.search(site_regex, headline)
is not None):
h_linked += 1
self.log.debug("Linked headlines test: headlines = %s, linked = %s",
h_all, h_linked)
min_headlines = self.cfg_heuristics["min_headlines_for_linked_test"]
if min_headlines > h_all:
self.log.debug("Linked headlines test: Not enough headlines "
"(%s < %s): Passing!", h_all, min_headlines)
return True
return float(h_linked) / float(h_all) | python | def linked_headlines(self, response, site_dict, check_self=False):
"""
Checks how many of the headlines on the site contain links.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:param bool check_self: Check headlines/
headlines_containing_link_to_same_domain
instead of headline/headline_containing_link
:return float: ratio headlines/headlines_containing_link
"""
h_all = 0
h_linked = 0
domain = UrlExtractor.get_allowed_domain(site_dict["url"], False)
# This regex checks, if a link containing site_domain as domain
# is contained in a string.
site_regex = r"href=[\"'][^\/]*\/\/(?:[^\"']*\.|)%s[\"'\/]" % domain
for i in range(1, 7):
for headline in response.xpath('//h%s' % i).extract():
h_all += 1
if "href" in headline and (
not check_self or re.search(site_regex, headline)
is not None):
h_linked += 1
self.log.debug("Linked headlines test: headlines = %s, linked = %s",
h_all, h_linked)
min_headlines = self.cfg_heuristics["min_headlines_for_linked_test"]
if min_headlines > h_all:
self.log.debug("Linked headlines test: Not enough headlines "
"(%s < %s): Passing!", h_all, min_headlines)
return True
return float(h_linked) / float(h_all) | [
"def",
"linked_headlines",
"(",
"self",
",",
"response",
",",
"site_dict",
",",
"check_self",
"=",
"False",
")",
":",
"h_all",
"=",
"0",
"h_linked",
"=",
"0",
"domain",
"=",
"UrlExtractor",
".",
"get_allowed_domain",
"(",
"site_dict",
"[",
"\"url\"",
"]",
... | Checks how many of the headlines on the site contain links.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:param bool check_self: Check headlines/
headlines_containing_link_to_same_domain
instead of headline/headline_containing_link
:return float: ratio headlines/headlines_containing_link | [
"Checks",
"how",
"many",
"of",
"the",
"headlines",
"on",
"the",
"site",
"contain",
"links",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L73-L109 | train | 221,120 |
fhamborg/news-please | newsplease/helper_classes/heuristics.py | Heuristics.is_not_from_subdomain | def is_not_from_subdomain(self, response, site_dict):
"""
Ensures the response's url isn't from a subdomain.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines if the response's url is from a subdomain
"""
root_url = re.sub(re_url_root, '', site_dict["url"])
return UrlExtractor.get_allowed_domain(response.url) == root_url | python | def is_not_from_subdomain(self, response, site_dict):
"""
Ensures the response's url isn't from a subdomain.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines if the response's url is from a subdomain
"""
root_url = re.sub(re_url_root, '', site_dict["url"])
return UrlExtractor.get_allowed_domain(response.url) == root_url | [
"def",
"is_not_from_subdomain",
"(",
"self",
",",
"response",
",",
"site_dict",
")",
":",
"root_url",
"=",
"re",
".",
"sub",
"(",
"re_url_root",
",",
"''",
",",
"site_dict",
"[",
"\"url\"",
"]",
")",
"return",
"UrlExtractor",
".",
"get_allowed_domain",
"(",
... | Ensures the response's url isn't from a subdomain.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines if the response's url is from a subdomain | [
"Ensures",
"the",
"response",
"s",
"url",
"isn",
"t",
"from",
"a",
"subdomain",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L122-L133 | train | 221,121 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_description.py | ComparerDescription.extract | def extract(self, item, list_article_candidate):
"""Compares the extracted descriptions.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely description
"""
list_description = []
""" The descriptions of the article candidates and the respective extractors are saved
in a tuple in list_description.
"""
for article_candidate in list_article_candidate:
if article_candidate.description != None:
list_description.append((article_candidate.description, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_description) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_description if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no description extracted by newspaper, return the first result of list_description.
return list_description[0][0]
else:
return list_newspaper[0][0] | python | def extract(self, item, list_article_candidate):
"""Compares the extracted descriptions.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely description
"""
list_description = []
""" The descriptions of the article candidates and the respective extractors are saved
in a tuple in list_description.
"""
for article_candidate in list_article_candidate:
if article_candidate.description != None:
list_description.append((article_candidate.description, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_description) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_description if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no description extracted by newspaper, return the first result of list_description.
return list_description[0][0]
else:
return list_newspaper[0][0] | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"list_description",
"=",
"[",
"]",
"\"\"\" The descriptions of the article candidates and the respective extractors are saved\n in a tuple in list_description.\n \"\"\"",
"for",
"articl... | Compares the extracted descriptions.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely description | [
"Compares",
"the",
"extracted",
"descriptions",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_description.py#L6-L33 | train | 221,122 |
fhamborg/news-please | newsplease/pipeline/pipelines.py | PandasStorage.close_spider | def close_spider(self, _spider):
"""
Write out to file
"""
self.df['date_download'] = pd.to_datetime(
self.df['date_download'], errors='coerce', infer_datetime_format=True
)
self.df['date_modify'] = pd.to_datetime(
self.df['date_modify'], errors='coerce', infer_datetime_format=True
)
self.df['date_publish'] = pd.to_datetime(
self.df['date_publish'], errors='coerce', infer_datetime_format=True
)
self.df.to_pickle(self.full_path)
self.log.info("Wrote to Pandas to %s", self.full_path) | python | def close_spider(self, _spider):
"""
Write out to file
"""
self.df['date_download'] = pd.to_datetime(
self.df['date_download'], errors='coerce', infer_datetime_format=True
)
self.df['date_modify'] = pd.to_datetime(
self.df['date_modify'], errors='coerce', infer_datetime_format=True
)
self.df['date_publish'] = pd.to_datetime(
self.df['date_publish'], errors='coerce', infer_datetime_format=True
)
self.df.to_pickle(self.full_path)
self.log.info("Wrote to Pandas to %s", self.full_path) | [
"def",
"close_spider",
"(",
"self",
",",
"_spider",
")",
":",
"self",
".",
"df",
"[",
"'date_download'",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"self",
".",
"df",
"[",
"'date_download'",
"]",
",",
"errors",
"=",
"'coerce'",
",",
"infer_datetime_format",
... | Write out to file | [
"Write",
"out",
"to",
"file"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/pipelines.py#L643-L657 | train | 221,123 |
fhamborg/news-please | newsplease/pipeline/extractor/extractors/date_extractor.py | DateExtractor._publish_date | def _publish_date(self, item):
"""Returns the publish_date of the extracted article."""
url = item['url']
html = deepcopy(item['spider_response'].body)
publish_date = None
try:
if html is None:
request = urllib2.Request(url)
# Using a browser user agent, decreases the change of sites blocking this request - just a suggestion
# request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko)
# Chrome/41.0.2228.0 Safari/537.36')
html = urllib2.build_opener().open(request).read()
html = BeautifulSoup(html, "lxml")
publish_date = self._extract_from_json(html)
if publish_date is None:
publish_date = self._extract_from_meta(html)
if publish_date is None:
publish_date = self._extract_from_html_tag(html)
if publish_date is None:
publish_date = self._extract_from_url(url)
except Exception as e:
# print(e.message, e.args)
pass
return publish_date | python | def _publish_date(self, item):
"""Returns the publish_date of the extracted article."""
url = item['url']
html = deepcopy(item['spider_response'].body)
publish_date = None
try:
if html is None:
request = urllib2.Request(url)
# Using a browser user agent, decreases the change of sites blocking this request - just a suggestion
# request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko)
# Chrome/41.0.2228.0 Safari/537.36')
html = urllib2.build_opener().open(request).read()
html = BeautifulSoup(html, "lxml")
publish_date = self._extract_from_json(html)
if publish_date is None:
publish_date = self._extract_from_meta(html)
if publish_date is None:
publish_date = self._extract_from_html_tag(html)
if publish_date is None:
publish_date = self._extract_from_url(url)
except Exception as e:
# print(e.message, e.args)
pass
return publish_date | [
"def",
"_publish_date",
"(",
"self",
",",
"item",
")",
":",
"url",
"=",
"item",
"[",
"'url'",
"]",
"html",
"=",
"deepcopy",
"(",
"item",
"[",
"'spider_response'",
"]",
".",
"body",
")",
"publish_date",
"=",
"None",
"try",
":",
"if",
"html",
"is",
"No... | Returns the publish_date of the extracted article. | [
"Returns",
"the",
"publish_date",
"of",
"the",
"extracted",
"article",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/date_extractor.py#L30-L58 | train | 221,124 |
fhamborg/news-please | newsplease/pipeline/extractor/extractors/date_extractor.py | DateExtractor._extract_from_url | def _extract_from_url(self, url):
"""Try to extract from the article URL - simple but might work as a fallback"""
# Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py
m = re.search(re_pub_date, url)
if m:
return self.parse_date_str(m.group(0))
return None | python | def _extract_from_url(self, url):
"""Try to extract from the article URL - simple but might work as a fallback"""
# Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py
m = re.search(re_pub_date, url)
if m:
return self.parse_date_str(m.group(0))
return None | [
"def",
"_extract_from_url",
"(",
"self",
",",
"url",
")",
":",
"# Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py",
"m",
"=",
"re",
".",
"search",
"(",
"re_pub_date",
",",
"url",
")",
"if",
"m",
":",
"return",
"self",
".",
... | Try to extract from the article URL - simple but might work as a fallback | [
"Try",
"to",
"extract",
"from",
"the",
"article",
"URL",
"-",
"simple",
"but",
"might",
"work",
"as",
"a",
"fallback"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/date_extractor.py#L67-L74 | train | 221,125 |
fhamborg/news-please | newsplease/pipeline/extractor/cleaner.py | Cleaner.delete_tags | def delete_tags(self, arg):
"""Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string
"""
if len(arg) > 0:
raw = html.fromstring(arg)
return raw.text_content().strip()
return arg | python | def delete_tags(self, arg):
"""Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string
"""
if len(arg) > 0:
raw = html.fromstring(arg)
return raw.text_content().strip()
return arg | [
"def",
"delete_tags",
"(",
"self",
",",
"arg",
")",
":",
"if",
"len",
"(",
"arg",
")",
">",
"0",
":",
"raw",
"=",
"html",
".",
"fromstring",
"(",
"arg",
")",
"return",
"raw",
".",
"text_content",
"(",
")",
".",
"strip",
"(",
")",
"return",
"arg"
... | Removes html-tags from extracted data.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string | [
"Removes",
"html",
"-",
"tags",
"from",
"extracted",
"data",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L22-L33 | train | 221,126 |
fhamborg/news-please | newsplease/pipeline/extractor/cleaner.py | Cleaner.delete_whitespaces | def delete_whitespaces(self, arg):
"""Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one.
:param arg: A string, the string which shell be cleaned
:return: A string, the cleaned string
"""
# Deletes whitespaces after a newline
arg = re.sub(re_newline_spc, '', arg)
# Deletes every whitespace, tabulator, newline at the beginning of the string
arg = re.sub(re_starting_whitespc, '', arg)
# Deletes whitespace or tabulator if followed by whitespace or tabulator
arg = re.sub(re_multi_spc_tab, '', arg)
# Deletes newline if it is followed by an other one
arg = re.sub(re_double_newline, '', arg)
# Deletes newlines and whitespaces at the end of the string
arg = re.sub(re_ending_spc_newline, '', arg)
return arg | python | def delete_whitespaces(self, arg):
"""Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one.
:param arg: A string, the string which shell be cleaned
:return: A string, the cleaned string
"""
# Deletes whitespaces after a newline
arg = re.sub(re_newline_spc, '', arg)
# Deletes every whitespace, tabulator, newline at the beginning of the string
arg = re.sub(re_starting_whitespc, '', arg)
# Deletes whitespace or tabulator if followed by whitespace or tabulator
arg = re.sub(re_multi_spc_tab, '', arg)
# Deletes newline if it is followed by an other one
arg = re.sub(re_double_newline, '', arg)
# Deletes newlines and whitespaces at the end of the string
arg = re.sub(re_ending_spc_newline, '', arg)
return arg | [
"def",
"delete_whitespaces",
"(",
"self",
",",
"arg",
")",
":",
"# Deletes whitespaces after a newline",
"arg",
"=",
"re",
".",
"sub",
"(",
"re_newline_spc",
",",
"''",
",",
"arg",
")",
"# Deletes every whitespace, tabulator, newline at the beginning of the string",
"arg"... | Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one.
:param arg: A string, the string which shell be cleaned
:return: A string, the cleaned string | [
"Removes",
"newlines",
"tabs",
"and",
"whitespaces",
"at",
"the",
"beginning",
"the",
"end",
"and",
"if",
"there",
"is",
"more",
"than",
"one",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L35-L51 | train | 221,127 |
fhamborg/news-please | newsplease/pipeline/extractor/cleaner.py | Cleaner.do_cleaning | def do_cleaning(self, arg):
"""Does the actual cleaning by using the delete methods above.
:param arg: A string, the string which shell be cleaned. Or a list, in which case each of the strings within the
list is cleaned.
:return: A string, the cleaned string. Or a list with cleaned string entries.
"""
if arg is not None:
if isinstance(arg, list):
newlist = []
for entry in arg:
newlist.append(self.do_cleaning(entry))
return newlist
else:
if sys.version_info[0] < 3:
arg = unicode(arg)
else:
arg = str(arg)
arg = self.delete_tags(arg)
arg = self.delete_whitespaces(arg)
return arg
else:
return None | python | def do_cleaning(self, arg):
"""Does the actual cleaning by using the delete methods above.
:param arg: A string, the string which shell be cleaned. Or a list, in which case each of the strings within the
list is cleaned.
:return: A string, the cleaned string. Or a list with cleaned string entries.
"""
if arg is not None:
if isinstance(arg, list):
newlist = []
for entry in arg:
newlist.append(self.do_cleaning(entry))
return newlist
else:
if sys.version_info[0] < 3:
arg = unicode(arg)
else:
arg = str(arg)
arg = self.delete_tags(arg)
arg = self.delete_whitespaces(arg)
return arg
else:
return None | [
"def",
"do_cleaning",
"(",
"self",
",",
"arg",
")",
":",
"if",
"arg",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"arg",
",",
"list",
")",
":",
"newlist",
"=",
"[",
"]",
"for",
"entry",
"in",
"arg",
":",
"newlist",
".",
"append",
"(",
"sel... | Does the actual cleaning by using the delete methods above.
:param arg: A string, the string which shell be cleaned. Or a list, in which case each of the strings within the
list is cleaned.
:return: A string, the cleaned string. Or a list with cleaned string entries. | [
"Does",
"the",
"actual",
"cleaning",
"by",
"using",
"the",
"delete",
"methods",
"above",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L53-L75 | train | 221,128 |
fhamborg/news-please | newsplease/pipeline/extractor/cleaner.py | Cleaner.clean | def clean(self, list_article_candidates):
"""Iterates over each article_candidate and cleans every extracted data.
:param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A list, the list with the cleaned ArticleCandidate-Objects
"""
# Save cleaned article_candidates in results.
results = []
for article_candidate in list_article_candidates:
article_candidate.title = self.do_cleaning(article_candidate.title)
article_candidate.description = self.do_cleaning(article_candidate.description)
article_candidate.text = self.do_cleaning(article_candidate.text)
article_candidate.topimage = self.do_cleaning(article_candidate.topimage)
article_candidate.author = self.do_cleaning(article_candidate.author)
article_candidate.publish_date = self.do_cleaning(article_candidate.publish_date)
results.append(article_candidate)
return results | python | def clean(self, list_article_candidates):
"""Iterates over each article_candidate and cleans every extracted data.
:param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A list, the list with the cleaned ArticleCandidate-Objects
"""
# Save cleaned article_candidates in results.
results = []
for article_candidate in list_article_candidates:
article_candidate.title = self.do_cleaning(article_candidate.title)
article_candidate.description = self.do_cleaning(article_candidate.description)
article_candidate.text = self.do_cleaning(article_candidate.text)
article_candidate.topimage = self.do_cleaning(article_candidate.topimage)
article_candidate.author = self.do_cleaning(article_candidate.author)
article_candidate.publish_date = self.do_cleaning(article_candidate.publish_date)
results.append(article_candidate)
return results | [
"def",
"clean",
"(",
"self",
",",
"list_article_candidates",
")",
":",
"# Save cleaned article_candidates in results.",
"results",
"=",
"[",
"]",
"for",
"article_candidate",
"in",
"list_article_candidates",
":",
"article_candidate",
".",
"title",
"=",
"self",
".",
"do... | Iterates over each article_candidate and cleans every extracted data.
:param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A list, the list with the cleaned ArticleCandidate-Objects | [
"Iterates",
"over",
"each",
"article_candidate",
"and",
"cleans",
"every",
"extracted",
"data",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L77-L96 | train | 221,129 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer.py | Comparer.compare | def compare(self, item, article_candidates):
"""Compares the article candidates using the different submodules and saves the best results in
new ArticleCandidate object
:param item: The NewscrawlerItem related to the ArticleCandidates
:param article_candidates: The list of ArticleCandidate-Objects which have been extracted
:return: An ArticleCandidate-object containing the best results
"""
result = ArticleCandidate()
result.title = self.comparer_title.extract(item, article_candidates)
result.description = self.comparer_desciption.extract(item, article_candidates)
result.text = self.comparer_text.extract(item, article_candidates)
result.topimage = self.comparer_topimage.extract(item, article_candidates)
result.author = self.comparer_author.extract(item, article_candidates)
result.publish_date = self.comparer_date.extract(item, article_candidates)
result.language = self.comparer_language.extract(item, article_candidates)
return result | python | def compare(self, item, article_candidates):
"""Compares the article candidates using the different submodules and saves the best results in
new ArticleCandidate object
:param item: The NewscrawlerItem related to the ArticleCandidates
:param article_candidates: The list of ArticleCandidate-Objects which have been extracted
:return: An ArticleCandidate-object containing the best results
"""
result = ArticleCandidate()
result.title = self.comparer_title.extract(item, article_candidates)
result.description = self.comparer_desciption.extract(item, article_candidates)
result.text = self.comparer_text.extract(item, article_candidates)
result.topimage = self.comparer_topimage.extract(item, article_candidates)
result.author = self.comparer_author.extract(item, article_candidates)
result.publish_date = self.comparer_date.extract(item, article_candidates)
result.language = self.comparer_language.extract(item, article_candidates)
return result | [
"def",
"compare",
"(",
"self",
",",
"item",
",",
"article_candidates",
")",
":",
"result",
"=",
"ArticleCandidate",
"(",
")",
"result",
".",
"title",
"=",
"self",
".",
"comparer_title",
".",
"extract",
"(",
"item",
",",
"article_candidates",
")",
"result",
... | Compares the article candidates using the different submodules and saves the best results in
new ArticleCandidate object
:param item: The NewscrawlerItem related to the ArticleCandidates
:param article_candidates: The list of ArticleCandidate-Objects which have been extracted
:return: An ArticleCandidate-object containing the best results | [
"Compares",
"the",
"article",
"candidates",
"using",
"the",
"different",
"submodules",
"and",
"saves",
"the",
"best",
"results",
"in",
"new",
"ArticleCandidate",
"object"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer.py#L24-L42 | train | 221,130 |
fhamborg/news-please | newsplease/crawler/spiders/rss_crawler.py | RssCrawler.parse | def parse(self, response):
"""
Extracts the Rss Feed and initiates crawling it.
:param obj response: The scrapy response
"""
yield scrapy.Request(self.helper.url_extractor.get_rss_url(response),
callback=self.rss_parse) | python | def parse(self, response):
"""
Extracts the Rss Feed and initiates crawling it.
:param obj response: The scrapy response
"""
yield scrapy.Request(self.helper.url_extractor.get_rss_url(response),
callback=self.rss_parse) | [
"def",
"parse",
"(",
"self",
",",
"response",
")",
":",
"yield",
"scrapy",
".",
"Request",
"(",
"self",
".",
"helper",
".",
"url_extractor",
".",
"get_rss_url",
"(",
"response",
")",
",",
"callback",
"=",
"self",
".",
"rss_parse",
")"
] | Extracts the Rss Feed and initiates crawling it.
:param obj response: The scrapy response | [
"Extracts",
"the",
"Rss",
"Feed",
"and",
"initiates",
"crawling",
"it",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/rss_crawler.py#L42-L49 | train | 221,131 |
fhamborg/news-please | newsplease/crawler/spiders/rss_crawler.py | RssCrawler.supports_site | def supports_site(url):
"""
Rss Crawler are supported if by every site containing an rss feed.
Determines if this crawler works on the given url.
:param str url: The url to test
:return bool: Determines wether this crawler work on the given url
"""
# Follow redirects
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
redirect = opener.open(url).url
response = urllib2.urlopen(redirect).read()
# Check if a standard rss feed exists
return re.search(re_rss, response.decode('utf-8')) is not None | python | def supports_site(url):
"""
Rss Crawler are supported if by every site containing an rss feed.
Determines if this crawler works on the given url.
:param str url: The url to test
:return bool: Determines wether this crawler work on the given url
"""
# Follow redirects
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
redirect = opener.open(url).url
response = urllib2.urlopen(redirect).read()
# Check if a standard rss feed exists
return re.search(re_rss, response.decode('utf-8')) is not None | [
"def",
"supports_site",
"(",
"url",
")",
":",
"# Follow redirects",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
"urllib2",
".",
"HTTPRedirectHandler",
")",
"redirect",
"=",
"opener",
".",
"open",
"(",
"url",
")",
".",
"url",
"response",
"=",
"urllib2... | Rss Crawler are supported if by every site containing an rss feed.
Determines if this crawler works on the given url.
:param str url: The url to test
:return bool: Determines wether this crawler work on the given url | [
"Rss",
"Crawler",
"are",
"supported",
"if",
"by",
"every",
"site",
"containing",
"an",
"rss",
"feed",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/rss_crawler.py#L86-L102 | train | 221,132 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.get_allowed_domain | def get_allowed_domain(url, allow_subdomains=True):
"""
Determines the url's domain.
:param str url: the url to extract the allowed domain from
:param bool allow_subdomains: determines wether to include subdomains
:return str: subdomains.domain.topleveldomain or domain.topleveldomain
"""
if allow_subdomains:
return re.sub(re_www, '', re.search(r'[^/]+\.[^/]+', url).group(0))
else:
return re.search(re_domain, UrlExtractor.get_allowed_domain(url)).group(0) | python | def get_allowed_domain(url, allow_subdomains=True):
"""
Determines the url's domain.
:param str url: the url to extract the allowed domain from
:param bool allow_subdomains: determines wether to include subdomains
:return str: subdomains.domain.topleveldomain or domain.topleveldomain
"""
if allow_subdomains:
return re.sub(re_www, '', re.search(r'[^/]+\.[^/]+', url).group(0))
else:
return re.search(re_domain, UrlExtractor.get_allowed_domain(url)).group(0) | [
"def",
"get_allowed_domain",
"(",
"url",
",",
"allow_subdomains",
"=",
"True",
")",
":",
"if",
"allow_subdomains",
":",
"return",
"re",
".",
"sub",
"(",
"re_www",
",",
"''",
",",
"re",
".",
"search",
"(",
"r'[^/]+\\.[^/]+'",
",",
"url",
")",
".",
"group"... | Determines the url's domain.
:param str url: the url to extract the allowed domain from
:param bool allow_subdomains: determines wether to include subdomains
:return str: subdomains.domain.topleveldomain or domain.topleveldomain | [
"Determines",
"the",
"url",
"s",
"domain",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L31-L42 | train | 221,133 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.get_subdomain | def get_subdomain(url):
"""
Determines the domain's subdomains.
:param str url: the url to extract any subdomains from
:return str: subdomains of url
"""
allowed_domain = UrlExtractor.get_allowed_domain(url)
return allowed_domain[:len(allowed_domain) - len(
UrlExtractor.get_allowed_domain(url, False))] | python | def get_subdomain(url):
"""
Determines the domain's subdomains.
:param str url: the url to extract any subdomains from
:return str: subdomains of url
"""
allowed_domain = UrlExtractor.get_allowed_domain(url)
return allowed_domain[:len(allowed_domain) - len(
UrlExtractor.get_allowed_domain(url, False))] | [
"def",
"get_subdomain",
"(",
"url",
")",
":",
"allowed_domain",
"=",
"UrlExtractor",
".",
"get_allowed_domain",
"(",
"url",
")",
"return",
"allowed_domain",
"[",
":",
"len",
"(",
"allowed_domain",
")",
"-",
"len",
"(",
"UrlExtractor",
".",
"get_allowed_domain",
... | Determines the domain's subdomains.
:param str url: the url to extract any subdomains from
:return str: subdomains of url | [
"Determines",
"the",
"domain",
"s",
"subdomains",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L45-L54 | train | 221,134 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.follow_redirects | def follow_redirects(url):
"""
Get's the url actual address by following forwards
:param str url: the url to work on
:return str: actual address of url
"""
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
return opener.open(url).url | python | def follow_redirects(url):
"""
Get's the url actual address by following forwards
:param str url: the url to work on
:return str: actual address of url
"""
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
return opener.open(url).url | [
"def",
"follow_redirects",
"(",
"url",
")",
":",
"opener",
"=",
"urllib2",
".",
"build_opener",
"(",
"urllib2",
".",
"HTTPRedirectHandler",
")",
"return",
"opener",
".",
"open",
"(",
"url",
")",
".",
"url"
] | Get's the url actual address by following forwards
:param str url: the url to work on
:return str: actual address of url | [
"Get",
"s",
"the",
"url",
"actual",
"address",
"by",
"following",
"forwards"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L57-L65 | train | 221,135 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.get_sitemap_url | def get_sitemap_url(url, allow_subdomains):
"""
Determines the domain's robot.txt
:param str url: the url to work on
:param bool allow_subdomains: Determines if the robot.txt may be the
subdomain's
:return: the robot.txt's address
:raises Exception: if there's no robot.txt on the site's domain
"""
if allow_subdomains:
redirect = UrlExtractor.follow_redirects(
"http://" + UrlExtractor.get_allowed_domain(url)
)
else:
redirect = UrlExtractor.follow_redirects(
"http://" +
UrlExtractor.get_allowed_domain(url, False)
)
redirect = UrlExtractor.follow_redirects(url)
# Get robots.txt
parsed = urlparse(redirect)
if allow_subdomains:
url_netloc = parsed.netloc
else:
url_netloc = UrlExtractor.get_allowed_domain(
parsed.netloc, False)
robots = '{url.scheme}://{url_netloc}/robots.txt'.format(
url=parsed, url_netloc=url_netloc)
try:
urllib2.urlopen(robots)
return robots
except:
if allow_subdomains:
return UrlExtractor.get_sitemap_url(url, False)
else:
raise Exception('Fatal: no robots.txt found.') | python | def get_sitemap_url(url, allow_subdomains):
"""
Determines the domain's robot.txt
:param str url: the url to work on
:param bool allow_subdomains: Determines if the robot.txt may be the
subdomain's
:return: the robot.txt's address
:raises Exception: if there's no robot.txt on the site's domain
"""
if allow_subdomains:
redirect = UrlExtractor.follow_redirects(
"http://" + UrlExtractor.get_allowed_domain(url)
)
else:
redirect = UrlExtractor.follow_redirects(
"http://" +
UrlExtractor.get_allowed_domain(url, False)
)
redirect = UrlExtractor.follow_redirects(url)
# Get robots.txt
parsed = urlparse(redirect)
if allow_subdomains:
url_netloc = parsed.netloc
else:
url_netloc = UrlExtractor.get_allowed_domain(
parsed.netloc, False)
robots = '{url.scheme}://{url_netloc}/robots.txt'.format(
url=parsed, url_netloc=url_netloc)
try:
urllib2.urlopen(robots)
return robots
except:
if allow_subdomains:
return UrlExtractor.get_sitemap_url(url, False)
else:
raise Exception('Fatal: no robots.txt found.') | [
"def",
"get_sitemap_url",
"(",
"url",
",",
"allow_subdomains",
")",
":",
"if",
"allow_subdomains",
":",
"redirect",
"=",
"UrlExtractor",
".",
"follow_redirects",
"(",
"\"http://\"",
"+",
"UrlExtractor",
".",
"get_allowed_domain",
"(",
"url",
")",
")",
"else",
":... | Determines the domain's robot.txt
:param str url: the url to work on
:param bool allow_subdomains: Determines if the robot.txt may be the
subdomain's
:return: the robot.txt's address
:raises Exception: if there's no robot.txt on the site's domain | [
"Determines",
"the",
"domain",
"s",
"robot",
".",
"txt"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L68-L107 | train | 221,136 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.sitemap_check | def sitemap_check(url):
"""
Sitemap-Crawler are supported by every site which have a
Sitemap set in the robots.txt.
:param str url: the url to work on
:return bool: Determines if Sitemap is set in the site's robots.txt
"""
response = urllib2.urlopen(UrlExtractor.get_sitemap_url(url, True))
# Check if "Sitemap" is set
return "Sitemap:" in response.read().decode('utf-8') | python | def sitemap_check(url):
"""
Sitemap-Crawler are supported by every site which have a
Sitemap set in the robots.txt.
:param str url: the url to work on
:return bool: Determines if Sitemap is set in the site's robots.txt
"""
response = urllib2.urlopen(UrlExtractor.get_sitemap_url(url, True))
# Check if "Sitemap" is set
return "Sitemap:" in response.read().decode('utf-8') | [
"def",
"sitemap_check",
"(",
"url",
")",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"UrlExtractor",
".",
"get_sitemap_url",
"(",
"url",
",",
"True",
")",
")",
"# Check if \"Sitemap\" is set",
"return",
"\"Sitemap:\"",
"in",
"response",
".",
"read",
... | Sitemap-Crawler are supported by every site which have a
Sitemap set in the robots.txt.
:param str url: the url to work on
:return bool: Determines if Sitemap is set in the site's robots.txt | [
"Sitemap",
"-",
"Crawler",
"are",
"supported",
"by",
"every",
"site",
"which",
"have",
"a",
"Sitemap",
"set",
"in",
"the",
"robots",
".",
"txt",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L110-L121 | train | 221,137 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.get_url_directory_string | def get_url_directory_string(url):
"""
Determines the url's directory string.
:param str url: the url to extract the directory string from
:return str: the directory string on the server
"""
domain = UrlExtractor.get_allowed_domain(url)
splitted_url = url.split('/')
# the following commented list comprehension could replace
# the following for, if not and break statement
# index = [index for index in range(len(splitted_url))
# if not re.search(domain, splitted_url[index]) is None][0]
for index in range(len(splitted_url)):
if not re.search(domain, splitted_url[index]) is None:
if splitted_url[-1] is "":
splitted_url = splitted_url[index + 1:-2]
else:
splitted_url = splitted_url[index + 1:-1]
break
return '_'.join(splitted_url) | python | def get_url_directory_string(url):
"""
Determines the url's directory string.
:param str url: the url to extract the directory string from
:return str: the directory string on the server
"""
domain = UrlExtractor.get_allowed_domain(url)
splitted_url = url.split('/')
# the following commented list comprehension could replace
# the following for, if not and break statement
# index = [index for index in range(len(splitted_url))
# if not re.search(domain, splitted_url[index]) is None][0]
for index in range(len(splitted_url)):
if not re.search(domain, splitted_url[index]) is None:
if splitted_url[-1] is "":
splitted_url = splitted_url[index + 1:-2]
else:
splitted_url = splitted_url[index + 1:-1]
break
return '_'.join(splitted_url) | [
"def",
"get_url_directory_string",
"(",
"url",
")",
":",
"domain",
"=",
"UrlExtractor",
".",
"get_allowed_domain",
"(",
"url",
")",
"splitted_url",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"# the following commented list comprehension could replace",
"# the following ... | Determines the url's directory string.
:param str url: the url to extract the directory string from
:return str: the directory string on the server | [
"Determines",
"the",
"url",
"s",
"directory",
"string",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L149-L172 | train | 221,138 |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | UrlExtractor.get_url_file_name | def get_url_file_name(url):
"""
Determines the url's file name.
:param str url: the url to extract the file name from
:return str: the filename (without the file extension) on the server
"""
url_root_ext = os.path.splitext(url)
if len(url_root_ext[1]) <= MAX_FILE_EXTENSION_LENGTH:
return os.path.split(url_root_ext[0])[1]
else:
return os.path.split(url)[1] | python | def get_url_file_name(url):
"""
Determines the url's file name.
:param str url: the url to extract the file name from
:return str: the filename (without the file extension) on the server
"""
url_root_ext = os.path.splitext(url)
if len(url_root_ext[1]) <= MAX_FILE_EXTENSION_LENGTH:
return os.path.split(url_root_ext[0])[1]
else:
return os.path.split(url)[1] | [
"def",
"get_url_file_name",
"(",
"url",
")",
":",
"url_root_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"url",
")",
"if",
"len",
"(",
"url_root_ext",
"[",
"1",
"]",
")",
"<=",
"MAX_FILE_EXTENSION_LENGTH",
":",
"return",
"os",
".",
"path",
".",
... | Determines the url's file name.
:param str url: the url to extract the file name from
:return str: the filename (without the file extension) on the server | [
"Determines",
"the",
"url",
"s",
"file",
"name",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L175-L187 | train | 221,139 |
fhamborg/news-please | newsplease/config.py | CrawlerConfig.load_config | def load_config(self):
"""
Loads the config-file
"""
self.__config = {}
# Parse sections, its options and put it in self.config.
for section in self.sections:
self.__config[section] = {}
options = self.parser.options(section)
# Parse options of each section
for option in options:
try:
opt = self.parser \
.get(section, option)
try:
self.__config[section][option] = literal_eval(opt)
except (SyntaxError, ValueError):
self.__config[section][option] = opt
self.log_output.append(
{"level": "debug",
"msg": "Option not literal_eval-parsable"
" (maybe string): [{0}] {1}"
.format(section, option)})
if self.__config[section][option] == -1:
self.log_output.append(
{"level": "debug",
"msg": "Skipping: [%s] %s" % (section, option)}
)
except ConfigParser.NoOptionError as exc:
self.log_output.append(
{"level": "error",
"msg": "Exception on [%s] %s: %s"
% (section, option, exc)}
)
self.__config[section][option] = None | python | def load_config(self):
"""
Loads the config-file
"""
self.__config = {}
# Parse sections, its options and put it in self.config.
for section in self.sections:
self.__config[section] = {}
options = self.parser.options(section)
# Parse options of each section
for option in options:
try:
opt = self.parser \
.get(section, option)
try:
self.__config[section][option] = literal_eval(opt)
except (SyntaxError, ValueError):
self.__config[section][option] = opt
self.log_output.append(
{"level": "debug",
"msg": "Option not literal_eval-parsable"
" (maybe string): [{0}] {1}"
.format(section, option)})
if self.__config[section][option] == -1:
self.log_output.append(
{"level": "debug",
"msg": "Skipping: [%s] %s" % (section, option)}
)
except ConfigParser.NoOptionError as exc:
self.log_output.append(
{"level": "error",
"msg": "Exception on [%s] %s: %s"
% (section, option, exc)}
)
self.__config[section][option] = None | [
"def",
"load_config",
"(",
"self",
")",
":",
"self",
".",
"__config",
"=",
"{",
"}",
"# Parse sections, its options and put it in self.config.",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"self",
".",
"__config",
"[",
"section",
"]",
"=",
"{",
"}",
... | Loads the config-file | [
"Loads",
"the",
"config",
"-",
"file"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L95-L134 | train | 221,140 |
fhamborg/news-please | newsplease/config.py | CrawlerConfig.handle_logging | def handle_logging(self):
"""
To allow devs to log as early as possible, logging will already be
handled here
"""
configure_logging(self.get_scrapy_options())
# Disable duplicates
self.__scrapy_options["LOG_ENABLED"] = False
# Now, after log-level is correctly set, lets log them.
for msg in self.log_output:
if msg["level"] is "error":
self.log.error(msg["msg"])
elif msg["level"] is "info":
self.log.info(msg["msg"])
elif msg["level"] is "debug":
self.log.debug(msg["msg"]) | python | def handle_logging(self):
"""
To allow devs to log as early as possible, logging will already be
handled here
"""
configure_logging(self.get_scrapy_options())
# Disable duplicates
self.__scrapy_options["LOG_ENABLED"] = False
# Now, after log-level is correctly set, lets log them.
for msg in self.log_output:
if msg["level"] is "error":
self.log.error(msg["msg"])
elif msg["level"] is "info":
self.log.info(msg["msg"])
elif msg["level"] is "debug":
self.log.debug(msg["msg"]) | [
"def",
"handle_logging",
"(",
"self",
")",
":",
"configure_logging",
"(",
"self",
".",
"get_scrapy_options",
"(",
")",
")",
"# Disable duplicates",
"self",
".",
"__scrapy_options",
"[",
"\"LOG_ENABLED\"",
"]",
"=",
"False",
"# Now, after log-level is correctly set, lets... | To allow devs to log as early as possible, logging will already be
handled here | [
"To",
"allow",
"devs",
"to",
"log",
"as",
"early",
"as",
"possible",
"logging",
"will",
"already",
"be",
"handled",
"here"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L148-L166 | train | 221,141 |
fhamborg/news-please | newsplease/config.py | CrawlerConfig.option | def option(self, option):
"""
Gets the option, set_section needs to be set before.
:param option (string): The option to get.
:return mixed: The option from from the config.
"""
if self.__current_section is None:
raise RuntimeError('No section set in option-getting')
return self.__config[self.__current_section][option] | python | def option(self, option):
"""
Gets the option, set_section needs to be set before.
:param option (string): The option to get.
:return mixed: The option from from the config.
"""
if self.__current_section is None:
raise RuntimeError('No section set in option-getting')
return self.__config[self.__current_section][option] | [
"def",
"option",
"(",
"self",
",",
"option",
")",
":",
"if",
"self",
".",
"__current_section",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'No section set in option-getting'",
")",
"return",
"self",
".",
"__config",
"[",
"self",
".",
"__current_section",
... | Gets the option, set_section needs to be set before.
:param option (string): The option to get.
:return mixed: The option from from the config. | [
"Gets",
"the",
"option",
"set_section",
"needs",
"to",
"be",
"set",
"before",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L194-L203 | train | 221,142 |
fhamborg/news-please | newsplease/config.py | JsonConfig.get_url_array | def get_url_array(self):
"""
Get all url-objects in an array
:return sites (array): The sites from the JSON-file
"""
urlarray = []
for urlobjects in self.__json_object["base_urls"]:
urlarray.append(urlobjects["url"])
return urlarray | python | def get_url_array(self):
"""
Get all url-objects in an array
:return sites (array): The sites from the JSON-file
"""
urlarray = []
for urlobjects in self.__json_object["base_urls"]:
urlarray.append(urlobjects["url"])
return urlarray | [
"def",
"get_url_array",
"(",
"self",
")",
":",
"urlarray",
"=",
"[",
"]",
"for",
"urlobjects",
"in",
"self",
".",
"__json_object",
"[",
"\"base_urls\"",
"]",
":",
"urlarray",
".",
"append",
"(",
"urlobjects",
"[",
"\"url\"",
"]",
")",
"return",
"urlarray"
... | Get all url-objects in an array
:return sites (array): The sites from the JSON-file | [
"Get",
"all",
"url",
"-",
"objects",
"in",
"an",
"array"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L293-L302 | train | 221,143 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_title.py | ComparerTitle.find_matches | def find_matches(self, list_title):
"""Checks if there are any matches between extracted titles.
:param list_title: A list, the extracted titles saved in a list
:return: A list, the matched titles
"""
list_title_matches = []
# Generate every possible tuple of titles and safe the matched string in a list.
for a, b, in itertools.combinations(list_title, 2):
if a == b:
list_title_matches.append(a)
return list_title_matches | python | def find_matches(self, list_title):
"""Checks if there are any matches between extracted titles.
:param list_title: A list, the extracted titles saved in a list
:return: A list, the matched titles
"""
list_title_matches = []
# Generate every possible tuple of titles and safe the matched string in a list.
for a, b, in itertools.combinations(list_title, 2):
if a == b:
list_title_matches.append(a)
return list_title_matches | [
"def",
"find_matches",
"(",
"self",
",",
"list_title",
")",
":",
"list_title_matches",
"=",
"[",
"]",
"# Generate every possible tuple of titles and safe the matched string in a list.",
"for",
"a",
",",
"b",
",",
"in",
"itertools",
".",
"combinations",
"(",
"list_title"... | Checks if there are any matches between extracted titles.
:param list_title: A list, the extracted titles saved in a list
:return: A list, the matched titles | [
"Checks",
"if",
"there",
"are",
"any",
"matches",
"between",
"extracted",
"titles",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_title.py#L7-L19 | train | 221,144 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_title.py | ComparerTitle.extract_match | def extract_match(self, list_title_matches):
"""Extract the title with the most matches from the list.
:param list_title_matches: A list, the extracted titles which match with others
:return: A string, the most frequently extracted title.
"""
# Create a set of the extracted titles
list_title_matches_set = set(list_title_matches)
list_title_count = []
# Count how often a title was matched and safe as tuple in list.
for match in list_title_matches_set:
list_title_count.append((list_title_matches.count(match), match))
if list_title_count and max(list_title_count)[0] != min(list_title_count)[0]:
return max(list_title_count)[1]
return None | python | def extract_match(self, list_title_matches):
"""Extract the title with the most matches from the list.
:param list_title_matches: A list, the extracted titles which match with others
:return: A string, the most frequently extracted title.
"""
# Create a set of the extracted titles
list_title_matches_set = set(list_title_matches)
list_title_count = []
# Count how often a title was matched and safe as tuple in list.
for match in list_title_matches_set:
list_title_count.append((list_title_matches.count(match), match))
if list_title_count and max(list_title_count)[0] != min(list_title_count)[0]:
return max(list_title_count)[1]
return None | [
"def",
"extract_match",
"(",
"self",
",",
"list_title_matches",
")",
":",
"# Create a set of the extracted titles",
"list_title_matches_set",
"=",
"set",
"(",
"list_title_matches",
")",
"list_title_count",
"=",
"[",
"]",
"# Count how often a title was matched and safe as tuple ... | Extract the title with the most matches from the list.
:param list_title_matches: A list, the extracted titles which match with others
:return: A string, the most frequently extracted title. | [
"Extract",
"the",
"title",
"with",
"the",
"most",
"matches",
"from",
"the",
"list",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_title.py#L21-L38 | train | 221,145 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_title.py | ComparerTitle.extract | def extract(self, item, list_article_candidate):
"""Compares the extracted titles.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely title
"""
list_title = []
# Save every title from the candidates in list_title.
for article_candidate in list_article_candidate:
if article_candidate.title is not None:
list_title.append(article_candidate.title)
if not list_title:
return None
# Creates a list with matched titles
list_title_matches = self.find_matches(list_title)
# Extract title with the most matches
matched_title = self.extract_match(list_title_matches)
# Returns the matched title if there is one, else returns the shortest title
if matched_title:
return matched_title
else:
if list_title_matches:
return self.choose_shortest_title(set(list_title_matches))
else:
return self.choose_shortest_title(list_title) | python | def extract(self, item, list_article_candidate):
"""Compares the extracted titles.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely title
"""
list_title = []
# Save every title from the candidates in list_title.
for article_candidate in list_article_candidate:
if article_candidate.title is not None:
list_title.append(article_candidate.title)
if not list_title:
return None
# Creates a list with matched titles
list_title_matches = self.find_matches(list_title)
# Extract title with the most matches
matched_title = self.extract_match(list_title_matches)
# Returns the matched title if there is one, else returns the shortest title
if matched_title:
return matched_title
else:
if list_title_matches:
return self.choose_shortest_title(set(list_title_matches))
else:
return self.choose_shortest_title(list_title) | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"list_title",
"=",
"[",
"]",
"# Save every title from the candidates in list_title.",
"for",
"article_candidate",
"in",
"list_article_candidate",
":",
"if",
"article_candidate",
".",
"t... | Compares the extracted titles.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely title | [
"Compares",
"the",
"extracted",
"titles",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_title.py#L53-L82 | train | 221,146 |
fhamborg/news-please | newsplease/__main__.py | cli | def cli(cfg_file_path, resume, reset_elasticsearch, reset_mysql, reset_json, reset_all, no_confirm):
"A generic news crawler and extractor."
if reset_all:
reset_elasticsearch = True
reset_json = True
reset_mysql = True
if cfg_file_path and not cfg_file_path.endswith(os.path.sep):
cfg_file_path += os.path.sep
NewsPleaseLauncher(cfg_file_path, resume, reset_elasticsearch, reset_json, reset_mysql, no_confirm) | python | def cli(cfg_file_path, resume, reset_elasticsearch, reset_mysql, reset_json, reset_all, no_confirm):
"A generic news crawler and extractor."
if reset_all:
reset_elasticsearch = True
reset_json = True
reset_mysql = True
if cfg_file_path and not cfg_file_path.endswith(os.path.sep):
cfg_file_path += os.path.sep
NewsPleaseLauncher(cfg_file_path, resume, reset_elasticsearch, reset_json, reset_mysql, no_confirm) | [
"def",
"cli",
"(",
"cfg_file_path",
",",
"resume",
",",
"reset_elasticsearch",
",",
"reset_mysql",
",",
"reset_json",
",",
"reset_all",
",",
"no_confirm",
")",
":",
"if",
"reset_all",
":",
"reset_elasticsearch",
"=",
"True",
"reset_json",
"=",
"True",
"reset_mys... | A generic news crawler and extractor. | [
"A",
"generic",
"news",
"crawler",
"and",
"extractor",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L634-L645 | train | 221,147 |
fhamborg/news-please | newsplease/__main__.py | NewsPleaseLauncher.manage_crawlers | def manage_crawlers(self):
"""
Manages all crawlers, threads and limites the number of parallel
running threads.
"""
sites = self.json.get_site_objects()
for index, site in enumerate(sites):
if "daemonize" in site:
self.daemon_list.add_daemon(index, site["daemonize"])
elif "additional_rss_daemon" in site:
self.daemon_list.add_daemon(index,
site["additional_rss_daemon"])
self.crawler_list.append_item(index)
else:
self.crawler_list.append_item(index)
num_threads = self.cfg.section('Crawler')[
'number_of_parallel_crawlers']
if self.crawler_list.len() < num_threads:
num_threads = self.crawler_list.len()
for _ in range(num_threads):
thread = threading.Thread(target=self.manage_crawler,
args=(),
kwargs={})
self.threads.append(thread)
thread.start()
num_daemons = self.cfg.section('Crawler')['number_of_parallel_daemons']
if self.daemon_list.len() < num_daemons:
num_daemons = self.daemon_list.len()
for _ in range(num_daemons):
thread_daemonized = threading.Thread(target=self.manage_daemon,
args=(),
kwargs={})
self.threads_daemonized.append(thread_daemonized)
thread_daemonized.start()
while not self.shutdown:
try:
time.sleep(10)
# if we are not in daemon mode and no crawler is running any longer,
# all articles have been crawled and the tool can shut down
if self.daemon_list.len() == 0 and self.number_of_active_crawlers == 0:
self.graceful_stop()
break
except IOError:
# This exception will only occur on kill-process on windows.
# The process should be killed, thus this exception is
# irrelevant.
pass | python | def manage_crawlers(self):
"""
Manages all crawlers, threads and limites the number of parallel
running threads.
"""
sites = self.json.get_site_objects()
for index, site in enumerate(sites):
if "daemonize" in site:
self.daemon_list.add_daemon(index, site["daemonize"])
elif "additional_rss_daemon" in site:
self.daemon_list.add_daemon(index,
site["additional_rss_daemon"])
self.crawler_list.append_item(index)
else:
self.crawler_list.append_item(index)
num_threads = self.cfg.section('Crawler')[
'number_of_parallel_crawlers']
if self.crawler_list.len() < num_threads:
num_threads = self.crawler_list.len()
for _ in range(num_threads):
thread = threading.Thread(target=self.manage_crawler,
args=(),
kwargs={})
self.threads.append(thread)
thread.start()
num_daemons = self.cfg.section('Crawler')['number_of_parallel_daemons']
if self.daemon_list.len() < num_daemons:
num_daemons = self.daemon_list.len()
for _ in range(num_daemons):
thread_daemonized = threading.Thread(target=self.manage_daemon,
args=(),
kwargs={})
self.threads_daemonized.append(thread_daemonized)
thread_daemonized.start()
while not self.shutdown:
try:
time.sleep(10)
# if we are not in daemon mode and no crawler is running any longer,
# all articles have been crawled and the tool can shut down
if self.daemon_list.len() == 0 and self.number_of_active_crawlers == 0:
self.graceful_stop()
break
except IOError:
# This exception will only occur on kill-process on windows.
# The process should be killed, thus this exception is
# irrelevant.
pass | [
"def",
"manage_crawlers",
"(",
"self",
")",
":",
"sites",
"=",
"self",
".",
"json",
".",
"get_site_objects",
"(",
")",
"for",
"index",
",",
"site",
"in",
"enumerate",
"(",
"sites",
")",
":",
"if",
"\"daemonize\"",
"in",
"site",
":",
"self",
".",
"daemo... | Manages all crawlers, threads and limites the number of parallel
running threads. | [
"Manages",
"all",
"crawlers",
"threads",
"and",
"limites",
"the",
"number",
"of",
"parallel",
"running",
"threads",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L152-L204 | train | 221,148 |
fhamborg/news-please | newsplease/__main__.py | NewsPleaseLauncher.manage_crawler | def manage_crawler(self):
"""
Manages a normal crawler thread.
When a crawler finished, it loads another one if there are still sites
to crawl.
"""
index = True
self.number_of_active_crawlers += 1
while not self.shutdown and index is not None:
index = self.crawler_list.get_next_item()
if index is None:
self.number_of_active_crawlers -= 1
break
self.start_crawler(index) | python | def manage_crawler(self):
"""
Manages a normal crawler thread.
When a crawler finished, it loads another one if there are still sites
to crawl.
"""
index = True
self.number_of_active_crawlers += 1
while not self.shutdown and index is not None:
index = self.crawler_list.get_next_item()
if index is None:
self.number_of_active_crawlers -= 1
break
self.start_crawler(index) | [
"def",
"manage_crawler",
"(",
"self",
")",
":",
"index",
"=",
"True",
"self",
".",
"number_of_active_crawlers",
"+=",
"1",
"while",
"not",
"self",
".",
"shutdown",
"and",
"index",
"is",
"not",
"None",
":",
"index",
"=",
"self",
".",
"crawler_list",
".",
... | Manages a normal crawler thread.
When a crawler finished, it loads another one if there are still sites
to crawl. | [
"Manages",
"a",
"normal",
"crawler",
"thread",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L206-L221 | train | 221,149 |
fhamborg/news-please | newsplease/__main__.py | NewsPleaseLauncher.manage_daemon | def manage_daemon(self):
"""
Manages a daemonized crawler thread.
Once a crawler it finished, it loads the next one.
"""
while not self.shutdown:
# next scheduled daemon, tuple (time, index)
item = self.daemon_list.get_next_item()
cur = time.time()
pajama_time = item[0] - cur
if pajama_time > 0:
self.thread_event.wait(pajama_time)
if not self.shutdown:
self.start_crawler(item[1], daemonize=True) | python | def manage_daemon(self):
"""
Manages a daemonized crawler thread.
Once a crawler it finished, it loads the next one.
"""
while not self.shutdown:
# next scheduled daemon, tuple (time, index)
item = self.daemon_list.get_next_item()
cur = time.time()
pajama_time = item[0] - cur
if pajama_time > 0:
self.thread_event.wait(pajama_time)
if not self.shutdown:
self.start_crawler(item[1], daemonize=True) | [
"def",
"manage_daemon",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"shutdown",
":",
"# next scheduled daemon, tuple (time, index)",
"item",
"=",
"self",
".",
"daemon_list",
".",
"get_next_item",
"(",
")",
"cur",
"=",
"time",
".",
"time",
"(",
")",
"... | Manages a daemonized crawler thread.
Once a crawler it finished, it loads the next one. | [
"Manages",
"a",
"daemonized",
"crawler",
"thread",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L223-L237 | train | 221,150 |
fhamborg/news-please | newsplease/__main__.py | NewsPleaseLauncher.start_crawler | def start_crawler(self, index, daemonize=False):
"""
Starts a crawler from the input-array.
:param int index: The array-index of the site
:param int daemonize: Bool if the crawler is supposed to be daemonized
(to delete the JOBDIR)
"""
call_process = [sys.executable,
self.__single_crawler,
self.cfg_file_path,
self.json_file_path,
"%s" % index,
"%s" % self.shall_resume,
"%s" % daemonize]
self.log.debug("Calling Process: %s", call_process)
crawler = Popen(call_process,
stderr=None,
stdout=None)
crawler.communicate()
self.crawlers.append(crawler) | python | def start_crawler(self, index, daemonize=False):
"""
Starts a crawler from the input-array.
:param int index: The array-index of the site
:param int daemonize: Bool if the crawler is supposed to be daemonized
(to delete the JOBDIR)
"""
call_process = [sys.executable,
self.__single_crawler,
self.cfg_file_path,
self.json_file_path,
"%s" % index,
"%s" % self.shall_resume,
"%s" % daemonize]
self.log.debug("Calling Process: %s", call_process)
crawler = Popen(call_process,
stderr=None,
stdout=None)
crawler.communicate()
self.crawlers.append(crawler) | [
"def",
"start_crawler",
"(",
"self",
",",
"index",
",",
"daemonize",
"=",
"False",
")",
":",
"call_process",
"=",
"[",
"sys",
".",
"executable",
",",
"self",
".",
"__single_crawler",
",",
"self",
".",
"cfg_file_path",
",",
"self",
".",
"json_file_path",
",... | Starts a crawler from the input-array.
:param int index: The array-index of the site
:param int daemonize: Bool if the crawler is supposed to be daemonized
(to delete the JOBDIR) | [
"Starts",
"a",
"crawler",
"from",
"the",
"input",
"-",
"array",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L239-L261 | train | 221,151 |
fhamborg/news-please | newsplease/__main__.py | NewsPleaseLauncher.graceful_stop | def graceful_stop(self, signal_number=None, stack_frame=None):
"""
This function will be called when a graceful-stop is initiated.
"""
stop_msg = "Hard" if self.shutdown else "Graceful"
if signal_number is None:
self.log.info("%s stop called manually. "
"Shutting down.", stop_msg)
else:
self.log.info("%s stop called by signal #%s. Shutting down."
"Stack Frame: %s",
stop_msg, signal_number, stack_frame)
self.shutdown = True
self.crawler_list.stop()
self.daemon_list.stop()
self.thread_event.set()
return True | python | def graceful_stop(self, signal_number=None, stack_frame=None):
"""
This function will be called when a graceful-stop is initiated.
"""
stop_msg = "Hard" if self.shutdown else "Graceful"
if signal_number is None:
self.log.info("%s stop called manually. "
"Shutting down.", stop_msg)
else:
self.log.info("%s stop called by signal #%s. Shutting down."
"Stack Frame: %s",
stop_msg, signal_number, stack_frame)
self.shutdown = True
self.crawler_list.stop()
self.daemon_list.stop()
self.thread_event.set()
return True | [
"def",
"graceful_stop",
"(",
"self",
",",
"signal_number",
"=",
"None",
",",
"stack_frame",
"=",
"None",
")",
":",
"stop_msg",
"=",
"\"Hard\"",
"if",
"self",
".",
"shutdown",
"else",
"\"Graceful\"",
"if",
"signal_number",
"is",
"None",
":",
"self",
".",
"l... | This function will be called when a graceful-stop is initiated. | [
"This",
"function",
"will",
"be",
"called",
"when",
"a",
"graceful",
"-",
"stop",
"is",
"initiated",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L263-L279 | train | 221,152 |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | SavepathParser.time_replacer | def time_replacer(match, timestamp):
"""
Transforms the timestamp to the format the regex match determines.
:param str match: the regex match
:param time timestamp: the timestamp to format with match.group(1)
:return str: the timestamp formated with strftime the way the
regex-match within the first set of braces defines
"""
# match.group(0) = entire match
# match.group(1) = match in braces #1
return time.strftime(match.group(1), time.gmtime(timestamp)) | python | def time_replacer(match, timestamp):
"""
Transforms the timestamp to the format the regex match determines.
:param str match: the regex match
:param time timestamp: the timestamp to format with match.group(1)
:return str: the timestamp formated with strftime the way the
regex-match within the first set of braces defines
"""
# match.group(0) = entire match
# match.group(1) = match in braces #1
return time.strftime(match.group(1), time.gmtime(timestamp)) | [
"def",
"time_replacer",
"(",
"match",
",",
"timestamp",
")",
":",
"# match.group(0) = entire match",
"# match.group(1) = match in braces #1",
"return",
"time",
".",
"strftime",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"time",
".",
"gmtime",
"(",
"timestamp"... | Transforms the timestamp to the format the regex match determines.
:param str match: the regex match
:param time timestamp: the timestamp to format with match.group(1)
:return str: the timestamp formated with strftime the way the
regex-match within the first set of braces defines | [
"Transforms",
"the",
"timestamp",
"to",
"the",
"format",
"the",
"regex",
"match",
"determines",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L76-L87 | train | 221,153 |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | SavepathParser.append_md5_if_too_long | def append_md5_if_too_long(component, size):
"""
Trims the component if it is longer than size and appends the
component's md5. Total must be of length size.
:param str component: component to work on
:param int size: component's size limit
:return str: component and appended md5 trimmed to be of length size
"""
if len(component) > size:
if size > 32:
component_size = size - 32 - 1
return "%s_%s" % (component[:component_size],
hashlib.md5(component.encode('utf-8')).hexdigest())
else:
return hashlib.md5(component.encode('utf-8')).hexdigest()[:size]
else:
return component | python | def append_md5_if_too_long(component, size):
"""
Trims the component if it is longer than size and appends the
component's md5. Total must be of length size.
:param str component: component to work on
:param int size: component's size limit
:return str: component and appended md5 trimmed to be of length size
"""
if len(component) > size:
if size > 32:
component_size = size - 32 - 1
return "%s_%s" % (component[:component_size],
hashlib.md5(component.encode('utf-8')).hexdigest())
else:
return hashlib.md5(component.encode('utf-8')).hexdigest()[:size]
else:
return component | [
"def",
"append_md5_if_too_long",
"(",
"component",
",",
"size",
")",
":",
"if",
"len",
"(",
"component",
")",
">",
"size",
":",
"if",
"size",
">",
"32",
":",
"component_size",
"=",
"size",
"-",
"32",
"-",
"1",
"return",
"\"%s_%s\"",
"%",
"(",
"componen... | Trims the component if it is longer than size and appends the
component's md5. Total must be of length size.
:param str component: component to work on
:param int size: component's size limit
:return str: component and appended md5 trimmed to be of length size | [
"Trims",
"the",
"component",
"if",
"it",
"is",
"longer",
"than",
"size",
"and",
"appends",
"the",
"component",
"s",
"md5",
".",
"Total",
"must",
"be",
"of",
"length",
"size",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L90-L108 | train | 221,154 |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | SavepathParser.remove_not_allowed_chars | def remove_not_allowed_chars(savepath):
"""
Removes invalid filepath characters from the savepath.
:param str savepath: the savepath to work on
:return str: the savepath without invalid filepath characters
"""
split_savepath = os.path.splitdrive(savepath)
# https://msdn.microsoft.com/en-us/library/aa365247.aspx
savepath_without_invalid_chars = re.sub(r'<|>|:|\"|\||\?|\*', '_',
split_savepath[1])
return split_savepath[0] + savepath_without_invalid_chars | python | def remove_not_allowed_chars(savepath):
"""
Removes invalid filepath characters from the savepath.
:param str savepath: the savepath to work on
:return str: the savepath without invalid filepath characters
"""
split_savepath = os.path.splitdrive(savepath)
# https://msdn.microsoft.com/en-us/library/aa365247.aspx
savepath_without_invalid_chars = re.sub(r'<|>|:|\"|\||\?|\*', '_',
split_savepath[1])
return split_savepath[0] + savepath_without_invalid_chars | [
"def",
"remove_not_allowed_chars",
"(",
"savepath",
")",
":",
"split_savepath",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"savepath",
")",
"# https://msdn.microsoft.com/en-us/library/aa365247.aspx",
"savepath_without_invalid_chars",
"=",
"re",
".",
"sub",
"(",
"r'... | Removes invalid filepath characters from the savepath.
:param str savepath: the savepath to work on
:return str: the savepath without invalid filepath characters | [
"Removes",
"invalid",
"filepath",
"characters",
"from",
"the",
"savepath",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L219-L230 | train | 221,155 |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | SavepathParser.get_abs_path_static | def get_abs_path_static(savepath, relative_to_path):
"""
Figures out the savepath's absolute version.
:param str savepath: the savepath to return an absolute version of
:param str relative_to_path: the file path this savepath should be
relative to
:return str: absolute version of savepath
"""
if os.path.isabs(savepath):
return os.path.abspath(savepath)
else:
return os.path.abspath(
os.path.join(relative_to_path, (savepath))
) | python | def get_abs_path_static(savepath, relative_to_path):
"""
Figures out the savepath's absolute version.
:param str savepath: the savepath to return an absolute version of
:param str relative_to_path: the file path this savepath should be
relative to
:return str: absolute version of savepath
"""
if os.path.isabs(savepath):
return os.path.abspath(savepath)
else:
return os.path.abspath(
os.path.join(relative_to_path, (savepath))
) | [
"def",
"get_abs_path_static",
"(",
"savepath",
",",
"relative_to_path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"savepath",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"savepath",
")",
"else",
":",
"return",
"os",
".",
"pa... | Figures out the savepath's absolute version.
:param str savepath: the savepath to return an absolute version of
:param str relative_to_path: the file path this savepath should be
relative to
:return str: absolute version of savepath | [
"Figures",
"out",
"the",
"savepath",
"s",
"absolute",
"version",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L233-L247 | train | 221,156 |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | SavepathParser.get_base_path | def get_base_path(path):
"""
Determines the longest possible beginning of a path that does not
contain a %-Symbol.
/this/is/a/pa%th would become /this/is/a
:param str path: the path to get the base from
:return: the path's base
"""
if "%" not in path:
return path
path = os.path.split(path)[0]
while "%" in path:
path = os.path.split(path)[0]
return path | python | def get_base_path(path):
"""
Determines the longest possible beginning of a path that does not
contain a %-Symbol.
/this/is/a/pa%th would become /this/is/a
:param str path: the path to get the base from
:return: the path's base
"""
if "%" not in path:
return path
path = os.path.split(path)[0]
while "%" in path:
path = os.path.split(path)[0]
return path | [
"def",
"get_base_path",
"(",
"path",
")",
":",
"if",
"\"%\"",
"not",
"in",
"path",
":",
"return",
"path",
"path",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"0",
"]",
"while",
"\"%\"",
"in",
"path",
":",
"path",
"=",
"os",
".",
... | Determines the longest possible beginning of a path that does not
contain a %-Symbol.
/this/is/a/pa%th would become /this/is/a
:param str path: the path to get the base from
:return: the path's base | [
"Determines",
"the",
"longest",
"possible",
"beginning",
"of",
"a",
"path",
"that",
"does",
"not",
"contain",
"a",
"%",
"-",
"Symbol",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L260-L278 | train | 221,157 |
fhamborg/news-please | newsplease/helper_classes/savepath_parser.py | SavepathParser.get_max_url_file_name_length | def get_max_url_file_name_length(savepath):
"""
Determines the max length for any max... parts.
:param str savepath: absolute savepath to work on
:return: max. allowed number of chars for any of the max... parts
"""
number_occurrences = savepath.count('%max_url_file_name')
number_occurrences += savepath.count('%appendmd5_max_url_file_name')
savepath_copy = savepath
size_without_max_url_file_name = len(
savepath_copy.replace('%max_url_file_name', '')
.replace('%appendmd5_max_url_file_name', '')
)
# Windows: max file path length is 260 characters including
# NULL (string end)
max_size = 260 - 1 - size_without_max_url_file_name
max_size_per_occurrence = max_size / number_occurrences
return max_size_per_occurrence | python | def get_max_url_file_name_length(savepath):
"""
Determines the max length for any max... parts.
:param str savepath: absolute savepath to work on
:return: max. allowed number of chars for any of the max... parts
"""
number_occurrences = savepath.count('%max_url_file_name')
number_occurrences += savepath.count('%appendmd5_max_url_file_name')
savepath_copy = savepath
size_without_max_url_file_name = len(
savepath_copy.replace('%max_url_file_name', '')
.replace('%appendmd5_max_url_file_name', '')
)
# Windows: max file path length is 260 characters including
# NULL (string end)
max_size = 260 - 1 - size_without_max_url_file_name
max_size_per_occurrence = max_size / number_occurrences
return max_size_per_occurrence | [
"def",
"get_max_url_file_name_length",
"(",
"savepath",
")",
":",
"number_occurrences",
"=",
"savepath",
".",
"count",
"(",
"'%max_url_file_name'",
")",
"number_occurrences",
"+=",
"savepath",
".",
"count",
"(",
"'%appendmd5_max_url_file_name'",
")",
"savepath_copy",
"=... | Determines the max length for any max... parts.
:param str savepath: absolute savepath to work on
:return: max. allowed number of chars for any of the max... parts | [
"Determines",
"the",
"max",
"length",
"for",
"any",
"max",
"...",
"parts",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L295-L316 | train | 221,158 |
fhamborg/news-please | newsplease/pipeline/extractor/extractors/readability_extractor.py | ReadabilityExtractor.extract | def extract(self, item):
"""Creates an readability document and returns an ArticleCandidate containing article title and text.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data.
"""
doc = Document(deepcopy(item['spider_response'].body))
description = doc.summary()
article_candidate = ArticleCandidate()
article_candidate.extractor = self._name
article_candidate.title = doc.short_title()
article_candidate.description = description
article_candidate.text = self._text(item)
article_candidate.topimage = self._topimage(item)
article_candidate.author = self._author(item)
article_candidate.publish_date = self._publish_date(item)
article_candidate.language = self._language(item)
return article_candidate | python | def extract(self, item):
"""Creates an readability document and returns an ArticleCandidate containing article title and text.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data.
"""
doc = Document(deepcopy(item['spider_response'].body))
description = doc.summary()
article_candidate = ArticleCandidate()
article_candidate.extractor = self._name
article_candidate.title = doc.short_title()
article_candidate.description = description
article_candidate.text = self._text(item)
article_candidate.topimage = self._topimage(item)
article_candidate.author = self._author(item)
article_candidate.publish_date = self._publish_date(item)
article_candidate.language = self._language(item)
return article_candidate | [
"def",
"extract",
"(",
"self",
",",
"item",
")",
":",
"doc",
"=",
"Document",
"(",
"deepcopy",
"(",
"item",
"[",
"'spider_response'",
"]",
".",
"body",
")",
")",
"description",
"=",
"doc",
".",
"summary",
"(",
")",
"article_candidate",
"=",
"ArticleCandi... | Creates an readability document and returns an ArticleCandidate containing article title and text.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data. | [
"Creates",
"an",
"readability",
"document",
"and",
"returns",
"an",
"ArticleCandidate",
"containing",
"article",
"title",
"and",
"text",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/readability_extractor.py#L18-L38 | train | 221,159 |
fhamborg/news-please | newsplease/pipeline/extractor/article_extractor.py | Extractor.extract | def extract(self, item):
"""Runs the HTML-response trough a list of initialized extractors, a cleaner and compares the results.
:param item: NewscrawlerItem to be processed.
:return: An updated NewscrawlerItem including the results of the extraction
"""
article_candidates = []
for extractor in self.extractor_list:
article_candidates.append(extractor.extract(item))
article_candidates = self.cleaner.clean(article_candidates)
article = self.comparer.compare(item, article_candidates)
item['article_title'] = article.title
item['article_description'] = article.description
item['article_text'] = article.text
item['article_image'] = article.topimage
item['article_author'] = article.author
item['article_publish_date'] = article.publish_date
item['article_language'] = article.language
return item | python | def extract(self, item):
"""Runs the HTML-response trough a list of initialized extractors, a cleaner and compares the results.
:param item: NewscrawlerItem to be processed.
:return: An updated NewscrawlerItem including the results of the extraction
"""
article_candidates = []
for extractor in self.extractor_list:
article_candidates.append(extractor.extract(item))
article_candidates = self.cleaner.clean(article_candidates)
article = self.comparer.compare(item, article_candidates)
item['article_title'] = article.title
item['article_description'] = article.description
item['article_text'] = article.text
item['article_image'] = article.topimage
item['article_author'] = article.author
item['article_publish_date'] = article.publish_date
item['article_language'] = article.language
return item | [
"def",
"extract",
"(",
"self",
",",
"item",
")",
":",
"article_candidates",
"=",
"[",
"]",
"for",
"extractor",
"in",
"self",
".",
"extractor_list",
":",
"article_candidates",
".",
"append",
"(",
"extractor",
".",
"extract",
"(",
"item",
")",
")",
"article_... | Runs the HTML-response trough a list of initialized extractors, a cleaner and compares the results.
:param item: NewscrawlerItem to be processed.
:return: An updated NewscrawlerItem including the results of the extraction | [
"Runs",
"the",
"HTML",
"-",
"response",
"trough",
"a",
"list",
"of",
"initialized",
"extractors",
"a",
"cleaner",
"and",
"compares",
"the",
"results",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/article_extractor.py#L43-L66 | train | 221,160 |
fhamborg/news-please | newsplease/helper_classes/parse_crawler.py | ParseCrawler.pass_to_pipeline_if_article | def pass_to_pipeline_if_article(
self,
response,
source_domain,
original_url,
rss_title=None
):
"""
Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
"""
if self.helper.heuristics.is_article(response, original_url):
return self.pass_to_pipeline(
response, source_domain, rss_title=None) | python | def pass_to_pipeline_if_article(
self,
response,
source_domain,
original_url,
rss_title=None
):
"""
Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
"""
if self.helper.heuristics.is_article(response, original_url):
return self.pass_to_pipeline(
response, source_domain, rss_title=None) | [
"def",
"pass_to_pipeline_if_article",
"(",
"self",
",",
"response",
",",
"source_domain",
",",
"original_url",
",",
"rss_title",
"=",
"None",
")",
":",
"if",
"self",
".",
"helper",
".",
"heuristics",
".",
"is_article",
"(",
"response",
",",
"original_url",
")"... | Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline | [
"Responsible",
"for",
"passing",
"a",
"NewscrawlerItem",
"to",
"the",
"pipeline",
"if",
"the",
"response",
"contains",
"an",
"article",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/parse_crawler.py#L27-L46 | train | 221,161 |
fhamborg/news-please | newsplease/helper_classes/parse_crawler.py | ParseCrawler.recursive_requests | def recursive_requests(response, spider, ignore_regex='',
ignore_file_extensions='pdf'):
"""
Manages recursive requests.
Determines urls to recursivly crawl if they do not match certain file
extensions and do not match a certain regex set in the config file.
:param obj response: the response to extract any urls from
:param obj spider: the crawler the callback should be called on
:param str ignore_regex: a regex that should that any extracted url
shouldn't match
:param str ignore_file_extensions: a regex of file extensions that the
end of any url may not match
:return list: Scrapy Requests
"""
# Recursivly crawl all URLs on the current page
# that do not point to irrelevant file types
# or contain any of the given ignore_regex regexes
return [
scrapy.Request(response.urljoin(href), callback=spider.parse)
for href in response.css("a::attr('href')").extract() if re.match(
r'.*\.' + ignore_file_extensions +
r'$', response.urljoin(href), re.IGNORECASE
) is None
and len(re.match(ignore_regex, response.urljoin(href)).group(0)) == 0
] | python | def recursive_requests(response, spider, ignore_regex='',
ignore_file_extensions='pdf'):
"""
Manages recursive requests.
Determines urls to recursivly crawl if they do not match certain file
extensions and do not match a certain regex set in the config file.
:param obj response: the response to extract any urls from
:param obj spider: the crawler the callback should be called on
:param str ignore_regex: a regex that should that any extracted url
shouldn't match
:param str ignore_file_extensions: a regex of file extensions that the
end of any url may not match
:return list: Scrapy Requests
"""
# Recursivly crawl all URLs on the current page
# that do not point to irrelevant file types
# or contain any of the given ignore_regex regexes
return [
scrapy.Request(response.urljoin(href), callback=spider.parse)
for href in response.css("a::attr('href')").extract() if re.match(
r'.*\.' + ignore_file_extensions +
r'$', response.urljoin(href), re.IGNORECASE
) is None
and len(re.match(ignore_regex, response.urljoin(href)).group(0)) == 0
] | [
"def",
"recursive_requests",
"(",
"response",
",",
"spider",
",",
"ignore_regex",
"=",
"''",
",",
"ignore_file_extensions",
"=",
"'pdf'",
")",
":",
"# Recursivly crawl all URLs on the current page",
"# that do not point to irrelevant file types",
"# or contain any of the given ig... | Manages recursive requests.
Determines urls to recursivly crawl if they do not match certain file
extensions and do not match a certain regex set in the config file.
:param obj response: the response to extract any urls from
:param obj spider: the crawler the callback should be called on
:param str ignore_regex: a regex that should that any extracted url
shouldn't match
:param str ignore_file_extensions: a regex of file extensions that the
end of any url may not match
:return list: Scrapy Requests | [
"Manages",
"recursive",
"requests",
".",
"Determines",
"urls",
"to",
"recursivly",
"crawl",
"if",
"they",
"do",
"not",
"match",
"certain",
"file",
"extensions",
"and",
"do",
"not",
"match",
"a",
"certain",
"regex",
"set",
"in",
"the",
"config",
"file",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/parse_crawler.py#L87-L112 | train | 221,162 |
fhamborg/news-please | newsplease/helper_classes/parse_crawler.py | ParseCrawler.content_type | def content_type(self, response):
"""
Ensures the response is of type
:param obj response: The scrapy response
:return bool: Determines wether the response is of the correct type
"""
if not re_html.match(response.headers.get('Content-Type').decode('utf-8')):
self.log.warn(
"Dropped: %s's content is not of type "
"text/html but %s", response.url, response.headers.get('Content-Type')
)
return False
else:
return True | python | def content_type(self, response):
"""
Ensures the response is of type
:param obj response: The scrapy response
:return bool: Determines wether the response is of the correct type
"""
if not re_html.match(response.headers.get('Content-Type').decode('utf-8')):
self.log.warn(
"Dropped: %s's content is not of type "
"text/html but %s", response.url, response.headers.get('Content-Type')
)
return False
else:
return True | [
"def",
"content_type",
"(",
"self",
",",
"response",
")",
":",
"if",
"not",
"re_html",
".",
"match",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
":",
"self",
".",
"log",
".",
"warn... | Ensures the response is of type
:param obj response: The scrapy response
:return bool: Determines wether the response is of the correct type | [
"Ensures",
"the",
"response",
"is",
"of",
"type"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/parse_crawler.py#L114-L128 | train | 221,163 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_topimage.py | ComparerTopimage.extract | def extract(self, item, list_article_candidate):
"""Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
"""
list_topimage = []
for article_candidate in list_article_candidate:
if article_candidate.topimage is not None:
# Changes a relative path of an image to the absolute path of the given url.
article_candidate.topimage = self.image_absoulte_path(item['url'], article_candidate.topimage)
list_topimage.append((article_candidate.topimage, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_topimage) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_topimage if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no topimage extracted by newspaper, return the first result of list_topimage.
return list_topimage[0][0]
else:
return list_newspaper[0][0] | python | def extract(self, item, list_article_candidate):
"""Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
"""
list_topimage = []
for article_candidate in list_article_candidate:
if article_candidate.topimage is not None:
# Changes a relative path of an image to the absolute path of the given url.
article_candidate.topimage = self.image_absoulte_path(item['url'], article_candidate.topimage)
list_topimage.append((article_candidate.topimage, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_topimage) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_topimage if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no topimage extracted by newspaper, return the first result of list_topimage.
return list_topimage[0][0]
else:
return list_newspaper[0][0] | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"list_topimage",
"=",
"[",
"]",
"for",
"article_candidate",
"in",
"list_article_candidate",
":",
"if",
"article_candidate",
".",
"topimage",
"is",
"not",
"None",
":",
"# Changes... | Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image | [
"Compares",
"the",
"extracted",
"top",
"images",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_topimage.py#L15-L41 | train | 221,164 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_text.py | ComparerText.extract | def extract(self, item, article_candidate_list):
"""Compares the extracted texts.
:param item: The corresponding NewscrawlerItem
:param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely text
"""
list_text = []
# The minimal number of words a text needs to have
min_number_words = 15
# The texts of the article candidates and the respective extractors are saved in a tuple in list_text.
for article_candidate in article_candidate_list:
if article_candidate.text != None:
list_text.append((article_candidate.text, article_candidate.extractor))
# Remove texts that are shorter than min_number_words.
for text_tuple in list_text:
if len(text_tuple[0].split()) < min_number_words:
list_text.remove(text_tuple)
# If there is no value in the list, return None.
if len(list_text) == 0:
return None
# If there is only one solution, return it.
if len(list_text) < 2:
return list_text[0][0]
else:
# If there is more than one solution, do the following:
# Create a list which holds triple of the score and the two extractors
list_score = []
# Compare every text with all other texts at least once
for a, b, in itertools.combinations(list_text, 2):
# Create sets from the texts
set_a = set(a[0].split())
set_b = set(b[0].split())
symmetric_difference_a_b = set_a ^ set_b
intersection_a_b = set_a & set_b
# Replace 0 with -1 in order to elude division by zero
if intersection_a_b == 0:
intersection_a_b = -1
# Create the score. It divides the number of words which are not in both texts by the number of words which
# are in both texts and subtracts the result from 1. The closer to 1 the more similiar they are.
score = 1 - ((len(symmetric_difference_a_b)) / (2 * len(intersection_a_b)))
list_score.append((score, a[1], b[1]))
# Find out which is the highest score
best_score = max(list_score, key=lambda item: item[0])
# If one of the solutions is newspaper return it
if "newspaper" in best_score:
return (list(filter(lambda x: x[1] == "newspaper", list_text))[0][0])
else:
# If not, return the text that is longer
# A list that holds the extracted texts and their extractors which were most similar
top_candidates = []
for tuple in list_text:
if tuple[1] == best_score[1] or tuple[1] == best_score[2]:
top_candidates.append(tuple)
if len(top_candidates[0][0]) > len(top_candidates[1][0]):
return (top_candidates[0][0])
else:
return (top_candidates[1][0]) | python | def extract(self, item, article_candidate_list):
"""Compares the extracted texts.
:param item: The corresponding NewscrawlerItem
:param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely text
"""
list_text = []
# The minimal number of words a text needs to have
min_number_words = 15
# The texts of the article candidates and the respective extractors are saved in a tuple in list_text.
for article_candidate in article_candidate_list:
if article_candidate.text != None:
list_text.append((article_candidate.text, article_candidate.extractor))
# Remove texts that are shorter than min_number_words.
for text_tuple in list_text:
if len(text_tuple[0].split()) < min_number_words:
list_text.remove(text_tuple)
# If there is no value in the list, return None.
if len(list_text) == 0:
return None
# If there is only one solution, return it.
if len(list_text) < 2:
return list_text[0][0]
else:
# If there is more than one solution, do the following:
# Create a list which holds triple of the score and the two extractors
list_score = []
# Compare every text with all other texts at least once
for a, b, in itertools.combinations(list_text, 2):
# Create sets from the texts
set_a = set(a[0].split())
set_b = set(b[0].split())
symmetric_difference_a_b = set_a ^ set_b
intersection_a_b = set_a & set_b
# Replace 0 with -1 in order to elude division by zero
if intersection_a_b == 0:
intersection_a_b = -1
# Create the score. It divides the number of words which are not in both texts by the number of words which
# are in both texts and subtracts the result from 1. The closer to 1 the more similiar they are.
score = 1 - ((len(symmetric_difference_a_b)) / (2 * len(intersection_a_b)))
list_score.append((score, a[1], b[1]))
# Find out which is the highest score
best_score = max(list_score, key=lambda item: item[0])
# If one of the solutions is newspaper return it
if "newspaper" in best_score:
return (list(filter(lambda x: x[1] == "newspaper", list_text))[0][0])
else:
# If not, return the text that is longer
# A list that holds the extracted texts and their extractors which were most similar
top_candidates = []
for tuple in list_text:
if tuple[1] == best_score[1] or tuple[1] == best_score[2]:
top_candidates.append(tuple)
if len(top_candidates[0][0]) > len(top_candidates[1][0]):
return (top_candidates[0][0])
else:
return (top_candidates[1][0]) | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"article_candidate_list",
")",
":",
"list_text",
"=",
"[",
"]",
"# The minimal number of words a text needs to have",
"min_number_words",
"=",
"15",
"# The texts of the article candidates and the respective extractors are saved in ... | Compares the extracted texts.
:param item: The corresponding NewscrawlerItem
:param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely text | [
"Compares",
"the",
"extracted",
"texts",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_text.py#L7-L79 | train | 221,165 |
fhamborg/news-please | newsplease/helper_classes/sub_classes/heuristics_manager.py | HeuristicsManager.is_article | def is_article(self, response, url):
"""
Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article
"""
site = self.__sites_object[url]
heuristics = self.__get_enabled_heuristics(url)
self.log.info("Checking site: %s", response.url)
statement = self.__get_condition(url)
self.log.debug("Condition (original): %s", statement)
for heuristic, condition in heuristics.items():
heuristic_func = getattr(self, heuristic)
result = heuristic_func(response, site)
check = self.__evaluate_result(result, condition)
statement = re.sub(r"\b%s\b" % heuristic, str(check), statement)
self.log.debug("Checking heuristic (%s)"
" result (%s) on condition (%s): %s",
heuristic, result, condition, check)
self.log.debug("Condition (evaluated): %s", statement)
is_article = eval(statement)
self.log.debug("Article accepted: %s", is_article)
return is_article | python | def is_article(self, response, url):
"""
Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article
"""
site = self.__sites_object[url]
heuristics = self.__get_enabled_heuristics(url)
self.log.info("Checking site: %s", response.url)
statement = self.__get_condition(url)
self.log.debug("Condition (original): %s", statement)
for heuristic, condition in heuristics.items():
heuristic_func = getattr(self, heuristic)
result = heuristic_func(response, site)
check = self.__evaluate_result(result, condition)
statement = re.sub(r"\b%s\b" % heuristic, str(check), statement)
self.log.debug("Checking heuristic (%s)"
" result (%s) on condition (%s): %s",
heuristic, result, condition, check)
self.log.debug("Condition (evaluated): %s", statement)
is_article = eval(statement)
self.log.debug("Article accepted: %s", is_article)
return is_article | [
"def",
"is_article",
"(",
"self",
",",
"response",
",",
"url",
")",
":",
"site",
"=",
"self",
".",
"__sites_object",
"[",
"url",
"]",
"heuristics",
"=",
"self",
".",
"__get_enabled_heuristics",
"(",
"url",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"... | Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article | [
"Tests",
"if",
"the",
"given",
"response",
"is",
"an",
"article",
"by",
"calling",
"and",
"checking",
"the",
"heuristics",
"set",
"in",
"config",
".",
"cfg",
"and",
"sitelist",
".",
"json"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/sub_classes/heuristics_manager.py#L36-L67 | train | 221,166 |
fhamborg/news-please | newsplease/helper_classes/sub_classes/heuristics_manager.py | HeuristicsManager.__get_condition | def __get_condition(self, url):
"""
Gets the condition for a url and validates it.
:param str url: The url to get the condition for
"""
if self.__heuristics_condition is not None:
return self.__heuristics_condition
if "pass_heuristics_condition" in self.__sites_object[url]:
condition = \
self.__sites_object[url]["pass_heuristics_condition"]
else:
condition = \
self.cfg_heuristics["pass_heuristics_condition"]
# Because the condition will be eval-ed (Yeah, eval is evil, BUT only
# when not filtered properly), we are filtering it here.
# Anyway, if that filter-method is not perfect: This is not any
# random user-input thats evaled. This is (hopefully still when you
# read this) not a webtool, where you need to filter everything 100%
# properly.
disalloweds = condition
heuristics = self.__get_enabled_heuristics(url)
for allowed in self.__condition_allowed:
disalloweds = disalloweds.replace(allowed, " ")
for heuristic, _ in heuristics.items():
disalloweds = re.sub(r"\b%s\b" % heuristic, " ", disalloweds)
disalloweds = disalloweds.split(" ")
for disallowed in disalloweds:
if disallowed != "":
self.log.error("Misconfiguration: In the condition,"
" an unknown heuristic was found and"
" will be ignored: %s", disallowed)
condition = re.sub(r"\b%s\b" % disallowed, "True", condition)
self.__heuristics_condition = condition
# Now condition should just consits of not, and, or, (, ), and all
# enabled heuristics.
return condition | python | def __get_condition(self, url):
"""
Gets the condition for a url and validates it.
:param str url: The url to get the condition for
"""
if self.__heuristics_condition is not None:
return self.__heuristics_condition
if "pass_heuristics_condition" in self.__sites_object[url]:
condition = \
self.__sites_object[url]["pass_heuristics_condition"]
else:
condition = \
self.cfg_heuristics["pass_heuristics_condition"]
# Because the condition will be eval-ed (Yeah, eval is evil, BUT only
# when not filtered properly), we are filtering it here.
# Anyway, if that filter-method is not perfect: This is not any
# random user-input thats evaled. This is (hopefully still when you
# read this) not a webtool, where you need to filter everything 100%
# properly.
disalloweds = condition
heuristics = self.__get_enabled_heuristics(url)
for allowed in self.__condition_allowed:
disalloweds = disalloweds.replace(allowed, " ")
for heuristic, _ in heuristics.items():
disalloweds = re.sub(r"\b%s\b" % heuristic, " ", disalloweds)
disalloweds = disalloweds.split(" ")
for disallowed in disalloweds:
if disallowed != "":
self.log.error("Misconfiguration: In the condition,"
" an unknown heuristic was found and"
" will be ignored: %s", disallowed)
condition = re.sub(r"\b%s\b" % disallowed, "True", condition)
self.__heuristics_condition = condition
# Now condition should just consits of not, and, or, (, ), and all
# enabled heuristics.
return condition | [
"def",
"__get_condition",
"(",
"self",
",",
"url",
")",
":",
"if",
"self",
".",
"__heuristics_condition",
"is",
"not",
"None",
":",
"return",
"self",
".",
"__heuristics_condition",
"if",
"\"pass_heuristics_condition\"",
"in",
"self",
".",
"__sites_object",
"[",
... | Gets the condition for a url and validates it.
:param str url: The url to get the condition for | [
"Gets",
"the",
"condition",
"for",
"a",
"url",
"and",
"validates",
"it",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/sub_classes/heuristics_manager.py#L69-L110 | train | 221,167 |
fhamborg/news-please | newsplease/helper_classes/sub_classes/heuristics_manager.py | HeuristicsManager.__evaluate_result | def __evaluate_result(self, result, condition):
"""
Evaluates a result of a heuristic
with the condition given in the config.
:param mixed result: The result of the heuristic
:param mixed condition: The condition string to evaluate on the result
:return bool: Whether the heuristic result matches the condition
"""
# If result is bool this means, that the heuristic
# is bool as well or has a special situation
# (for example some condition [e.g. in config] is [not] met, thus
# just pass it)
if isinstance(result, bool):
return result
# Check if the condition is a String condition,
# allowing <=, >=, <, >, = conditions or string
# when they start with " or '
if isinstance(condition, basestring):
# Check if result should match a string
if (condition.startswith("'") and condition.endswith("'")) or \
(condition.startswith('"') and condition.endswith('"')):
if isinstance(result, basestring):
self.log.debug("Condition %s recognized as string.",
condition)
return result == condition[1:-1]
return self.__evaluation_error(
result, condition, "Result not string")
# Only number-comparision following
if not isinstance(result, (float, int)):
return self.__evaluation_error(
result, condition, "Result not number on comparision")
# Check if result should match a number
if condition.startswith("="):
number = self.__try_parse_number(condition[1:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (=)")
return result == number
# Check if result should be >= then a number
if condition.startswith(">="):
number = self.__try_parse_number(condition[2:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (>=)")
return result >= number
# Check if result should be <= then a number
if condition.startswith("<="):
number = self.__try_parse_number(condition[2:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (<=)")
return result <= number
# Check if result should be > then a number
if condition.startswith(">"):
number = self.__try_parse_number(condition[1:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (>)")
return result > number
# Check if result should be < then a number
if condition.startswith("<"):
number = self.__try_parse_number(condition[1:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (<)")
return result < number
# Check if result should be equal a number
number = self.__try_parse_number(condition)
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable")
return result == number
# Check if the condition is a number and matches the result
if isinstance(condition, (float, int)) and isinstance(result, (float, int)):
return condition == result
return self.__evaluation_error(result, condition, "Unknown") | python | def __evaluate_result(self, result, condition):
"""
Evaluates a result of a heuristic
with the condition given in the config.
:param mixed result: The result of the heuristic
:param mixed condition: The condition string to evaluate on the result
:return bool: Whether the heuristic result matches the condition
"""
# If result is bool this means, that the heuristic
# is bool as well or has a special situation
# (for example some condition [e.g. in config] is [not] met, thus
# just pass it)
if isinstance(result, bool):
return result
# Check if the condition is a String condition,
# allowing <=, >=, <, >, = conditions or string
# when they start with " or '
if isinstance(condition, basestring):
# Check if result should match a string
if (condition.startswith("'") and condition.endswith("'")) or \
(condition.startswith('"') and condition.endswith('"')):
if isinstance(result, basestring):
self.log.debug("Condition %s recognized as string.",
condition)
return result == condition[1:-1]
return self.__evaluation_error(
result, condition, "Result not string")
# Only number-comparision following
if not isinstance(result, (float, int)):
return self.__evaluation_error(
result, condition, "Result not number on comparision")
# Check if result should match a number
if condition.startswith("="):
number = self.__try_parse_number(condition[1:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (=)")
return result == number
# Check if result should be >= then a number
if condition.startswith(">="):
number = self.__try_parse_number(condition[2:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (>=)")
return result >= number
# Check if result should be <= then a number
if condition.startswith("<="):
number = self.__try_parse_number(condition[2:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (<=)")
return result <= number
# Check if result should be > then a number
if condition.startswith(">"):
number = self.__try_parse_number(condition[1:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (>)")
return result > number
# Check if result should be < then a number
if condition.startswith("<"):
number = self.__try_parse_number(condition[1:])
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable (<)")
return result < number
# Check if result should be equal a number
number = self.__try_parse_number(condition)
if isinstance(number, bool):
return self.__evaluation_error(
result, condition, "Number not parsable")
return result == number
# Check if the condition is a number and matches the result
if isinstance(condition, (float, int)) and isinstance(result, (float, int)):
return condition == result
return self.__evaluation_error(result, condition, "Unknown") | [
"def",
"__evaluate_result",
"(",
"self",
",",
"result",
",",
"condition",
")",
":",
"# If result is bool this means, that the heuristic",
"# is bool as well or has a special situation",
"# (for example some condition [e.g. in config] is [not] met, thus",
"# just pass it)",
"if",
"isin... | Evaluates a result of a heuristic
with the condition given in the config.
:param mixed result: The result of the heuristic
:param mixed condition: The condition string to evaluate on the result
:return bool: Whether the heuristic result matches the condition | [
"Evaluates",
"a",
"result",
"of",
"a",
"heuristic",
"with",
"the",
"condition",
"given",
"in",
"the",
"config",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/sub_classes/heuristics_manager.py#L112-L200 | train | 221,168 |
fhamborg/news-please | newsplease/helper_classes/sub_classes/heuristics_manager.py | HeuristicsManager.__evaluation_error | def __evaluation_error(self, result, condition, throw):
"""Helper-method for easy error-logging"""
self.log.error("Result does not match condition, dropping item. "
"Result %s; Condition: %s; Throw: %s",
result, condition, throw)
return False | python | def __evaluation_error(self, result, condition, throw):
"""Helper-method for easy error-logging"""
self.log.error("Result does not match condition, dropping item. "
"Result %s; Condition: %s; Throw: %s",
result, condition, throw)
return False | [
"def",
"__evaluation_error",
"(",
"self",
",",
"result",
",",
"condition",
",",
"throw",
")",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Result does not match condition, dropping item. \"",
"\"Result %s; Condition: %s; Throw: %s\"",
",",
"result",
",",
"condition",
... | Helper-method for easy error-logging | [
"Helper",
"-",
"method",
"for",
"easy",
"error",
"-",
"logging"
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/sub_classes/heuristics_manager.py#L202-L207 | train | 221,169 |
fhamborg/news-please | newsplease/helper_classes/sub_classes/heuristics_manager.py | HeuristicsManager.__try_parse_number | def __try_parse_number(self, string):
"""Try to parse a string to a number, else return False."""
try:
return int(string)
except ValueError:
try:
return float(string)
except ValueError:
return False | python | def __try_parse_number(self, string):
"""Try to parse a string to a number, else return False."""
try:
return int(string)
except ValueError:
try:
return float(string)
except ValueError:
return False | [
"def",
"__try_parse_number",
"(",
"self",
",",
"string",
")",
":",
"try",
":",
"return",
"int",
"(",
"string",
")",
"except",
"ValueError",
":",
"try",
":",
"return",
"float",
"(",
"string",
")",
"except",
"ValueError",
":",
"return",
"False"
] | Try to parse a string to a number, else return False. | [
"Try",
"to",
"parse",
"a",
"string",
"to",
"a",
"number",
"else",
"return",
"False",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/sub_classes/heuristics_manager.py#L209-L217 | train | 221,170 |
fhamborg/news-please | newsplease/helper_classes/sub_classes/heuristics_manager.py | HeuristicsManager.__get_enabled_heuristics | def __get_enabled_heuristics(self, url):
"""
Get the enabled heuristics for a site, merging the default and the
overwrite together.
The config will only be read once and the merged site-config will be
cached.
:param str url: The url to get the heuristics for.
"""
if url in self.__sites_heuristics:
return self.__sites_heuristics[url]
site = self.__sites_object[url]
heuristics = dict(self.cfg_heuristics["enabled_heuristics"])
if "overwrite_heuristics" in site:
for heuristic, value in site["overwrite_heuristics"].items():
if value is False and heuristic in heuristics:
del heuristics[heuristic]
else:
heuristics[heuristic] = value
self.__sites_heuristics[site["url"]] = heuristics
self.log.debug(
"Enabled heuristics for %s: %s", site["url"], heuristics
)
return heuristics | python | def __get_enabled_heuristics(self, url):
"""
Get the enabled heuristics for a site, merging the default and the
overwrite together.
The config will only be read once and the merged site-config will be
cached.
:param str url: The url to get the heuristics for.
"""
if url in self.__sites_heuristics:
return self.__sites_heuristics[url]
site = self.__sites_object[url]
heuristics = dict(self.cfg_heuristics["enabled_heuristics"])
if "overwrite_heuristics" in site:
for heuristic, value in site["overwrite_heuristics"].items():
if value is False and heuristic in heuristics:
del heuristics[heuristic]
else:
heuristics[heuristic] = value
self.__sites_heuristics[site["url"]] = heuristics
self.log.debug(
"Enabled heuristics for %s: %s", site["url"], heuristics
)
return heuristics | [
"def",
"__get_enabled_heuristics",
"(",
"self",
",",
"url",
")",
":",
"if",
"url",
"in",
"self",
".",
"__sites_heuristics",
":",
"return",
"self",
".",
"__sites_heuristics",
"[",
"url",
"]",
"site",
"=",
"self",
".",
"__sites_object",
"[",
"url",
"]",
"heu... | Get the enabled heuristics for a site, merging the default and the
overwrite together.
The config will only be read once and the merged site-config will be
cached.
:param str url: The url to get the heuristics for. | [
"Get",
"the",
"enabled",
"heuristics",
"for",
"a",
"site",
"merging",
"the",
"default",
"and",
"the",
"overwrite",
"together",
".",
"The",
"config",
"will",
"only",
"be",
"read",
"once",
"and",
"the",
"merged",
"site",
"-",
"config",
"will",
"be",
"cached"... | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/sub_classes/heuristics_manager.py#L219-L245 | train | 221,171 |
fhamborg/news-please | newsplease/pipeline/extractor/extractors/abstract_extractor.py | AbstractExtractor.extract | def extract(self, item):
"""Executes all implemented functions on the given article and returns an
object containing the recovered data.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data.
"""
article_candidate = ArticleCandidate()
article_candidate.extractor = self._name()
article_candidate.title = self._title(item)
article_candidate.description = self._description(item)
article_candidate.text = self._text(item)
article_candidate.topimage = self._topimage(item)
article_candidate.author = self._author(item)
article_candidate.publish_date = self._publish_date(item)
article_candidate.language = self._language(item)
return article_candidate | python | def extract(self, item):
"""Executes all implemented functions on the given article and returns an
object containing the recovered data.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data.
"""
article_candidate = ArticleCandidate()
article_candidate.extractor = self._name()
article_candidate.title = self._title(item)
article_candidate.description = self._description(item)
article_candidate.text = self._text(item)
article_candidate.topimage = self._topimage(item)
article_candidate.author = self._author(item)
article_candidate.publish_date = self._publish_date(item)
article_candidate.language = self._language(item)
return article_candidate | [
"def",
"extract",
"(",
"self",
",",
"item",
")",
":",
"article_candidate",
"=",
"ArticleCandidate",
"(",
")",
"article_candidate",
".",
"extractor",
"=",
"self",
".",
"_name",
"(",
")",
"article_candidate",
".",
"title",
"=",
"self",
".",
"_title",
"(",
"i... | Executes all implemented functions on the given article and returns an
object containing the recovered data.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data. | [
"Executes",
"all",
"implemented",
"functions",
"on",
"the",
"given",
"article",
"and",
"returns",
"an",
"object",
"containing",
"the",
"recovered",
"data",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/abstract_extractor.py#L48-L66 | train | 221,172 |
fhamborg/news-please | newsplease/pipeline/extractor/extractors/newspaper_extractor.py | NewspaperExtractor.extract | def extract(self, item):
"""Creates an instance of Article without a Download and returns an ArticleCandidate with the results of
parsing the HTML-Code.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data.
"""
article_candidate = ArticleCandidate()
article_candidate.extractor = self._name()
article = Article('')
article.set_html(item['spider_response'].body)
article.parse()
article_candidate.title = article.title
article_candidate.description = article.meta_description
article_candidate.text = article.text
article_candidate.topimage = article.top_image
article_candidate.author = article.authors
if article.publish_date is not None:
try:
article_candidate.publish_date = article.publish_date.strftime('%Y-%m-%d %H:%M:%S')
except ValueError as exception:
self.log.debug('%s: Newspaper failed to extract the date in the supported format,'
'Publishing date set to None' % item['url'])
article_candidate.language = article.meta_lang
return article_candidate | python | def extract(self, item):
"""Creates an instance of Article without a Download and returns an ArticleCandidate with the results of
parsing the HTML-Code.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data.
"""
article_candidate = ArticleCandidate()
article_candidate.extractor = self._name()
article = Article('')
article.set_html(item['spider_response'].body)
article.parse()
article_candidate.title = article.title
article_candidate.description = article.meta_description
article_candidate.text = article.text
article_candidate.topimage = article.top_image
article_candidate.author = article.authors
if article.publish_date is not None:
try:
article_candidate.publish_date = article.publish_date.strftime('%Y-%m-%d %H:%M:%S')
except ValueError as exception:
self.log.debug('%s: Newspaper failed to extract the date in the supported format,'
'Publishing date set to None' % item['url'])
article_candidate.language = article.meta_lang
return article_candidate | [
"def",
"extract",
"(",
"self",
",",
"item",
")",
":",
"article_candidate",
"=",
"ArticleCandidate",
"(",
")",
"article_candidate",
".",
"extractor",
"=",
"self",
".",
"_name",
"(",
")",
"article",
"=",
"Article",
"(",
"''",
")",
"article",
".",
"set_html",... | Creates an instance of Article without a Download and returns an ArticleCandidate with the results of
parsing the HTML-Code.
:param item: A NewscrawlerItem to parse.
:return: ArticleCandidate containing the recovered article data. | [
"Creates",
"an",
"instance",
"of",
"Article",
"without",
"a",
"Download",
"and",
"returns",
"an",
"ArticleCandidate",
"with",
"the",
"results",
"of",
"parsing",
"the",
"HTML",
"-",
"Code",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/newspaper_extractor.py#L18-L44 | train | 221,173 |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_author.py | ComparerAuthor.extract | def extract(self, item, list_article_candidate):
"""Compares the extracted authors.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely authors
"""
list_author = []
# The authors of the ArticleCandidates and the respective extractors are saved in a tuple in list_author.
for article_candidate in list_article_candidate:
if (article_candidate.author is not None) and (article_candidate.author != '[]'):
list_author.append((article_candidate.author, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_author) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_author if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no author extracted by newspaper, return the first result of list_author.
return list_author[0][0]
else:
return list_newspaper[0][0] | python | def extract(self, item, list_article_candidate):
"""Compares the extracted authors.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely authors
"""
list_author = []
# The authors of the ArticleCandidates and the respective extractors are saved in a tuple in list_author.
for article_candidate in list_article_candidate:
if (article_candidate.author is not None) and (article_candidate.author != '[]'):
list_author.append((article_candidate.author, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_author) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_author if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no author extracted by newspaper, return the first result of list_author.
return list_author[0][0]
else:
return list_newspaper[0][0] | [
"def",
"extract",
"(",
"self",
",",
"item",
",",
"list_article_candidate",
")",
":",
"list_author",
"=",
"[",
"]",
"# The authors of the ArticleCandidates and the respective extractors are saved in a tuple in list_author.",
"for",
"article_candidate",
"in",
"list_article_candidat... | Compares the extracted authors.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely authors | [
"Compares",
"the",
"extracted",
"authors",
"."
] | 731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9 | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_author.py#L4-L29 | train | 221,174 |
GNS3/gns3-server | gns3server/utils/asyncio/serial.py | _asyncio_open_serial_windows | def _asyncio_open_serial_windows(path):
"""
Open a windows named pipe
:returns: An IO like object
"""
try:
yield from wait_for_named_pipe_creation(path)
except asyncio.TimeoutError:
raise NodeError('Pipe file "{}" is missing'.format(path))
return WindowsPipe(path) | python | def _asyncio_open_serial_windows(path):
"""
Open a windows named pipe
:returns: An IO like object
"""
try:
yield from wait_for_named_pipe_creation(path)
except asyncio.TimeoutError:
raise NodeError('Pipe file "{}" is missing'.format(path))
return WindowsPipe(path) | [
"def",
"_asyncio_open_serial_windows",
"(",
"path",
")",
":",
"try",
":",
"yield",
"from",
"wait_for_named_pipe_creation",
"(",
"path",
")",
"except",
"asyncio",
".",
"TimeoutError",
":",
"raise",
"NodeError",
"(",
"'Pipe file \"{}\" is missing'",
".",
"format",
"("... | Open a windows named pipe
:returns: An IO like object | [
"Open",
"a",
"windows",
"named",
"pipe"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/serial.py#L99-L110 | train | 221,175 |
GNS3/gns3-server | gns3server/utils/asyncio/serial.py | _asyncio_open_serial_unix | def _asyncio_open_serial_unix(path):
"""
Open a unix socket or a windows named pipe
:returns: An IO like object
"""
try:
# wait for VM to create the pipe file.
yield from wait_for_file_creation(path)
except asyncio.TimeoutError:
raise NodeError('Pipe file "{}" is missing'.format(path))
output = SerialReaderWriterProtocol()
try:
yield from asyncio.get_event_loop().create_unix_connection(lambda: output, path)
except ConnectionRefusedError:
raise NodeError('Can\'t open pipe file "{}"'.format(path))
return output | python | def _asyncio_open_serial_unix(path):
"""
Open a unix socket or a windows named pipe
:returns: An IO like object
"""
try:
# wait for VM to create the pipe file.
yield from wait_for_file_creation(path)
except asyncio.TimeoutError:
raise NodeError('Pipe file "{}" is missing'.format(path))
output = SerialReaderWriterProtocol()
try:
yield from asyncio.get_event_loop().create_unix_connection(lambda: output, path)
except ConnectionRefusedError:
raise NodeError('Can\'t open pipe file "{}"'.format(path))
return output | [
"def",
"_asyncio_open_serial_unix",
"(",
"path",
")",
":",
"try",
":",
"# wait for VM to create the pipe file.",
"yield",
"from",
"wait_for_file_creation",
"(",
"path",
")",
"except",
"asyncio",
".",
"TimeoutError",
":",
"raise",
"NodeError",
"(",
"'Pipe file \"{}\" is ... | Open a unix socket or a windows named pipe
:returns: An IO like object | [
"Open",
"a",
"unix",
"socket",
"or",
"a",
"windows",
"named",
"pipe"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/serial.py#L114-L132 | train | 221,176 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink.create | def create(self):
"""
Create the link on the nodes
"""
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]
# Get an IP allowing communication between both host
try:
(node1_host, node2_host) = yield from node1.compute.get_ip_on_same_subnet(node2.compute)
except ValueError as e:
raise aiohttp.web.HTTPConflict(text=str(e))
# Reserve a UDP port on both side
response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node1_port = response.json["udp_port"]
response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node2_port = response.json["udp_port"]
node1_filters = {}
node2_filters = {}
filter_node = self._get_filter_node()
if filter_node == node1:
node1_filters = self.get_active_filters()
elif filter_node == node2:
node2_filters = self.get_active_filters()
# Create the tunnel on both side
self._link_data.append({
"lport": self._node1_port,
"rhost": node2_host,
"rport": self._node2_port,
"type": "nio_udp",
"filters": node1_filters
})
yield from node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)
self._link_data.append({
"lport": self._node2_port,
"rhost": node1_host,
"rport": self._node1_port,
"type": "nio_udp",
"filters": node2_filters
})
try:
yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120)
except Exception as e:
# We clean the first NIO
yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
raise e
self._created = True | python | def create(self):
"""
Create the link on the nodes
"""
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]
# Get an IP allowing communication between both host
try:
(node1_host, node2_host) = yield from node1.compute.get_ip_on_same_subnet(node2.compute)
except ValueError as e:
raise aiohttp.web.HTTPConflict(text=str(e))
# Reserve a UDP port on both side
response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node1_port = response.json["udp_port"]
response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node2_port = response.json["udp_port"]
node1_filters = {}
node2_filters = {}
filter_node = self._get_filter_node()
if filter_node == node1:
node1_filters = self.get_active_filters()
elif filter_node == node2:
node2_filters = self.get_active_filters()
# Create the tunnel on both side
self._link_data.append({
"lport": self._node1_port,
"rhost": node2_host,
"rport": self._node2_port,
"type": "nio_udp",
"filters": node1_filters
})
yield from node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)
self._link_data.append({
"lport": self._node2_port,
"rhost": node1_host,
"rport": self._node1_port,
"type": "nio_udp",
"filters": node2_filters
})
try:
yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120)
except Exception as e:
# We clean the first NIO
yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
raise e
self._created = True | [
"def",
"create",
"(",
"self",
")",
":",
"node1",
"=",
"self",
".",
"_nodes",
"[",
"0",
"]",
"[",
"\"node\"",
"]",
"adapter_number1",
"=",
"self",
".",
"_nodes",
"[",
"0",
"]",
"[",
"\"adapter_number\"",
"]",
"port_number1",
"=",
"self",
".",
"_nodes",
... | Create the link on the nodes | [
"Create",
"the",
"link",
"on",
"the",
"nodes"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L41-L96 | train | 221,177 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink.delete | def delete(self):
"""
Delete the link and free the resources
"""
if not self._created:
return
try:
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
except IndexError:
return
try:
yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
# If the node is already delete (user selected multiple element and delete all in the same time)
except aiohttp.web.HTTPNotFound:
pass
try:
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]
except IndexError:
return
try:
yield from node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), timeout=120)
# If the node is already delete (user selected multiple element and delete all in the same time)
except aiohttp.web.HTTPNotFound:
pass
yield from super().delete() | python | def delete(self):
"""
Delete the link and free the resources
"""
if not self._created:
return
try:
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
except IndexError:
return
try:
yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
# If the node is already delete (user selected multiple element and delete all in the same time)
except aiohttp.web.HTTPNotFound:
pass
try:
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]
except IndexError:
return
try:
yield from node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), timeout=120)
# If the node is already delete (user selected multiple element and delete all in the same time)
except aiohttp.web.HTTPNotFound:
pass
yield from super().delete() | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_created",
":",
"return",
"try",
":",
"node1",
"=",
"self",
".",
"_nodes",
"[",
"0",
"]",
"[",
"\"node\"",
"]",
"adapter_number1",
"=",
"self",
".",
"_nodes",
"[",
"0",
"]",
"[",
... | Delete the link and free the resources | [
"Delete",
"the",
"link",
"and",
"free",
"the",
"resources"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L118-L147 | train | 221,178 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink.start_capture | def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None):
"""
Start capture on a link
"""
if not capture_file_name:
capture_file_name = self.default_capture_file_name()
self._capture_node = self._choose_capture_side()
data = {
"capture_file_name": capture_file_name,
"data_link_type": data_link_type
}
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]), data=data)
yield from super().start_capture(data_link_type=data_link_type, capture_file_name=capture_file_name) | python | def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None):
"""
Start capture on a link
"""
if not capture_file_name:
capture_file_name = self.default_capture_file_name()
self._capture_node = self._choose_capture_side()
data = {
"capture_file_name": capture_file_name,
"data_link_type": data_link_type
}
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]), data=data)
yield from super().start_capture(data_link_type=data_link_type, capture_file_name=capture_file_name) | [
"def",
"start_capture",
"(",
"self",
",",
"data_link_type",
"=",
"\"DLT_EN10MB\"",
",",
"capture_file_name",
"=",
"None",
")",
":",
"if",
"not",
"capture_file_name",
":",
"capture_file_name",
"=",
"self",
".",
"default_capture_file_name",
"(",
")",
"self",
".",
... | Start capture on a link | [
"Start",
"capture",
"on",
"a",
"link"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L150-L162 | train | 221,179 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink.stop_capture | def stop_capture(self):
"""
Stop capture on a link
"""
if self._capture_node:
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]))
self._capture_node = None
yield from super().stop_capture() | python | def stop_capture(self):
"""
Stop capture on a link
"""
if self._capture_node:
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]))
self._capture_node = None
yield from super().stop_capture() | [
"def",
"stop_capture",
"(",
"self",
")",
":",
"if",
"self",
".",
"_capture_node",
":",
"yield",
"from",
"self",
".",
"_capture_node",
"[",
"\"node\"",
"]",
".",
"post",
"(",
"\"/adapters/{adapter_number}/ports/{port_number}/stop_capture\"",
".",
"format",
"(",
"ad... | Stop capture on a link | [
"Stop",
"capture",
"on",
"a",
"link"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L165-L172 | train | 221,180 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink._choose_capture_side | def _choose_capture_side(self):
"""
Run capture on the best candidate.
The ideal candidate is a node who on controller server and always
running (capture will not be cut off)
:returns: Node where the capture should run
"""
ALWAYS_RUNNING_NODES_TYPE = ("cloud", "nat", "ethernet_switch", "ethernet_hub")
for node in self._nodes:
if node["node"].compute.id == "local" and node["node"].node_type in ALWAYS_RUNNING_NODES_TYPE and node["node"].status == "started":
return node
for node in self._nodes:
if node["node"].node_type in ALWAYS_RUNNING_NODES_TYPE and node["node"].status == "started":
return node
for node in self._nodes:
if node["node"].compute.id == "local" and node["node"].status == "started":
return node
for node in self._nodes:
if node["node"].node_type and node["node"].status == "started":
return node
raise aiohttp.web.HTTPConflict(text="Cannot capture because there is no running device on this link") | python | def _choose_capture_side(self):
"""
Run capture on the best candidate.
The ideal candidate is a node who on controller server and always
running (capture will not be cut off)
:returns: Node where the capture should run
"""
ALWAYS_RUNNING_NODES_TYPE = ("cloud", "nat", "ethernet_switch", "ethernet_hub")
for node in self._nodes:
if node["node"].compute.id == "local" and node["node"].node_type in ALWAYS_RUNNING_NODES_TYPE and node["node"].status == "started":
return node
for node in self._nodes:
if node["node"].node_type in ALWAYS_RUNNING_NODES_TYPE and node["node"].status == "started":
return node
for node in self._nodes:
if node["node"].compute.id == "local" and node["node"].status == "started":
return node
for node in self._nodes:
if node["node"].node_type and node["node"].status == "started":
return node
raise aiohttp.web.HTTPConflict(text="Cannot capture because there is no running device on this link") | [
"def",
"_choose_capture_side",
"(",
"self",
")",
":",
"ALWAYS_RUNNING_NODES_TYPE",
"=",
"(",
"\"cloud\"",
",",
"\"nat\"",
",",
"\"ethernet_switch\"",
",",
"\"ethernet_hub\"",
")",
"for",
"node",
"in",
"self",
".",
"_nodes",
":",
"if",
"node",
"[",
"\"node\"",
... | Run capture on the best candidate.
The ideal candidate is a node who on controller server and always
running (capture will not be cut off)
:returns: Node where the capture should run | [
"Run",
"capture",
"on",
"the",
"best",
"candidate",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L174-L202 | train | 221,181 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink.read_pcap_from_source | def read_pcap_from_source(self):
"""
Return a FileStream of the Pcap from the compute node
"""
if self._capture_node:
compute = self._capture_node["node"].compute
return compute.stream_file(self._project, "tmp/captures/" + self._capture_file_name) | python | def read_pcap_from_source(self):
"""
Return a FileStream of the Pcap from the compute node
"""
if self._capture_node:
compute = self._capture_node["node"].compute
return compute.stream_file(self._project, "tmp/captures/" + self._capture_file_name) | [
"def",
"read_pcap_from_source",
"(",
"self",
")",
":",
"if",
"self",
".",
"_capture_node",
":",
"compute",
"=",
"self",
".",
"_capture_node",
"[",
"\"node\"",
"]",
".",
"compute",
"return",
"compute",
".",
"stream_file",
"(",
"self",
".",
"_project",
",",
... | Return a FileStream of the Pcap from the compute node | [
"Return",
"a",
"FileStream",
"of",
"the",
"Pcap",
"from",
"the",
"compute",
"node"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L205-L211 | train | 221,182 |
GNS3/gns3-server | gns3server/controller/udp_link.py | UDPLink.node_updated | def node_updated(self, node):
"""
Called when a node member of the link is updated
"""
if self._capture_node and node == self._capture_node["node"] and node.status != "started":
yield from self.stop_capture() | python | def node_updated(self, node):
"""
Called when a node member of the link is updated
"""
if self._capture_node and node == self._capture_node["node"] and node.status != "started":
yield from self.stop_capture() | [
"def",
"node_updated",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"_capture_node",
"and",
"node",
"==",
"self",
".",
"_capture_node",
"[",
"\"node\"",
"]",
"and",
"node",
".",
"status",
"!=",
"\"started\"",
":",
"yield",
"from",
"self",
".",
... | Called when a node member of the link is updated | [
"Called",
"when",
"a",
"node",
"member",
"of",
"the",
"link",
"is",
"updated"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L214-L219 | train | 221,183 |
GNS3/gns3-server | gns3server/utils/vmnet.py | parse_networking_file | def parse_networking_file():
"""
Parse the VMware networking file.
"""
pairs = dict()
allocated_subnets = []
try:
with open(VMWARE_NETWORKING_FILE, "r", encoding="utf-8") as f:
version = f.readline()
for line in f.read().splitlines():
try:
_, key, value = line.split(' ', 3)
key = key.strip()
value = value.strip()
pairs[key] = value
if key.endswith("HOSTONLY_SUBNET"):
allocated_subnets.append(value)
except ValueError:
raise SystemExit("Error while parsing {}".format(VMWARE_NETWORKING_FILE))
except OSError as e:
raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
return version, pairs, allocated_subnets | python | def parse_networking_file():
"""
Parse the VMware networking file.
"""
pairs = dict()
allocated_subnets = []
try:
with open(VMWARE_NETWORKING_FILE, "r", encoding="utf-8") as f:
version = f.readline()
for line in f.read().splitlines():
try:
_, key, value = line.split(' ', 3)
key = key.strip()
value = value.strip()
pairs[key] = value
if key.endswith("HOSTONLY_SUBNET"):
allocated_subnets.append(value)
except ValueError:
raise SystemExit("Error while parsing {}".format(VMWARE_NETWORKING_FILE))
except OSError as e:
raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
return version, pairs, allocated_subnets | [
"def",
"parse_networking_file",
"(",
")",
":",
"pairs",
"=",
"dict",
"(",
")",
"allocated_subnets",
"=",
"[",
"]",
"try",
":",
"with",
"open",
"(",
"VMWARE_NETWORKING_FILE",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"version",
... | Parse the VMware networking file. | [
"Parse",
"the",
"VMware",
"networking",
"file",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/vmnet.py#L39-L61 | train | 221,184 |
GNS3/gns3-server | gns3server/utils/vmnet.py | write_networking_file | def write_networking_file(version, pairs):
"""
Write the VMware networking file.
"""
vmnets = OrderedDict(sorted(pairs.items(), key=lambda t: t[0]))
try:
with open(VMWARE_NETWORKING_FILE, "w", encoding="utf-8") as f:
f.write(version)
for key, value in vmnets.items():
f.write("answer {} {}\n".format(key, value))
except OSError as e:
raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
# restart VMware networking service
if sys.platform.startswith("darwin"):
if not os.path.exists("/Applications/VMware Fusion.app/Contents/Library/vmnet-cli"):
raise SystemExit("VMware Fusion is not installed in Applications")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --configure")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start")
else:
os.system("vmware-networks --stop")
os.system("vmware-networks --start") | python | def write_networking_file(version, pairs):
"""
Write the VMware networking file.
"""
vmnets = OrderedDict(sorted(pairs.items(), key=lambda t: t[0]))
try:
with open(VMWARE_NETWORKING_FILE, "w", encoding="utf-8") as f:
f.write(version)
for key, value in vmnets.items():
f.write("answer {} {}\n".format(key, value))
except OSError as e:
raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
# restart VMware networking service
if sys.platform.startswith("darwin"):
if not os.path.exists("/Applications/VMware Fusion.app/Contents/Library/vmnet-cli"):
raise SystemExit("VMware Fusion is not installed in Applications")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --configure")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop")
os.system(r"/Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start")
else:
os.system("vmware-networks --stop")
os.system("vmware-networks --start") | [
"def",
"write_networking_file",
"(",
"version",
",",
"pairs",
")",
":",
"vmnets",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"pairs",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"t",
":",
"t",
"[",
"0",
"]",
")",
")",
"try",
":",
"with",
"open"... | Write the VMware networking file. | [
"Write",
"the",
"VMware",
"networking",
"file",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/vmnet.py#L64-L87 | train | 221,185 |
GNS3/gns3-server | gns3server/utils/vmnet.py | parse_vmnet_range | def parse_vmnet_range(start, end):
"""
Parse the vmnet range on the command line.
"""
class Range(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if len(values) != 2:
raise argparse.ArgumentTypeError("vmnet range must consist of 2 numbers")
if not start <= values[0] or not values[1] <= end:
raise argparse.ArgumentTypeError("vmnet range must be between {} and {}".format(start, end))
setattr(args, self.dest, values)
return Range | python | def parse_vmnet_range(start, end):
"""
Parse the vmnet range on the command line.
"""
class Range(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if len(values) != 2:
raise argparse.ArgumentTypeError("vmnet range must consist of 2 numbers")
if not start <= values[0] or not values[1] <= end:
raise argparse.ArgumentTypeError("vmnet range must be between {} and {}".format(start, end))
setattr(args, self.dest, values)
return Range | [
"def",
"parse_vmnet_range",
"(",
"start",
",",
"end",
")",
":",
"class",
"Range",
"(",
"argparse",
".",
"Action",
")",
":",
"def",
"__call__",
"(",
"self",
",",
"parser",
",",
"args",
",",
"values",
",",
"option_string",
"=",
"None",
")",
":",
"if",
... | Parse the vmnet range on the command line. | [
"Parse",
"the",
"vmnet",
"range",
"on",
"the",
"command",
"line",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/vmnet.py#L90-L103 | train | 221,186 |
def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
    """
    Implementation on Linux and Mac OS X.

    Depending on the parsed command line options this either lists the
    configured vmnet adapters, removes all of them except vmnet1/vmnet8,
    or allocates host-only vmnet adapters for the requested range by
    updating the key/value pairs written to the VMware networking file.

    :param args: parsed command line options (``args.list`` and ``args.clean`` are used)
    :param vmnet_range_start: first vmnet number to create (inclusive)
    :param vmnet_range_end: last vmnet number to create (inclusive)
    :raises SystemExit: if VMware is not installed or we do not have write
        access to the networking file (i.e. not running as root)
    """
    if not os.path.exists(VMWARE_NETWORKING_FILE):
        raise SystemExit("VMware Player, Workstation or Fusion is not installed")
    if not os.access(VMWARE_NETWORKING_FILE, os.W_OK):
        raise SystemExit("You must run this script as root")

    version, pairs, allocated_subnets = parse_networking_file()
    if args.list and not sys.platform.startswith("win"):
        # --list: print every vmnet that has a virtual adapter entry (1-255)
        for vmnet_number in range(1, 256):
            vmnet_name = "VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)
            if vmnet_name in pairs:
                print("vmnet{}".format(vmnet_number))
        return

    if args.clean:
        # clean all vmnets but vmnet1 and vmnet8
        # (the trailing underscore in the prefix keeps e.g. VNET_10_* out)
        for key in pairs.copy().keys():
            if key.startswith("VNET_1_") or key.startswith("VNET_8_"):
                continue
            del pairs[key]
    else:
        for vmnet_number in range(vmnet_range_start, vmnet_range_end + 1):
            vmnet_name = "VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)
            if vmnet_name in pairs:
                # adapter already configured, keep it untouched
                continue
            allocated_subnet = None
            # pick the first /24 inside 172.16.0.0/16 that is not already in use
            for subnet in ipaddress.ip_network("172.16.0.0/16").subnets(prefixlen_diff=8):
                subnet = str(subnet.network_address)
                if subnet not in allocated_subnets:
                    allocated_subnet = subnet
                    allocated_subnets.append(allocated_subnet)
                    break

            if allocated_subnet is None:
                # all 256 candidate subnets are taken; skip this vmnet
                print("Couldn't allocate a subnet for vmnet{}".format(vmnet_number))
                continue

            print("Adding vmnet{}...".format(vmnet_number))
            pairs["VNET_{}_HOSTONLY_NETMASK".format(vmnet_number)] = "255.255.255.0"
            pairs["VNET_{}_HOSTONLY_SUBNET".format(vmnet_number)] = allocated_subnet
            pairs["VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)] = "yes"

    write_networking_file(version, pairs)
"""
Implementation on Linux and Mac OS X.
"""
if not os.path.exists(VMWARE_NETWORKING_FILE):
raise SystemExit("VMware Player, Workstation or Fusion is not installed")
if not os.access(VMWARE_NETWORKING_FILE, os.W_OK):
raise SystemExit("You must run this script as root")
version, pairs, allocated_subnets = parse_networking_file()
if args.list and not sys.platform.startswith("win"):
for vmnet_number in range(1, 256):
vmnet_name = "VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)
if vmnet_name in pairs:
print("vmnet{}".format(vmnet_number))
return
if args.clean:
# clean all vmnets but vmnet1 and vmnet8
for key in pairs.copy().keys():
if key.startswith("VNET_1_") or key.startswith("VNET_8_"):
continue
del pairs[key]
else:
for vmnet_number in range(vmnet_range_start, vmnet_range_end + 1):
vmnet_name = "VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)
if vmnet_name in pairs:
continue
allocated_subnet = None
for subnet in ipaddress.ip_network("172.16.0.0/16").subnets(prefixlen_diff=8):
subnet = str(subnet.network_address)
if subnet not in allocated_subnets:
allocated_subnet = subnet
allocated_subnets.append(allocated_subnet)
break
if allocated_subnet is None:
print("Couldn't allocate a subnet for vmnet{}".format(vmnet_number))
continue
print("Adding vmnet{}...".format(vmnet_number))
pairs["VNET_{}_HOSTONLY_NETMASK".format(vmnet_number)] = "255.255.255.0"
pairs["VNET_{}_HOSTONLY_SUBNET".format(vmnet_number)] = allocated_subnet
pairs["VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)] = "yes"
write_networking_file(version, pairs) | [
"def",
"vmnet_unix",
"(",
"args",
",",
"vmnet_range_start",
",",
"vmnet_range_end",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"VMWARE_NETWORKING_FILE",
")",
":",
"raise",
"SystemExit",
"(",
"\"VMware Player, Workstation or Fusion is not installed\"... | Implementation on Linux and Mac OS X. | [
"Implementation",
"on",
"Linux",
"and",
"Mac",
"OS",
"X",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/vmnet.py#L183-L229 | train | 221,187 |
def main():
    """
    Entry point for the VMNET tool.

    Parses the command line and dispatches to the platform specific
    implementation (Windows or UNIX-like).
    """
    arg_parser = argparse.ArgumentParser(description='%(prog)s add/remove vmnet interfaces')
    arg_parser.add_argument('-r', "--range", nargs='+', action=parse_vmnet_range(1, 255),
                            type=int, help="vmnet range to add (default is {} {})".format(DEFAULT_RANGE[0], DEFAULT_RANGE[1]))
    arg_parser.add_argument("-C", "--clean", action="store_true", help="remove all vmnets excepting vmnet1 and vmnet8")
    arg_parser.add_argument("-l", "--list", action="store_true", help="list all existing vmnets (UNIX only)")

    try:
        parsed = arg_parser.parse_args()
    except argparse.ArgumentTypeError as err:
        raise SystemExit(err)

    # fall back to the default range when -r/--range was not given
    first, last = parsed.range if parsed.range is not None else DEFAULT_RANGE

    if not sys.platform.startswith("win"):
        vmnet_unix(parsed, first, last)
    else:
        try:
            vmnet_windows(parsed, first, last)
        except SystemExit:
            # give the user a chance to read the error before the console closes
            os.system("pause")
            raise
"""
Entry point for the VMNET tool.
"""
parser = argparse.ArgumentParser(description='%(prog)s add/remove vmnet interfaces')
parser.add_argument('-r', "--range", nargs='+', action=parse_vmnet_range(1, 255),
type=int, help="vmnet range to add (default is {} {})".format(DEFAULT_RANGE[0], DEFAULT_RANGE[1]))
parser.add_argument("-C", "--clean", action="store_true", help="remove all vmnets excepting vmnet1 and vmnet8")
parser.add_argument("-l", "--list", action="store_true", help="list all existing vmnets (UNIX only)")
try:
args = parser.parse_args()
except argparse.ArgumentTypeError as e:
raise SystemExit(e)
vmnet_range = args.range if args.range is not None else DEFAULT_RANGE
if sys.platform.startswith("win"):
try:
vmnet_windows(args, vmnet_range[0], vmnet_range[1])
except SystemExit:
os.system("pause")
raise
else:
vmnet_unix(args, vmnet_range[0], vmnet_range[1]) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'%(prog)s add/remove vmnet interfaces'",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"\"--range\"",
",",
"nargs",
"=",
"'+'",
",",
"action",
"="... | Entry point for the VMNET tool. | [
"Entry",
"point",
"for",
"the",
"VMNET",
"tool",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/vmnet.py#L232-L256 | train | 221,188 |
def _get_reader(self, network_reader):
    """
    Get a reader or None if another reader is already reading.

    The first network connection that asks becomes the exclusive
    "reader process"; only that connection is handed read futures.

    :param network_reader: the network-side stream reader asking to read
    :returns: a future reading up to READ_SIZE bytes from the attached
        reader, or None if this connection is not the active reader
    """
    # NOTE(review): legacy asyncio style throughout ("yield from" coroutine,
    # "with (yield from self._lock)" and "asyncio.async") — this only runs on
    # old Python/asyncio versions; on modern Python the equivalents are
    # "async with" and asyncio.ensure_future. Confirm target interpreter.
    with (yield from self._lock):
        if self._reader_process is None:
            # first caller claims exclusive read access
            self._reader_process = network_reader
        if self._reader:
            if self._reader_process == network_reader:
                # schedule the next chunk read and remember it so it can be awaited/cancelled
                self._current_read = asyncio.async(self._reader.read(READ_SIZE))
                return self._current_read
    return None
"""
Get a reader or None if another reader is already reading.
"""
with (yield from self._lock):
if self._reader_process is None:
self._reader_process = network_reader
if self._reader:
if self._reader_process == network_reader:
self._current_read = asyncio.async(self._reader.read(READ_SIZE))
return self._current_read
return None | [
"def",
"_get_reader",
"(",
"self",
",",
"network_reader",
")",
":",
"with",
"(",
"yield",
"from",
"self",
".",
"_lock",
")",
":",
"if",
"self",
".",
"_reader_process",
"is",
"None",
":",
"self",
".",
"_reader_process",
"=",
"network_reader",
"if",
"self",
... | Get a reader or None if another reader is already reading. | [
"Get",
"a",
"reader",
"or",
"None",
"if",
"another",
"reader",
"is",
"already",
"reading",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/telnet_server.py#L223-L234 | train | 221,189 |
GNS3/gns3-server | gns3server/utils/asyncio/telnet_server.py | AsyncioTelnetServer._read | def _read(self, cmd, buffer, location, reader):
""" Reads next op from the buffer or reader"""
try:
op = buffer[location]
cmd.append(op)
return op
except IndexError:
op = yield from reader.read(1)
buffer.extend(op)
cmd.append(buffer[location])
return op | python | def _read(self, cmd, buffer, location, reader):
""" Reads next op from the buffer or reader"""
try:
op = buffer[location]
cmd.append(op)
return op
except IndexError:
op = yield from reader.read(1)
buffer.extend(op)
cmd.append(buffer[location])
return op | [
"def",
"_read",
"(",
"self",
",",
"cmd",
",",
"buffer",
",",
"location",
",",
"reader",
")",
":",
"try",
":",
"op",
"=",
"buffer",
"[",
"location",
"]",
"cmd",
".",
"append",
"(",
"op",
")",
"return",
"op",
"except",
"IndexError",
":",
"op",
"=",
... | Reads next op from the buffer or reader | [
"Reads",
"next",
"op",
"from",
"the",
"buffer",
"or",
"reader"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/telnet_server.py#L295-L305 | train | 221,190 |
def _negotiate(self, data, connection):
    """
    Handle a telnet sub-negotiation payload.

    Only NAWS (window size) is supported: a 4 byte payload carrying
    columns and rows as two network-order unsigned shorts.

    :param data: sub-negotiation bytes (option byte followed by its payload)
    :param connection: object notified via ``window_size_changed(columns, rows)``
    """
    command, payload = data[0], data[1:]
    if command == NAWS:
        if len(payload) == 4:
            columns, rows = struct.unpack(str('!HH'), bytes(payload))
            connection.window_size_changed(columns, rows)
        else:
            log.warning('Wrong number of NAWS bytes')
    else:
        # Bug fix: logging uses printf-style lazy formatting, so the original
        # "{}" placeholder was never substituted and the extra argument made
        # the logging module raise a formatting error when the record was
        # actually emitted.
        log.debug("Not supported negotiation sequence, received %d bytes", len(data))
""" Performs negotiation commands"""
command, payload = data[0], data[1:]
if command == NAWS:
if len(payload) == 4:
columns, rows = struct.unpack(str('!HH'), bytes(payload))
connection.window_size_changed(columns, rows)
else:
log.warning('Wrong number of NAWS bytes')
else:
log.debug("Not supported negotiation sequence, received {} bytes", len(data)) | [
"def",
"_negotiate",
"(",
"self",
",",
"data",
",",
"connection",
")",
":",
"command",
",",
"payload",
"=",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
":",
"]",
"if",
"command",
"==",
"NAWS",
":",
"if",
"len",
"(",
"payload",
")",
"==",
"4",
... | Performs negotiation commands | [
"Performs",
"negotiation",
"commands"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/telnet_server.py#L307-L318 | train | 221,191 |
def get(cls):
    """
    Get ping statistics

    :returns: hash with "cpu_usage_percent" and "memory_usage_percent"
    """
    now = time.time()
    # Refresh the cached values at most every ~2 seconds (or when the clock
    # went backwards); psutil sampling is comparatively expensive.
    within_window = cls._last_measurement <= now <= cls._last_measurement + 1.9
    if not within_window:
        cls._last_measurement = now
        # non-blocking CPU usage sampling; the very first call returns 0
        cls._last_cpu_percent = psutil.cpu_percent(interval=None)
        cls._last_mem_percent = psutil.virtual_memory().percent
    return {
        "cpu_usage_percent": cls._last_cpu_percent,
        "memory_usage_percent": cls._last_mem_percent,
    }
"""
Get ping statistics
:returns: hash
"""
stats = {}
cur_time = time.time()
# minimum interval for getting CPU and memory statistics
if cur_time < cls._last_measurement or \
cur_time > cls._last_measurement + 1.9:
cls._last_measurement = cur_time
# Non blocking call to get cpu usage. First call will return 0
cls._last_cpu_percent = psutil.cpu_percent(interval=None)
cls._last_mem_percent = psutil.virtual_memory().percent
stats["cpu_usage_percent"] = cls._last_cpu_percent
stats["memory_usage_percent"] = cls._last_mem_percent
return stats | [
"def",
"get",
"(",
"cls",
")",
":",
"stats",
"=",
"{",
"}",
"cur_time",
"=",
"time",
".",
"time",
"(",
")",
"# minimum interval for getting CPU and memory statistics",
"if",
"cur_time",
"<",
"cls",
".",
"_last_measurement",
"or",
"cur_time",
">",
"cls",
".",
... | Get ping statistics
:returns: hash | [
"Get",
"ping",
"statistics"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/ping_stats.py#L33-L50 | train | 221,192 |
def send(self, command):
    """
    Sends commands to this hypervisor.

    The command is written to the uBridge hypervisor socket and the
    (possibly multi-line) response is accumulated until a final success
    line ("100-...") or an error line ("2xx-...") is seen.

    :param command: a uBridge hypervisor command
    :returns: results as a list of strings with the status codes stripped
    :raises UbridgeError: if not connected, the connection is lost, no data
        comes back, or the hypervisor reports an error code
    """
    # uBridge responses are of the form:
    #   1xx yyyyyy\r\n
    #   1xx yyyyyy\r\n
    #   ...
    #   100-yyyy\r\n
    # or
    #   2xx-yyyy\r\n
    #
    # Where 1xx is a code from 100-199 for a success or 200-299 for an error
    # The result might be multiple lines and might be less than the buffer size
    # but still have more data. The only thing we know for sure is the last line
    # will begin with '100-' or a '2xx-' and end with '\r\n'
    if self._writer is None or self._reader is None:
        raise UbridgeError("Not connected")

    try:
        command = command.strip() + '\n'
        log.debug("sending {}".format(command))
        self._writer.write(command.encode())
        yield from self._writer.drain()
    except OSError as e:
        # NOTE(review): message mentions "Dynamips" but this is the uBridge
        # hypervisor — looks like a copy/paste leftover; confirm before changing.
        raise UbridgeError("Lost communication with {host}:{port} :{error}, Dynamips process running: {run}"
                           .format(host=self._host, port=self._port, error=e, run=self.is_running()))

    # Now retrieve the result
    data = []
    buf = ''
    retries = 0
    max_retries = 10
    while True:
        try:
            try:
                chunk = yield from self._reader.read(1024)
            except asyncio.CancelledError:
                # task has been canceled but continue to read
                # any remaining data sent by the hypervisor
                continue
            except ConnectionResetError as e:
                # Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
                # These happen if connection reset is received before IOCP could complete
                # a previous operation. Ignore and try again....
                log.warning("Connection reset received while reading uBridge response: {}".format(e))
                continue
            if not chunk:
                # EOF from the hypervisor: retry briefly before giving up
                if retries > max_retries:
                    raise UbridgeError("No data returned from {host}:{port}, uBridge process running: {run}"
                                       .format(host=self._host, port=self._port, run=self.is_running()))
                else:
                    retries += 1
                    yield from asyncio.sleep(0.1)
                continue
            retries = 0
            buf += chunk.decode("utf-8")
        except OSError as e:
            raise UbridgeError("Lost communication with {host}:{port} :{error}, uBridge process running: {run}"
                               .format(host=self._host, port=self._port, error=e, run=self.is_running()))

        # If the buffer doesn't end in '\n' then we can't be done
        try:
            if buf[-1] != '\n':
                continue
        except IndexError:
            raise UbridgeError("Could not communicate with {host}:{port}, uBridge process running: {run}"
                               .format(host=self._host, port=self._port, run=self.is_running()))

        data += buf.split('\r\n')
        if data[-1] == '':
            data.pop()
        buf = ''

        # Does it contain an error code?
        if self.error_re.search(data[-1]):
            # strip the "2xx-" prefix and surface the rest as the error message
            raise UbridgeError(data[-1][4:])

        # Or does the last line begin with '100-'? Then we are done!
        if data[-1][:4] == '100-':
            data[-1] = data[-1][4:]
            if data[-1] == 'OK':
                data.pop()
            break

    # Remove success responses codes
    for index in range(len(data)):
        if self.success_re.search(data[index]):
            data[index] = data[index][4:]

    log.debug("returned result {}".format(data))
    return data
"""
Sends commands to this hypervisor.
:param command: a uBridge hypervisor command
:returns: results as a list
"""
# uBridge responses are of the form:
# 1xx yyyyyy\r\n
# 1xx yyyyyy\r\n
# ...
# 100-yyyy\r\n
# or
# 2xx-yyyy\r\n
#
# Where 1xx is a code from 100-199 for a success or 200-299 for an error
# The result might be multiple lines and might be less than the buffer size
# but still have more data. The only thing we know for sure is the last line
# will begin with '100-' or a '2xx-' and end with '\r\n'
if self._writer is None or self._reader is None:
raise UbridgeError("Not connected")
try:
command = command.strip() + '\n'
log.debug("sending {}".format(command))
self._writer.write(command.encode())
yield from self._writer.drain()
except OSError as e:
raise UbridgeError("Lost communication with {host}:{port} :{error}, Dynamips process running: {run}"
.format(host=self._host, port=self._port, error=e, run=self.is_running()))
# Now retrieve the result
data = []
buf = ''
retries = 0
max_retries = 10
while True:
try:
try:
chunk = yield from self._reader.read(1024)
except asyncio.CancelledError:
# task has been canceled but continue to read
# any remaining data sent by the hypervisor
continue
except ConnectionResetError as e:
# Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
# These happen if connection reset is received before IOCP could complete
# a previous operation. Ignore and try again....
log.warning("Connection reset received while reading uBridge response: {}".format(e))
continue
if not chunk:
if retries > max_retries:
raise UbridgeError("No data returned from {host}:{port}, uBridge process running: {run}"
.format(host=self._host, port=self._port, run=self.is_running()))
else:
retries += 1
yield from asyncio.sleep(0.1)
continue
retries = 0
buf += chunk.decode("utf-8")
except OSError as e:
raise UbridgeError("Lost communication with {host}:{port} :{error}, uBridge process running: {run}"
.format(host=self._host, port=self._port, error=e, run=self.is_running()))
# If the buffer doesn't end in '\n' then we can't be done
try:
if buf[-1] != '\n':
continue
except IndexError:
raise UbridgeError("Could not communicate with {host}:{port}, uBridge process running: {run}"
.format(host=self._host, port=self._port, run=self.is_running()))
data += buf.split('\r\n')
if data[-1] == '':
data.pop()
buf = ''
# Does it contain an error code?
if self.error_re.search(data[-1]):
raise UbridgeError(data[-1][4:])
# Or does the last line begin with '100-'? Then we are done!
if data[-1][:4] == '100-':
data[-1] = data[-1][4:]
if data[-1] == 'OK':
data.pop()
break
# Remove success responses codes
for index in range(len(data)):
if self.success_re.search(data[index]):
data[index] = data[index][4:]
log.debug("returned result {}".format(data))
return data | [
"def",
"send",
"(",
"self",
",",
"command",
")",
":",
"# uBridge responses are of the form:",
"# 1xx yyyyyy\\r\\n",
"# 1xx yyyyyy\\r\\n",
"# ...",
"# 100-yyyy\\r\\n",
"# or",
"# 2xx-yyyy\\r\\n",
"#",
"# Where 1xx is a code from 100-199 for a success or 200-299 for an error"... | Sends commands to this hypervisor.
:param command: a uBridge hypervisor command
:returns: results as a list | [
"Sends",
"commands",
"to",
"this",
"hypervisor",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/ubridge/ubridge_hypervisor.py#L180-L277 | train | 221,193 |
def parse_request(request, input_schema, raw):
    """Parse body of request and raise HTTP errors in case of problems

    :param request: the aiohttp request; its JSON body (unless *raw*) and
        query string parameters are merged into ``request.json``
    :param input_schema: optional JSON schema the merged payload must satisfy
    :param raw: when True the body is left untouched (only the query string
        is parsed)
    :returns: the same request object with ``request.json`` populated
    :raises aiohttp.web.HTTPBadRequest: on malformed JSON or schema violation
    """
    request.json = {}
    if not raw:
        body = yield from request.read()
        if body:
            try:
                request.json = json.loads(body.decode('utf-8'))
            except ValueError as e:
                # keep the unparsable payload around for debugging purposes
                request.json = {"malformed_json": body.decode('utf-8')}
                raise aiohttp.web.HTTPBadRequest(text="Invalid JSON {}".format(e))

    # Parse the query string
    if len(request.query_string) > 0:
        for (k, v) in urllib.parse.parse_qs(request.query_string).items():
            # only the first value of a repeated query parameter is kept;
            # query parameters override keys parsed from the JSON body
            request.json[k] = v[0]

    if input_schema:
        try:
            jsonschema.validate(request.json, input_schema)
        except jsonschema.ValidationError as e:
            log.error("Invalid input query. JSON schema error: {}".format(e.message))
            raise aiohttp.web.HTTPBadRequest(text="Invalid JSON: {} in schema: {}".format(
                e.message,
                json.dumps(e.schema)))

    return request
"""Parse body of request and raise HTTP errors in case of problems"""
request.json = {}
if not raw:
body = yield from request.read()
if body:
try:
request.json = json.loads(body.decode('utf-8'))
except ValueError as e:
request.json = {"malformed_json": body.decode('utf-8')}
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON {}".format(e))
# Parse the query string
if len(request.query_string) > 0:
for (k, v) in urllib.parse.parse_qs(request.query_string).items():
request.json[k] = v[0]
if input_schema:
try:
jsonschema.validate(request.json, input_schema)
except jsonschema.ValidationError as e:
log.error("Invalid input query. JSON schema error: {}".format(e.message))
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON: {} in schema: {}".format(
e.message,
json.dumps(e.schema)))
return request | [
"def",
"parse_request",
"(",
"request",
",",
"input_schema",
",",
"raw",
")",
":",
"request",
".",
"json",
"=",
"{",
"}",
"if",
"not",
"raw",
":",
"body",
"=",
"yield",
"from",
"request",
".",
"read",
"(",
")",
"if",
"body",
":",
"try",
":",
"reque... | Parse body of request and raise HTTP errors in case of problems | [
"Parse",
"body",
"of",
"request",
"and",
"raise",
"HTTP",
"errors",
"in",
"case",
"of",
"problems"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/web/route.py#L40-L67 | train | 221,194 |
def authenticate(cls, request, route, server_config):
    """
    Ask user for authentication

    :param request: the incoming aiohttp request (untrusted input)
    :param route: the route being served, used to build the 401 response
    :param server_config: configuration section providing "auth", "user"
        and "password" settings
    :returns: Response if you need to auth the user otherwise None
    """
    if not server_config.getboolean("auth", False):
        return

    user = server_config.get("user", "").strip()
    password = server_config.get("password", "").strip()

    if len(user) == 0:
        return

    if "AUTHORIZATION" in request.headers:
        import hmac  # local import to keep the change self-contained
        expected = aiohttp.helpers.BasicAuth(user, password, "utf-8").encode()
        provided = request.headers["AUTHORIZATION"]
        # Security fix: compare credentials in constant time so the check
        # does not leak information through response timing.
        if hmac.compare_digest(provided.encode("utf-8"), expected.encode("utf-8")):
            return
    # Bug fix: the previous message read "Username should <user>", which was
    # garbled English and leaked nothing useful.
    log.error("Invalid authentication attempt for username: %s", user)

    response = Response(request=request, route=route)
    response.set_status(401)
    response.headers["WWW-Authenticate"] = 'Basic realm="GNS3 server"'
    # Force close the keep alive. Work around a Qt issue where Qt timeout instead of handling the 401
    # this happen only for the first query send by the client.
    response.force_close()
    return response
"""
Ask user for authentication
:returns: Response if you need to auth the user otherwise None
"""
if not server_config.getboolean("auth", False):
return
user = server_config.get("user", "").strip()
password = server_config.get("password", "").strip()
if len(user) == 0:
return
if "AUTHORIZATION" in request.headers:
if request.headers["AUTHORIZATION"] == aiohttp.helpers.BasicAuth(user, password, "utf-8").encode():
return
log.error("Invalid auth. Username should %s", user)
response = Response(request=request, route=route)
response.set_status(401)
response.headers["WWW-Authenticate"] = 'Basic realm="GNS3 server"'
# Force close the keep alive. Work around a Qt issue where Qt timeout instead of handling the 401
# this happen only for the first query send by the client.
response.force_close()
return response | [
"def",
"authenticate",
"(",
"cls",
",",
"request",
",",
"route",
",",
"server_config",
")",
":",
"if",
"not",
"server_config",
".",
"getboolean",
"(",
"\"auth\"",
",",
"False",
")",
":",
"return",
"user",
"=",
"server_config",
".",
"get",
"(",
"\"user\"",
... | Ask user for authentication
:returns: Response if you need to auth the user otherwise None | [
"Ask",
"user",
"for",
"authentication"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/web/route.py#L100-L127 | train | 221,195 |
def get(self, timeout):
    """
    Wait for the next notification, sending a ping when the timeout expires.

    The very first call always returns a ping immediately so the client
    receives data (and server statistics) right away.
    """
    if self._first:
        # prime the client with an immediate ping carrying server stats
        self._first = False
        return ("ping", PingStats.get(), {})
    try:
        action, msg, kwargs = yield from asyncio.wait_for(super().get(), timeout)
    except asyncio.futures.TimeoutError:
        # nothing happened within the timeout: keep the client alive with a ping
        return ("ping", PingStats.get(), {})
    return (action, msg, kwargs)
"""
When timeout is expire we send a ping notification with server information
"""
# At first get we return a ping so the client immediately receives data
if self._first:
self._first = False
return ("ping", PingStats.get(), {})
try:
(action, msg, kwargs) = yield from asyncio.wait_for(super().get(), timeout)
except asyncio.futures.TimeoutError:
return ("ping", PingStats.get(), {})
return (action, msg, kwargs) | [
"def",
"get",
"(",
"self",
",",
"timeout",
")",
":",
"# At first get we return a ping so the client immediately receives data",
"if",
"self",
".",
"_first",
":",
"self",
".",
"_first",
"=",
"False",
"return",
"(",
"\"ping\"",
",",
"PingStats",
".",
"get",
"(",
"... | When timeout is expire we send a ping notification with server information | [
"When",
"timeout",
"is",
"expire",
"we",
"send",
"a",
"ping",
"notification",
"with",
"server",
"information"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/notification_queue.py#L34-L48 | train | 221,196 |
def get_json(self, timeout):
    """
    Return the next notification serialized as a JSON string.

    Events exposing a ``__json__`` method are serialized through it,
    otherwise the raw event object is embedded as-is; any extra keyword
    data is merged into the top-level object.
    """
    action, event, extra = yield from self.get(timeout)
    if hasattr(event, "__json__"):
        payload = {"action": action, "event": event.__json__()}
    else:
        payload = {"action": action, "event": event}
    payload.update(extra)
    return json.dumps(payload, sort_keys=True)
"""
Get a message as a JSON
"""
(action, msg, kwargs) = yield from self.get(timeout)
if hasattr(msg, "__json__"):
msg = {"action": action, "event": msg.__json__()}
else:
msg = {"action": action, "event": msg}
msg.update(kwargs)
return json.dumps(msg, sort_keys=True) | [
"def",
"get_json",
"(",
"self",
",",
"timeout",
")",
":",
"(",
"action",
",",
"msg",
",",
"kwargs",
")",
"=",
"yield",
"from",
"self",
".",
"get",
"(",
"timeout",
")",
"if",
"hasattr",
"(",
"msg",
",",
"\"__json__\"",
")",
":",
"msg",
"=",
"{",
"... | Get a message as a JSON | [
"Get",
"a",
"message",
"as",
"a",
"JSON"
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/notification_queue.py#L51-L61 | train | 221,197 |
def close(self):
    """
    Closes this VPCS VM.

    Releases the UDP ports held by the NIO and the local uBridge tunnel,
    stops uBridge, and terminates the VPCS process if it is still running.

    :returns: True when closed, False when the base class refused to close
        (e.g. already closed)
    """
    if not (yield from super().close()):
        return False

    nio = self._ethernet_adapter.get_nio(0)
    if isinstance(nio, NIOUDP):
        # give the NIO's UDP port back to the project's port pool
        self.manager.port_manager.release_udp_port(nio.lport, self._project)

    if self._local_udp_tunnel:
        # both ends of the local tunnel hold a reserved UDP port
        self.manager.port_manager.release_udp_port(self._local_udp_tunnel[0].lport, self._project)
        self.manager.port_manager.release_udp_port(self._local_udp_tunnel[1].lport, self._project)
        self._local_udp_tunnel = None

    yield from self._stop_ubridge()

    if self.is_running():
        self._terminate_process()
    return True
"""
Closes this VPCS VM.
"""
if not (yield from super().close()):
return False
nio = self._ethernet_adapter.get_nio(0)
if isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
if self._local_udp_tunnel:
self.manager.port_manager.release_udp_port(self._local_udp_tunnel[0].lport, self._project)
self.manager.port_manager.release_udp_port(self._local_udp_tunnel[1].lport, self._project)
self._local_udp_tunnel = None
yield from self._stop_ubridge()
if self.is_running():
self._terminate_process()
return True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"(",
"yield",
"from",
"super",
"(",
")",
".",
"close",
"(",
")",
")",
":",
"return",
"False",
"nio",
"=",
"self",
".",
"_ethernet_adapter",
".",
"get_nio",
"(",
"0",
")",
"if",
"isinstance",
"(",
... | Closes this VPCS VM. | [
"Closes",
"this",
"VPCS",
"VM",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L81-L103 | train | 221,198 |
def _check_requirements(self):
    """
    Check if VPCS is available with the correct version.

    :raises VPCSError: when the VPCS binary path is unset, the file is
        missing, or it is not executable
    """
    vpcs_path = self._vpcs_path()
    if not vpcs_path:
        raise VPCSError("No path to a VPCS executable has been set")

    # accessing the property raises an error if ubridge is not available
    self.ubridge_path

    if not os.path.isfile(vpcs_path):
        raise VPCSError("VPCS program '{}' is not accessible".format(vpcs_path))
    if not os.access(vpcs_path, os.X_OK):
        raise VPCSError("VPCS program '{}' is not executable".format(vpcs_path))

    yield from self._check_vpcs_version()
"""
Check if VPCS is available with the correct version.
"""
path = self._vpcs_path()
if not path:
raise VPCSError("No path to a VPCS executable has been set")
# This raise an error if ubridge is not available
self.ubridge_path
if not os.path.isfile(path):
raise VPCSError("VPCS program '{}' is not accessible".format(path))
if not os.access(path, os.X_OK):
raise VPCSError("VPCS program '{}' is not executable".format(path))
yield from self._check_vpcs_version() | [
"def",
"_check_requirements",
"(",
"self",
")",
":",
"path",
"=",
"self",
".",
"_vpcs_path",
"(",
")",
"if",
"not",
"path",
":",
"raise",
"VPCSError",
"(",
"\"No path to a VPCS executable has been set\"",
")",
"# This raise an error if ubridge is not available",
"self",... | Check if VPCS is available with the correct version. | [
"Check",
"if",
"VPCS",
"is",
"available",
"with",
"the",
"correct",
"version",
"."
] | a221678448fb5d24e977ef562f81d56aacc89ab1 | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L106-L124 | train | 221,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.