text_prompt stringlengths 100 17.7k ⌀ | code_prompt stringlengths 7 9.86k ⌀ |
|---|---|
<SYSTEM_TASK:>
Used to get component DOI from a tag and confirm it is actually for that tag
<END_TASK>
<USER_TASK:>
Description:
def extract_component_doi(tag, nodenames):
"""
Used to get component DOI from a tag and confirm it is actually for that tag
and it is not for one of its children in the list of no... |
component_doi = None
if(tag.name == "sub-article"):
component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type= "doi"))))
else:
object_id_tag = first(raw_parser.object_id(tag, pub_id_type= "doi"))
# Tweak: if it is media and has no object_id_tag then it i... |
<SYSTEM_TASK:>
Find the corresp tags included in author-notes
<END_TASK>
<USER_TASK:>
Description:
def correspondence(soup):
"""
Find the corresp tags included in author-notes
for primary correspondence
""" |
correspondence = []
author_notes_nodes = raw_parser.author_notes(soup)
if author_notes_nodes:
corresp_nodes = raw_parser.corresp(author_notes_nodes)
for tag in corresp_nodes:
correspondence.append(tag.text)
return correspondence |
<SYSTEM_TASK:>
Find the award group award id, one for each
<END_TASK>
<USER_TASK:>
Description:
def award_group_award_id(tag):
"""
Find the award group award id, one for each
item found in the get_funding_group section
""" |
award_group_award_id = []
award_id_tags = extract_nodes(tag, "award-id")
for t in award_id_tags:
award_group_award_id.append(t.text)
return award_group_award_id |
<SYSTEM_TASK:>
Find the award group principal award recipient, one for each
<END_TASK>
<USER_TASK:>
Description:
def award_group_principal_award_recipient(tag):
"""
Find the award group principal award recipient, one for each
item found in the get_funding_group section
""" |
award_group_principal_award_recipient = []
principal_award_recipients = extract_nodes(tag, "principal-award-recipient")
for t in principal_award_recipients:
principal_award_recipient_text = ""
institution = node_text(first(extract_nodes(t, "institution")))
surname = node_text(firs... |
<SYSTEM_TASK:>
DOI in an object-id tag found inside the tag
<END_TASK>
<USER_TASK:>
Description:
def object_id_doi(tag, parent_tag_name=None):
"""DOI in an object-id tag found inside the tag""" |
doi = None
object_id = None
object_ids = raw_parser.object_id(tag, "doi")
if object_ids:
object_id = first([id_ for id_ in object_ids])
if parent_tag_name and object_id and object_id.parent.name != parent_tag_name:
object_id = None
if object_id:
doi = node_contents_str(o... |
<SYSTEM_TASK:>
Extract the title tag and sometimes inspect its parents
<END_TASK>
<USER_TASK:>
Description:
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
"""Extract the title tag and sometimes inspect its parents""" |
title_tag = None
if direct_sibling_only is True:
for sibling_tag in tag:
if sibling_tag.name and sibling_tag.name == "title":
title_tag = sibling_tag
else:
title_tag = raw_parser.title(tag)
if parent_tag_name and p_parent_tag_name:
if (title_tag and... |
<SYSTEM_TASK:>
Extract the text of a title tag and sometimes inspect its parents
<END_TASK>
<USER_TASK:>
Description:
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
"""Extract the text of a title tag and sometimes inspect its parents""" |
title = None
title_tag = title_tag_inspected(tag, parent_tag_name, p_parent_tag_name, direct_sibling_only)
if title_tag:
title = node_contents_str(title_tag)
return title |
<SYSTEM_TASK:>
Get body json and then alter it with section wrapping and removing boxed-text
<END_TASK>
<USER_TASK:>
Description:
def body_json(soup, base_url=None):
""" Get body json and then alter it with section wrapping and removing boxed-text """ |
body_content = body(soup, remove_key_info_box=True, base_url=base_url)
# Wrap in a section if the first block is not a section
if (body_content and len(body_content) > 0 and "type" in body_content[0]
and body_content[0]["type"] != "section"):
# Wrap this one
new_body_section = Order... |
<SYSTEM_TASK:>
Render the tag as body content and call recursively if
<END_TASK>
<USER_TASK:>
Description:
def body_block_content_render(tag, recursive=False, base_url=None):
"""
Render the tag as body content and call recursively if
the tag has child tags
""" |
block_content_list = []
tag_content = OrderedDict()
if tag.name == "p":
for block_content in body_block_paragraph_render(tag, base_url=base_url):
if block_content != {}:
block_content_list.append(block_content)
else:
tag_content = body_block_content(tag, bas... |
<SYSTEM_TASK:>
paragraphs may wrap some other body block content
<END_TASK>
<USER_TASK:>
Description:
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None):
"""
paragraphs may wrap some other body block content
this is separated out so it can be called from more than one place
""" |
# Configure the XML to HTML conversion preference for shorthand use below
convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url)
block_content_list = []
tag_content_content = []
nodenames = body_block_nodenames()
paragraph_content = u''
for child_tag in p_tag:
... |
<SYSTEM_TASK:>
fig and media tag captions are similar so use this common function
<END_TASK>
<USER_TASK:>
Description:
def body_block_caption_render(caption_tags, base_url=None):
"""fig and media tag captions are similar so use this common function""" |
caption_content = []
supplementary_material_tags = []
for block_tag in remove_doi_paragraph(caption_tags):
# Note then skip p tags with supplementary-material inside
if raw_parser.supplementary_material(block_tag):
for supp_tag in raw_parser.supplementary_material(block_tag):
... |
<SYSTEM_TASK:>
fig and media tag caption may have supplementary material
<END_TASK>
<USER_TASK:>
Description:
def body_block_supplementary_material_render(supp_tags, base_url=None):
"""fig and media tag caption may have supplementary material""" |
source_data = []
for supp_tag in supp_tags:
for block_content in body_block_content_render(supp_tag, base_url=base_url):
if block_content != {}:
if "content" in block_content:
del block_content["content"]
source_data.append(block_content)
... |
<SYSTEM_TASK:>
set the title, label and caption values in a consistent way
<END_TASK>
<USER_TASK:>
Description:
def body_block_title_label_caption(tag_content, title_value, label_value,
caption_content, set_caption=True, prefer_title=False, prefer_label=False):
"""set the title, l... |
set_if_value(tag_content, "label", rstrip_punctuation(label_value))
set_if_value(tag_content, "title", title_value)
if set_caption is True and caption_content and len(caption_content) > 0:
tag_content["caption"] = caption_content
if prefer_title:
if "title" not in tag_content and label_... |
<SYSTEM_TASK:>
compile author affiliations for json output
<END_TASK>
<USER_TASK:>
Description:
def author_affiliations(author, html_flag=True):
"""compile author affiliations for json output""" |
# Configure the XML to HTML conversion preference for shorthand use below
convert = lambda xml_string: xml_to_html(html_flag, xml_string)
affilations = []
if author.get("affiliations"):
for affiliation in author.get("affiliations"):
affiliation_json = OrderedDict()
af... |
<SYSTEM_TASK:>
add more author json
<END_TASK>
<USER_TASK:>
Description:
def author_json_details(author, author_json, contributions, correspondence,
competing_interests, equal_contributions_map, present_address_data,
foot_notes_data, html_flag=True):
# Configure the X... |
if author_affiliations(author):
author_json["affiliations"] = author_affiliations(author)
# foot notes or additionalInformation
if author_foot_notes(author, foot_notes_data):
author_json["additionalInformation"] = author_foot_notes(author, foot_notes_data)
# email
if author_email_... |
<SYSTEM_TASK:>
compile a map of author collab to group-author-key
<END_TASK>
<USER_TASK:>
Description:
def collab_to_group_author_key_map(authors):
"""compile a map of author collab to group-author-key""" |
collab_map = {}
for author in authors:
if author.get("collab"):
collab_map[author.get("collab")] = author.get("group-author-key")
return collab_map |
<SYSTEM_TASK:>
assign numeric values to each unique equal-contrib id
<END_TASK>
<USER_TASK:>
Description:
def map_equal_contributions(contributors):
"""assign numeric values to each unique equal-contrib id""" |
equal_contribution_map = {}
equal_contribution_keys = []
for contributor in contributors:
if contributor.get("references") and "equal-contrib" in contributor.get("references"):
for key in contributor["references"]["equal-contrib"]:
if key not in equal_contribution_keys:
... |
<SYSTEM_TASK:>
authors list in article json format
<END_TASK>
<USER_TASK:>
Description:
def authors_json(soup):
"""authors list in article json format""" |
authors_json_data = []
contributors_data = contributors(soup, "full")
author_contributions_data = author_contributions(soup, None)
author_competing_interests_data = competing_interests(soup, None)
author_correspondence_data = full_correspondence(soup)
authors_non_byline_data = authors_non_bylin... |
<SYSTEM_TASK:>
take preferred names from authors json and format them into an author line
<END_TASK>
<USER_TASK:>
Description:
def author_line(soup):
"""take preferred names from authors json and format them into an author line""" |
author_line = None
authors_json_data = authors_json(soup)
author_names = extract_author_line_names(authors_json_data)
if len(author_names) > 0:
author_line = format_author_line(author_names)
return author_line |
<SYSTEM_TASK:>
authorLine format depends on if there is 1, 2 or more than 2 authors
<END_TASK>
<USER_TASK:>
Description:
def format_author_line(author_names):
"""authorLine format depends on if there is 1, 2 or more than 2 authors""" |
author_line = None
if not author_names:
return author_line
if len(author_names) <= 2:
author_line = ", ".join(author_names)
elif len(author_names) > 2:
author_line = author_names[0] + " et al."
return author_line |
<SYSTEM_TASK:>
for use in removing unwanted boxed-content from appendices json
<END_TASK>
<USER_TASK:>
Description:
def unwrap_appendix_box(json_content):
"""for use in removing unwanted boxed-content from appendices json""" |
if json_content.get("content") and len(json_content["content"]) > 0:
first_block = json_content["content"][0]
if (first_block.get("type")
and first_block.get("type") == "box"
and first_block.get("content")):
if first_block.get("doi") and not json_content.get("doi... |
<SYSTEM_TASK:>
Get simple assignments from node tree.
<END_TASK>
<USER_TASK:>
Description:
def _get_simple_assignments(tree):
"""Get simple assignments from node tree.""" |
result = {}
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
for target in node.targets:
if isinstance(target, ast.Name):
result[target.id] = node.value
return result |
<SYSTEM_TASK:>
Render a value, ensuring that any nested dicts are sorted by key.
<END_TASK>
<USER_TASK:>
Description:
def render_value(value):
"""Render a value, ensuring that any nested dicts are sorted by key.""" |
if isinstance(value, list):
return '[' + ', '.join(render_value(v) for v in value) + ']'
elif isinstance(value, dict):
return (
'{' +
', '.join('{k!r}: {v}'.format(
k=k, v=render_value(v)) for k, v in sorted(value.items())) +
'}')
else:
... |
<SYSTEM_TASK:>
Render syntactically valid python service double code.
<END_TASK>
<USER_TASK:>
Description:
def write_service_double_file(target_root, service_name, rendered):
"""Render syntactically valid python service double code.""" |
target_path = os.path.join(
target_root,
'snapstore_schemas', 'service_doubles', '%s.py' % service_name
)
with open(target_path, 'w') as target_file:
target_file.write(rendered) |
<SYSTEM_TASK:>
Recursively sorts a JSON schema by dict key.
<END_TASK>
<USER_TASK:>
Description:
def _sort_schema(schema):
"""Recursively sorts a JSON schema by dict key.""" |
if isinstance(schema, dict):
for k, v in sorted(schema.items()):
if isinstance(v, dict):
yield k, OrderedDict(_sort_schema(v))
elif isinstance(v, list):
yield k, list(_sort_schema(v))
else:
yield k, v
elif isinstance(s... |
<SYSTEM_TASK:>
Returns a JSON Schema representation of a form field.
<END_TASK>
<USER_TASK:>
Description:
def get_field_schema(name, field):
"""Returns a JSON Schema representation of a form field.""" |
field_schema = {
'type': 'string',
}
if field.label:
field_schema['title'] = str(field.label) # force translation
if field.help_text:
field_schema['description'] = str(field.help_text) # force translation
if isinstance(field, (fields.URLField, fields.FileField)):
... |
<SYSTEM_TASK:>
Validate the body of incoming requests for a flask view.
<END_TASK>
<USER_TASK:>
Description:
def validate_body(schema):
"""Validate the body of incoming requests for a flask view.
An example usage might look like this::
from snapstore_schemas import validate_body
@validate_bo... |
location = get_callsite_location()
def decorator(fn):
validate_schema(schema)
wrapper = wrap_request(fn, schema)
record_schemas(
fn, wrapper, location, request_schema=sort_schema(schema))
return wrapper
return decorator |
<SYSTEM_TASK:>
Support extracting the schema from the decorated function.
<END_TASK>
<USER_TASK:>
Description:
def record_schemas(
fn, wrapper, location, request_schema=None, response_schema=None):
"""Support extracting the schema from the decorated function.""" |
# have we already been decorated by an acceptable api call?
has_acceptable = hasattr(fn, '_acceptable_metadata')
if request_schema is not None:
# preserve schema for later use
wrapper._request_schema = wrapper._request_schema = request_schema
wrapper._request_schema_location = loca... |
<SYSTEM_TASK:>
Validate the body of a response from a flask view.
<END_TASK>
<USER_TASK:>
Description:
def validate_output(schema):
"""Validate the body of a response from a flask view.
Like `validate_body`, this function compares a json document to a
jsonschema specification. However, this function applie... |
location = get_callsite_location()
def decorator(fn):
validate_schema(schema)
wrapper = wrap_response(fn, schema)
record_schemas(
fn, wrapper, location, response_schema=sort_schema(schema))
return wrapper
return decorator |
<SYSTEM_TASK:>
Validate `payload` against `schema`, returning an error list.
<END_TASK>
<USER_TASK:>
Description:
def validate(payload, schema):
"""Validate `payload` against `schema`, returning an error list.
jsonschema provides lots of information in it's errors, but it can be a bit
of work to extract al... |
v = jsonschema.Draft4Validator(
schema, format_checker=jsonschema.FormatChecker())
error_list = []
for error in v.iter_errors(payload):
message = error.message
location = '/' + '/'.join([str(c) for c in error.absolute_path])
error_list.append(message + ' at ' + location)
... |
<SYSTEM_TASK:>
Connects to a Phoenix query server.
<END_TASK>
<USER_TASK:>
Description:
def connect(url, max_retries=None, **kwargs):
"""Connects to a Phoenix query server.
:param url:
URL to the Phoenix query server, e.g. ``http://localhost:8765/``
:param autocommit:
Switch the connection... |
client = AvaticaClient(url, max_retries=max_retries)
client.connect()
return Connection(client, **kwargs) |
<SYSTEM_TASK:>
Opens a HTTP connection to the RPC server.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""Opens a HTTP connection to the RPC server.""" |
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceEr... |
<SYSTEM_TASK:>
Closes the HTTP connection to the RPC server.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Closes the HTTP connection to the RPC server.""" |
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
... |
<SYSTEM_TASK:>
Synchronizes connection properties with the server.
<END_TASK>
<USER_TASK:>
Description:
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connPr... |
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_on... |
<SYSTEM_TASK:>
Opens a new connection.
<END_TASK>
<USER_TASK:>
Description:
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
""" |
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(... |
<SYSTEM_TASK:>
Closes a connection.
<END_TASK>
<USER_TASK:>
Description:
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
""" |
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request) |
<SYSTEM_TASK:>
Closes a statement.
<END_TASK>
<USER_TASK:>
Description:
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
""" |
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request) |
<SYSTEM_TASK:>
Prepares and immediately executes a statement.
<END_TASK>
<USER_TASK:>
Description:
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of t... |
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
... |
<SYSTEM_TASK:>
Prepares a statement.
<END_TASK>
<USER_TASK:>
Description:
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
... |
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
res... |
<SYSTEM_TASK:>
Closes the cursor.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` b... |
if self._closed:
raise ProgrammingError('the cursor is already closed')
if self._id is not None:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = None
self._signature = None
self._column_data_types = []
self.... |
<SYSTEM_TASK:>
Transforms a Row into Python values.
<END_TASK>
<USER_TASK:>
Description:
def _transform_row(self, row):
"""Transforms a Row into Python values.
:param row:
A ``common_pb2.Row`` object.
:returns:
A list of values casted into the correct Python types.
... |
tmp_row = []
for i, column in enumerate(row.value):
if column.has_array_value:
raise NotImplementedError('array types are not supported')
elif column.scalar_value.null:
tmp_row.append(None)
else:
field_name, rep, mutat... |
<SYSTEM_TASK:>
Read-only attribute providing the current 0-based index of the
<END_TASK>
<USER_TASK:>
Description:
def rownumber(self):
"""Read-only attribute providing the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined.
The index can... |
if self._frame is not None and self._pos is not None:
return self._frame.offset + self._pos
return self._pos |
<SYSTEM_TASK:>
Closes the connection.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this... |
if self._closed:
raise ProgrammingError('the connection is already closed')
for cursor_ref in self._cursors:
cursor = cursor_ref()
if cursor is not None and not cursor._closed:
cursor.close()
self._client.close_connection(self._id)
sel... |
<SYSTEM_TASK:>
Creates a new cursor.
<END_TASK>
<USER_TASK:>
Description:
def cursor(self, cursor_factory=None):
"""Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:... |
if self._closed:
raise ProgrammingError('the connection is already closed')
cursor = (cursor_factory or self.cursor_factory)(self)
self._cursors.append(weakref.ref(cursor, self._cursors.remove))
return cursor |
<SYSTEM_TASK:>
Sets one or more parameters in the current connection.
<END_TASK>
<USER_TASK:>
Description:
def set_session(self, autocommit=None, readonly=None):
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the c... |
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = pr... |
<SYSTEM_TASK:>
Predict target values for X.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X):
"""Predict target values for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : arra... |
K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0) |
<SYSTEM_TASK:>
Generate the random hidden layer's activations given X as input.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X, y=None):
"""Generate the random hidden layer's activations given X as input.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_sample... |
if (self.components_ is None):
raise ValueError('No components initialized')
return self._compute_hidden_activations(X) |
<SYSTEM_TASK:>
Generate MLP weights
<END_TASK>
<USER_TASK:>
Description:
def _compute_weights(self, X, rs):
"""Generate MLP weights""" |
# use supplied weights if present
weights = self._get_user_components('weights')
if (weights is None):
n_features = X.shape[1]
hw_size = (n_features, self.n_hidden)
weights = rs.normal(size=hw_size)
self.components_['weights'] = weights |
<SYSTEM_TASK:>
Generate components of hidden layer given X
<END_TASK>
<USER_TASK:>
Description:
def _generate_components(self, X):
"""Generate components of hidden layer given X""" |
rs = check_random_state(self.random_state)
if (self._use_mlp_input):
self._compute_biases(rs)
self._compute_weights(X, rs)
if (self._use_rbf_input):
self._compute_centers(X, sp.issparse(X), rs)
self._compute_radii() |
<SYSTEM_TASK:>
Compute input activations given X
<END_TASK>
<USER_TASK:>
Description:
def _compute_input_activations(self, X):
"""Compute input activations given X""" |
n_samples = X.shape[0]
mlp_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_mlp_input):
b = self.components_['biases']
w = self.components_['weights']
mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)
rbf_acts = np.zeros((n_samples, self... |
<SYSTEM_TASK:>
Generate centers, then compute tau, dF and dN vals
<END_TASK>
<USER_TASK:>
Description:
def _compute_centers(self, X, sparse, rs):
"""Generate centers, then compute tau, dF and dN vals""" |
super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)
centers = self.components_['centers']
sorted_distances = np.sort(squareform(pdist(centers)))
self.dF_vals = sorted_distances[:, -1]
self.dN_vals = sorted_distances[:, 1]/100.0
#self.dN_vals = 0.0002 * np.ones... |
<SYSTEM_TASK:>
fit regression using pseudo-inverse
<END_TASK>
<USER_TASK:>
Description:
def _fit_regression(self, y):
"""
fit regression using pseudo-inverse
or supplied regressor
""" |
if self.regressor is None:
self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
else:
self.regressor.fit(self.hidden_activations_, y)
self.fitted_ = True |
<SYSTEM_TASK:>
Force use of accuracy score since we don't inherit
<END_TASK>
<USER_TASK:>
Description:
def score(self, X, y):
"""Force use of accuracy score since we don't inherit
from ClassifierMixin""" |
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X)) |
<SYSTEM_TASK:>
Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.
<END_TASK>
<USER_TASK:>
Description:
def compat_serializer_attr(serializer, obj):
"""
Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serialize... |
if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
for i in serializer.instance:
if i.id == obj.id:
return i
else:
return obj |
<SYSTEM_TASK:>
get_paginated_response is unknown to DRF 3.0
<END_TASK>
<USER_TASK:>
Description:
def compat_get_paginated_response(view, page):
""" get_paginated_response is unknown to DRF 3.0 """ |
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializ... |
<SYSTEM_TASK:>
Allows to define a callback for serializing information about the user.
<END_TASK>
<USER_TASK:>
Description:
def get_participants(self, obj):
""" Allows to define a callback for serializing information about the user. """ |
# we set the many to many serialization to False, because we only want it with retrieve requests
if self.callback is None:
return [participant.id for participant in obj.participants.all()]
else:
# we do not want user information
return self.callback(obj) |
<SYSTEM_TASK:>
We say if the message should trigger a notification
<END_TASK>
<USER_TASK:>
Description:
def get_is_notification(self, obj):
""" We say if the message should trigger a notification """ |
try:
o = compat_serializer_attr(self, obj)
return o.is_notification
except Exception:
return False |
<SYSTEM_TASK:>
Return the ids of the people who read the message instance.
<END_TASK>
<USER_TASK:>
Description:
def get_readers(self, obj):
""" Return the ids of the people who read the message instance. """ |
try:
o = compat_serializer_attr(self, obj)
return o.readers
except Exception:
return [] |
<SYSTEM_TASK:>
Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left.
<END_TASK>
<USER_TASK:>
Description:
def get_threads_where_participant_is_active(self, participant_id):
""" Gets all the threads in which the current participant is invol... |
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related('thread')
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
... |
<SYSTEM_TASK:>
Gets the threads where the specified participants are active and no one has left.
<END_TASK>
<USER_TASK:>
Description:
def get_active_threads_involving_all_participants(self, *participant_ids):
""" Gets the threads where the specified participants are active and no one has left. """ |
query = Thread.objects.\
exclude(participation__date_left__lte=now()).\
annotate(count_participants=Count('participants')).\
filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=par... |
<SYSTEM_TASK:>
When a Participant posts a message to other participants without specifying an existing Thread,
<END_TASK>
<USER_TASK:>
Description:
def get_or_create_thread(self, request, name=None, *participant_ids):
"""
When a Participant posts a message to other participants without specifying an exi... |
# we get the current participant
# or create him if he does not exit
participant_ids = list(participant_ids)
if request.rest_messaging_participant.id not in participant_ids:
participant_ids.append(request.rest_messaging_participant.id)
# we need at least one other... |
<SYSTEM_TASK:>
Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits
<END_TASK>
<USER_TASK:>
Description:
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does n... |
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count() |
<SYSTEM_TASK:>
Check who read each message.
<END_TASK>
<USER_TASK:>
Description:
def check_who_read(self, messages):
""" Check who read each message. """ |
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the messag... |
<SYSTEM_TASK:>
Check if each message requires a notification for the specified participant.
<END_TASK>
<USER_TASK:>
Description:
def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """ |
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
... |
<SYSTEM_TASK:>
Returns the last message in each thread
<END_TASK>
<USER_TASK:>
Description:
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """ |
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_... |
<SYSTEM_TASK:>
Returns all the messages in a thread.
<END_TASK>
<USER_TASK:>
Description:
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """ |
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.ob... |
<SYSTEM_TASK:>
We ensure the Thread only involves eligible participants.
<END_TASK>
<USER_TASK:>
Description:
def create(self, request, *args, **kwargs):
""" We ensure the Thread only involves eligible participants. """ |
serializer = self.get_serializer(data=compat_get_request_data(request))
compat_serializer_check_is_valid(serializer)
self.perform_create(request, serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, head... |
<SYSTEM_TASK:>
Pk is the pk of the Thread to which the messages belong.
<END_TASK>
<USER_TASK:>
Description:
def mark_thread_as_read(self, request, pk=None):
""" Pk is the pk of the Thread to which the messages belong. """ |
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we save the date
try:
participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
... |
<SYSTEM_TASK:>
Extract next stops along the journey.
<END_TASK>
<USER_TASK:>
Description:
def _pass_list(self) -> List[Dict[str, Any]]:
"""Extract next stops along the journey.""" |
stops: List[Dict[str, Any]] = []
for stop in self.journey.PassList.BasicStop:
index = stop.get("index")
station = stop.Location.Station.HafasName.Text.text
station_id = stop.Location.Station.ExternalId.text
stops.append({"index": index, "stationId": stati... |
<SYSTEM_TASK:>
Check `style` against pyout.styling.schema.
<END_TASK>
<USER_TASK:>
Description:
def validate(style):
"""Check `style` against pyout.styling.schema.
Parameters
----------
style : dict
Style object to validate.
Raises
------
StyleValidationError if `style` is not vali... |
try:
import jsonschema
except ImportError:
return
try:
jsonschema.validate(style, schema)
except jsonschema.ValidationError as exc:
new_exc = StyleValidationError(exc)
# Don't dump the original jsonschema exception because it is already
# included in the... |
<SYSTEM_TASK:>
Classify `value` of bold, color, and underline keys.
<END_TASK>
<USER_TASK:>
Description:
def value_type(value):
    """Classify `value` of bold, color, and underline keys.

    Parameters
    ----------
    value : style value
        Either a plain ("simple") value or a single-key mapping whose
        key names the lookup strategy.

    Returns
    -------
    str, {"simple", "lookup", "re_lookup", "interval"}

    Raises
    ------
    ValueError
        If `value` is a mapping but not one of the recognized one-key
        forms.
    """
    # Anything without a keys() method is a plain scalar-style value.
    if not hasattr(value, "keys"):
        return "simple"
    keys = list(value.keys())
    if len(keys) == 1 and keys[0] in ("lookup", "re_lookup", "interval"):
        return keys[0]
    raise ValueError("Type of `value` could not be determined")
<SYSTEM_TASK:>
Store an audio file in the storage directory
<END_TASK>
<USER_TASK:>
Description:
def add(self, src):
    """Store an audio file in the storage directory.

    :param src: audio file path
    :return: checksum value
    :raises TypeError: if the file is not a recognized audio type
    """
    # Reject anything the audio helper cannot identify before storing.
    file_type = audio.get_type(src)
    if not file_type:
        raise TypeError('The type of this file is not supported.')
    return super().add(src)
<SYSTEM_TASK:>
Merge command with arguments.
<END_TASK>
<USER_TASK:>
Description:
def _get_cmd(command, arguments):
"""Merge command with arguments.""" |
if arguments is None:
arguments = []
if command.endswith(".py") or command.endswith(".pyw"):
return [sys.executable, command] + list(arguments)
else:
return [command] + list(arguments) |
<SYSTEM_TASK:>
A command line argument parser.
<END_TASK>
<USER_TASK:>
Description:
def argparse(argv, parser, arguments):
""" A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
arg... |
def add_arg(parser, arg_spec):
parser.add_argument(arg_spec.name, help=arg_spec.help)
return parser
parse_request = parser \
.map(lambda i: ArgumentParser(description=i.description)) \
.combine_latest(arguments, lambda parser, arg_def: add_arg(parser,arg_def)) \
.last(... |
<SYSTEM_TASK:>
Calculate the complex flow vector `Q_n`.
<END_TASK>
<USER_TASK:>
Description:
def qn(phi, *n):
    """
    Calculate the complex flow vector `Q_n = \\sum_j exp(i n phi_j)`.

    :param array-like phi: Azimuthal angles.
    :param int n: One or more harmonics to calculate.
    :returns:
        A single complex number if only one harmonic was requested,
        otherwise an array of complex numbers (one per harmonic).
    """
    angles = np.ravel(phi)
    harmonics = np.asarray(n)
    # exp(i * n * phi) for every (harmonic, angle) pair, summed over angles.
    flow = np.exp(1j * np.outer(harmonics, angles)).sum(axis=1)
    return flow[0] if flow.size == 1 else flow
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def correlation(self, n, k, error=False):
    r"""
    Calculate `\langle k \rangle_n`,
    the `k`-particle correlation function for `n`\ th-order anisotropy.

    :param int n: Anisotropy order.
    :param int k: Correlation order.
    :param bool error: Also compute and return the statistical error.
    :returns: The correlation value, or a ``(value, error)`` pair when
        `error` is true.
    """
    # Populate the cache before reading from it.
    self._calculate_corr(n, k)
    value = self._corr[n][k]
    if not error:
        return value
    self._calculate_corr_err(n, k)
    return value, self._corr_err[n][k]
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def sample(self, multiplicity):
r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles.
""" |
if self._n is None:
return self._uniform_phi(multiplicity)
# Since the flow PDF does not have an analytic inverse CDF, I use a
# simple accept-reject sampling algorithm. This is reasonably
# efficient since for normal-sized vn, the PDF is close to flat. Now
# due ... |
<SYSTEM_TASK:>
Formats a response from a view to handle any RDF graphs
<END_TASK>
<USER_TASK:>
Description:
def output(self, response, accepts):
""" Formats a response from a view to handle any RDF graphs
If a view function returns an RDF graph, serialize it based on Accept header
If it's not an RDF graph... |
graph = self.get_graph(response)
if graph is not None:
# decide the format
mimetype, format = self.format_selector.decide(accepts, graph.context_aware)
# requested content couldn't find anything
if mimetype is None:
return self.make_406_response()
# explicitly mark text mimetypes as utf-8
i... |
<SYSTEM_TASK:>
Wraps a view function to return formatted RDF graphs
<END_TASK>
<USER_TASK:>
Description:
def decorate(self, view):
    """Wrap a view function to return formatted RDF graphs.

    Content negotiation (via `get_accept`/`output`) serializes any
    graph the view returns to the client-preferred format; other
    content passes through `output` unchanged.
    """
    from functools import wraps

    @wraps(view)
    def wrapper(*args, **kwargs):
        result = view(*args, **kwargs)
        return self.output(result, self.get_accept())
    return wrapper
<SYSTEM_TASK:>
Return a value from configuration.
<END_TASK>
<USER_TASK:>
Description:
def get(self, var, default=None):
    """Return a value from configuration.

    Safe version which always returns a default value if the value is
    not found.
    """
    try:
        value = self.__get(var)
    except (KeyError, IndexError):
        # Missing key or out-of-range list index: fall back.
        return default
    return value
<SYSTEM_TASK:>
Insert at the index.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, var, value, index=None):
    """Insert `value` into the list stored at `var`.

    Appends to the end of the list when `index` is not provided.

    :raises KeyError: if `var` does not refer to a list.
    """
    target = self.__get(var)
    if not isinstance(target, list):
        raise KeyError("%s: is not a list" % var)
    # insert(len(target), x) is equivalent to append(x).
    target.insert(len(target) if index is None else index, value)
    if self.auto_save:
        self.save()
<SYSTEM_TASK:>
Return a merged set of top level keys from all configurations.
<END_TASK>
<USER_TASK:>
Description:
def keys(self):
    """Return the union of top-level keys across all configurations."""
    return set().union(*(config.keys() for config in self.__configs))
<SYSTEM_TASK:>
Split the string s using shell-like syntax.
<END_TASK>
<USER_TASK:>
Description:
def split(s, posix=True):
    """Split the string s using shell-like syntax.

    Args:
        s (str or bytes): String to split; bytes input is decoded as
            UTF-8 first, since shlex operates on text.
        posix (bool): Use posix split

    Returns:
        list of str: List of string parts
    """
    # Plain isinstance(s, bytes) replaces the six.binary_type check —
    # no third-party shim needed on Python 3.
    if isinstance(s, bytes):
        s = s.decode("utf-8")
    return shlex.split(s, posix=posix)
<SYSTEM_TASK:>
Recursive search function.
<END_TASK>
<USER_TASK:>
Description:
def search(path, matcher="*", dirs=False, files=True):
"""Recursive search function.
Args:
path (str): Path to search recursively
matcher (str or callable): String pattern to search for or function
that r... |
if callable(matcher):
def fnmatcher(items):
return list(filter(matcher, items))
else:
def fnmatcher(items):
return fnmatch.filter(items, matcher)
for root, directories, filenames in os.walk(os.path.abspath(path)):
to_match = []
if dirs:
... |
<SYSTEM_TASK:>
Change the current working directory.
<END_TASK>
<USER_TASK:>
Description:
def chdir(directory):
"""Change the current working directory.
Args:
directory (str): Directory to go to.
""" |
directory = os.path.abspath(directory)
logger.info("chdir -> %s" % directory)
try:
if not os.path.isdir(directory):
logger.error(
"chdir -> %s failed! Directory does not exist!", directory
)
return False
os.chdir(directory)
return ... |
<SYSTEM_TASK:>
Context object for changing directory.
<END_TASK>
<USER_TASK:>
Description:
def goto(directory, create=False):
"""Context object for changing directory.
Args:
directory (str): Directory to go to.
create (bool): Create directory if it doesn't exists.
Usage::
>>> with... |
current = os.getcwd()
directory = os.path.abspath(directory)
if os.path.isdir(directory) or (create and mkdir(directory)):
logger.info("goto -> %s", directory)
os.chdir(directory)
try:
yield True
finally:
logger.info("goto <- %s", directory)
... |
<SYSTEM_TASK:>
Delete a file or directory.
<END_TASK>
<USER_TASK:>
Description:
def remove(path):
    """Delete a file or directory.

    Args:
        path (str): Path to the file or directory that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    # Pick the directory or file remover based on what `path` is.
    remover = __rmtree if os.path.isdir(path) else __rmfile
    return remover(path)
<SYSTEM_TASK:>
Read the content of the file.
<END_TASK>
<USER_TASK:>
Description:
def read(path, encoding="utf-8"):
    """Read the content of the file.

    Args:
        path (str): Path to the file
        encoding (str): File encoding. Default: utf-8

    Returns:
        str: File content, or empty string if the file could not be
        read for any reason.
    """
    try:
        with io.open(path, encoding=encoding) as handle:
            content = handle.read()
    except Exception as e:
        # Best-effort: log and return "" rather than propagate.
        logger.error("read: %s failed. Error: %s", path, e)
        return ""
    return content
<SYSTEM_TASK:>
Create a file at the given path if it does not already exists.
<END_TASK>
<USER_TASK:>
Description:
def touch(path, content="", encoding="utf-8", overwrite=False):
"""Create a file at the given path if it does not already exists.
Args:
path (str): Path to the file.
content (str):... |
path = os.path.abspath(path)
if not overwrite and os.path.exists(path):
logger.warning('touch: "%s" already exists', path)
return False
try:
logger.info("touch: %s", path)
with io.open(path, "wb") as f:
if not isinstance(content, six.binary_type):
... |
<SYSTEM_TASK:>
Return an object from a dot path.
<END_TASK>
<USER_TASK:>
Description:
def get_object(path="", obj=None):
"""Return an object from a dot path.
Path can either be a full path, in which case the `get_object` function
will try to import the modules in the path and follow it to the final
obj... |
if not path:
return obj
path = path.split(".")
if obj is None:
obj = importlib.import_module(path[0])
path = path[1:]
for item in path:
if item == "*":
# This is the star query, returns non hidden objects
return [
getattr(obj, name... |
<SYSTEM_TASK:>
Recursively load all subclasses from a module.
<END_TASK>
<USER_TASK:>
Description:
def load_subclasses(klass, modules=None):
    """Recursively load all subclasses of a class.

    Args:
        klass: Class whose subclasses we want to load.
        modules: Optional module path (str) or list of module paths to
            import first, so subclasses defined there get registered.

    Returns:
        list: All direct and indirect subclasses of `klass`.
    """
    if modules:
        if isinstance(modules, six.string_types):
            modules = [modules]
        loader = Loader()
        loader.load(*modules)
    # __subclasses__() lists only *direct* subclasses; walk the whole
    # tree to honor the documented "recursively" contract.
    found = []
    stack = [klass]
    while stack:
        for subclass in stack.pop().__subclasses__():
            if subclass not in found:
                found.append(subclass)
                stack.append(subclass)
    return found
<SYSTEM_TASK:>
Return full formatted traceback as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_exception():
"""Return full formatted traceback as a string.""" |
trace = ""
exception = ""
exc_list = traceback.format_exception_only(
sys.exc_info()[0], sys.exc_info()[1]
)
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
for entry in tb_list:
trace += entry
return "%s\n%s" % (excepti... |
<SYSTEM_TASK:>
Load one or more modules.
<END_TASK>
<USER_TASK:>
Description:
def load(self, *modules):
"""Load one or more modules.
Args:
modules: Either a string full path to a module or an actual module
object.
""" |
for module in modules:
if isinstance(module, six.string_types):
try:
module = get_object(module)
except Exception as e:
self.errors[module] = e
continue
self.modules[module.__package__] = module
... |
<SYSTEM_TASK:>
Calculate the product filter.
<END_TASK>
<USER_TASK:>
Description:
def _product_filter(products) -> str:
    """Calculate the product filter as a reversed binary string."""
    # Deduplicate via the set so repeated products count once.
    mask = sum({PRODUCTS[p] for p in products})
    return format(mask, "b")[::-1]
<SYSTEM_TASK:>
Forbid multi-line headers, to prevent header injection.
<END_TASK>
<USER_TASK:>
Description:
def forbid_multi_line_headers(name, val):
"""Forbid multi-line headers, to prevent header injection.""" |
val = smart_text(val)
if "\n" in val or "\r" in val:
raise BadHeaderError(
"Header values can't contain newlines "
"(got %r for header %r)" % (val, name)
)
try:
val = val.encode("ascii")
except UnicodeEncodeError:
if name.lower() in ("to... |
<SYSTEM_TASK:>
Close the connection to the email server.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Close the connection to the email server.""" |
try:
try:
self.connection.quit()
except socket.sslerror:
# This happens when calling quit() on a TLS connection
# sometimes.
self.connection.close()
except Exception as e:
logger.error(
... |
<SYSTEM_TASK:>
Send an email.
<END_TASK>
<USER_TASK:>
Description:
def _send(self, message):
"""Send an email.
Helper method that does the actual sending.
""" |
if not message.recipients():
return False
try:
self.connection.sendmail(
message.sender,
message.recipients(),
message.message().as_string(),
)
except Exception as e:
logger.error(
... |
<SYSTEM_TASK:>
Attach a file from the filesystem.
<END_TASK>
<USER_TASK:>
Description:
def attach_file(self, path, mimetype=None):
    """Attach a file from the filesystem.

    :param path: Path to the file; its basename becomes the attachment
        filename.
    :param mimetype: Optional MIME type passed through to `attach`.
    """
    filename = os.path.basename(path)
    # Use a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(path, "rb") as f:
        content = f.read()
    self.attach(filename, content, mimetype)
<SYSTEM_TASK:>
Convert the filename, content, mimetype triple to attachment.
<END_TASK>
<USER_TASK:>
Description:
def _create_attachment(self, filename, content, mimetype=None):
"""Convert the filename, content, mimetype triple to attachment.""" |
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split("/", 1)
if basetype == "text":
attachment = SafeMIMEText(
... |
<SYSTEM_TASK:>
Attach an alternative content representation.
<END_TASK>
<USER_TASK:>
Description:
def attach_alternative(self, content, mimetype=None):
    """Attach an alternative representation of the message content.

    Delegates to `attach` with no filename, so the content is stored
    purely as an alternative body part.
    """
    self.attach(mimetype=mimetype, content=content)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.