body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
460cd370cee88094142b9eea185b4e68075cadea9a1bd2eec6e55ec018211ce9
def usersAttending(self, meetup_id): ' Get users attending an upcoming meetup.' query = "SELECT id, firstname, lastname, email FROM users WHERE id IN ( SELECT user_id FROM rsvps WHERE meetup_id = '{}' AND response = 'yes')".format(meetup_id) self.cur.execute(query) result = self.cur.fetchall() return result
Get users attending an upcoming meetup.
app/api/v2/models/meetup_models.py
usersAttending
MRichardN/Qustioner-api-v2
0
python
def usersAttending(self, meetup_id): ' ' query = "SELECT id, firstname, lastname, email FROM users WHERE id IN ( SELECT user_id FROM rsvps WHERE meetup_id = '{}' AND response = 'yes')".format(meetup_id) self.cur.execute(query) result = self.cur.fetchall() return result
def usersAttending(self, meetup_id): ' ' query = "SELECT id, firstname, lastname, email FROM users WHERE id IN ( SELECT user_id FROM rsvps WHERE meetup_id = '{}' AND response = 'yes')".format(meetup_id) self.cur.execute(query) result = self.cur.fetchall() return result<|docstring|>Get users attending an upcoming meetup.<|endoftext|>
482b1a516ef599ffb484f90b1efdb6f27dabad6d871c0cff202a922332894159
def getUpcomings(self): ' Get all upcoming meetups in the next 1 week ' today = datetime.now().strftime('%d/%m/%Y') endWeek = (datetime.now() + timedelta(days=7)).strftime('%d/%m/%Y') query = "SELECT * FROM {} WHERE happeningOn BETWEEN '{}' AND '{}'".format(self.table, today, endWeek) self.cur.execute(query) result = self.cur.fetchall() return result
Get all upcoming meetups in the next 1 week
app/api/v2/models/meetup_models.py
getUpcomings
MRichardN/Qustioner-api-v2
0
python
def getUpcomings(self): ' ' today = datetime.now().strftime('%d/%m/%Y') endWeek = (datetime.now() + timedelta(days=7)).strftime('%d/%m/%Y') query = "SELECT * FROM {} WHERE happeningOn BETWEEN '{}' AND '{}'".format(self.table, today, endWeek) self.cur.execute(query) result = self.cur.fetchall() return result
def getUpcomings(self): ' ' today = datetime.now().strftime('%d/%m/%Y') endWeek = (datetime.now() + timedelta(days=7)).strftime('%d/%m/%Y') query = "SELECT * FROM {} WHERE happeningOn BETWEEN '{}' AND '{}'".format(self.table, today, endWeek) self.cur.execute(query) result = self.cur.fetchall() return result<|docstring|>Get all upcoming meetups in the next 1 week<|endoftext|>
6ee7040a5fff8f6a349f609307a1c91a6aa34f040cd0c4aa18a9e45199fc567f
def check_if_duplicate(self, data): ' Check for duplicate meetups.' query = "SELECT * FROM {} WHERE topic = '{}' AND location = '{}' ".format(self.table, data['topic'], data['location']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup with same topic at the same venue already exists') query = "SELECT * FROM {} WHERE happeningOn = '{}' AND location = '{}' ".format(self.table, data['happeningOn'], data['location']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup happening the same date at the same venue already exists') query = "SELECT * FROM {} WHERE topic = '{}' AND happeningOn = '{}' ".format(self.table, data['topic'], data['happeningOn']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup happening the same date with same topic already exists') return (False, None)
Check for duplicate meetups.
app/api/v2/models/meetup_models.py
check_if_duplicate
MRichardN/Qustioner-api-v2
0
python
def check_if_duplicate(self, data): ' ' query = "SELECT * FROM {} WHERE topic = '{}' AND location = '{}' ".format(self.table, data['topic'], data['location']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup with same topic at the same venue already exists') query = "SELECT * FROM {} WHERE happeningOn = '{}' AND location = '{}' ".format(self.table, data['happeningOn'], data['location']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup happening the same date at the same venue already exists') query = "SELECT * FROM {} WHERE topic = '{}' AND happeningOn = '{}' ".format(self.table, data['topic'], data['happeningOn']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup happening the same date with same topic already exists') return (False, None)
def check_if_duplicate(self, data): ' ' query = "SELECT * FROM {} WHERE topic = '{}' AND location = '{}' ".format(self.table, data['topic'], data['location']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup with same topic at the same venue already exists') query = "SELECT * FROM {} WHERE happeningOn = '{}' AND location = '{}' ".format(self.table, data['happeningOn'], data['location']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup happening the same date at the same venue already exists') query = "SELECT * FROM {} WHERE topic = '{}' AND happeningOn = '{}' ".format(self.table, data['topic'], data['happeningOn']) self.cur.execute(query) result = self.cur.fetchone() if result: return (True, 'Meetup happening the same date with same topic already exists') return (False, None)<|docstring|>Check for duplicate meetups.<|endoftext|>
76c0550c7705176d4b7f4c2b7ae578e2761765100215247e4aa067bb32ac0bc9
def __getattr__(self, name): '\n Allow for access of named data arrays as attributes\n if the attribute does not exist otherwise.\n ' try: return object.__getattribute__(self, name) except AttributeError: (E, V, T) = sys.exc_info() try: return self._data[name] except KeyError: pass raise E(V)
Allow for access of named data arrays as attributes if the attribute does not exist otherwise.
pictureframe/core.py
__getattr__
yotam/pictureframe
2
python
def __getattr__(self, name): '\n Allow for access of named data arrays as attributes\n if the attribute does not exist otherwise.\n ' try: return object.__getattribute__(self, name) except AttributeError: (E, V, T) = sys.exc_info() try: return self._data[name] except KeyError: pass raise E(V)
def __getattr__(self, name): '\n Allow for access of named data arrays as attributes\n if the attribute does not exist otherwise.\n ' try: return object.__getattribute__(self, name) except AttributeError: (E, V, T) = sys.exc_info() try: return self._data[name] except KeyError: pass raise E(V)<|docstring|>Allow for access of named data arrays as attributes if the attribute does not exist otherwise.<|endoftext|>
18dcfda59ebdeda2514c509c34e868ce25eb9b7aff09d009ddc9f300443be2ec
def __getitem__(self, args): '\n Either access named data array or return a new PictureFrame\n where each data array is indexed with the arguments.\n The new PictureFrame may have a different shape and number of\n fixed dimensions, depending on the indexing operation.\n ' if isinstance(args, str): return self._data[args] else: data = OrderedDict() fixed_dim = None for (k, v) in self._data.items(): subarray = v.__getitem__(args) data[k] = subarray if (fixed_dim is None): fixed_dim = ((self._fixed_dim + subarray.ndim) - v.ndim) return PictureFrame(data, fixed_dim=fixed_dim)
Either access named data array or return a new PictureFrame where each data array is indexed with the arguments. The new PictureFrame may have a different shape and number of fixed dimensions, depending on the indexing operation.
pictureframe/core.py
__getitem__
yotam/pictureframe
2
python
def __getitem__(self, args): '\n Either access named data array or return a new PictureFrame\n where each data array is indexed with the arguments.\n The new PictureFrame may have a different shape and number of\n fixed dimensions, depending on the indexing operation.\n ' if isinstance(args, str): return self._data[args] else: data = OrderedDict() fixed_dim = None for (k, v) in self._data.items(): subarray = v.__getitem__(args) data[k] = subarray if (fixed_dim is None): fixed_dim = ((self._fixed_dim + subarray.ndim) - v.ndim) return PictureFrame(data, fixed_dim=fixed_dim)
def __getitem__(self, args): '\n Either access named data array or return a new PictureFrame\n where each data array is indexed with the arguments.\n The new PictureFrame may have a different shape and number of\n fixed dimensions, depending on the indexing operation.\n ' if isinstance(args, str): return self._data[args] else: data = OrderedDict() fixed_dim = None for (k, v) in self._data.items(): subarray = v.__getitem__(args) data[k] = subarray if (fixed_dim is None): fixed_dim = ((self._fixed_dim + subarray.ndim) - v.ndim) return PictureFrame(data, fixed_dim=fixed_dim)<|docstring|>Either access named data array or return a new PictureFrame where each data array is indexed with the arguments. The new PictureFrame may have a different shape and number of fixed dimensions, depending on the indexing operation.<|endoftext|>
9728978c379e7e845bdb37f6c120400f221e7db378a6d7e8fbb6f208631ee37c
def __setitem__(self, args, values): '\n Either set named data array or set the values of each data array\n with the supplied PictureFrame\n ' if isinstance(args, str): return self._add_array(args, values) elif isinstance(values, PictureFrame): for (k, v) in self._data.items(): v[args] = values[k] else: raise ValueError('Value type not understood')
Either set named data array or set the values of each data array with the supplied PictureFrame
pictureframe/core.py
__setitem__
yotam/pictureframe
2
python
def __setitem__(self, args, values): '\n Either set named data array or set the values of each data array\n with the supplied PictureFrame\n ' if isinstance(args, str): return self._add_array(args, values) elif isinstance(values, PictureFrame): for (k, v) in self._data.items(): v[args] = values[k] else: raise ValueError('Value type not understood')
def __setitem__(self, args, values): '\n Either set named data array or set the values of each data array\n with the supplied PictureFrame\n ' if isinstance(args, str): return self._add_array(args, values) elif isinstance(values, PictureFrame): for (k, v) in self._data.items(): v[args] = values[k] else: raise ValueError('Value type not understood')<|docstring|>Either set named data array or set the values of each data array with the supplied PictureFrame<|endoftext|>
e834b54c5fc5e1c5ba11edaef3562dcbdf8ca5dc72587782a8059989137def40
def zoom(self, zoom, orders=None): "\n Rescale all data arrays along constrained axes\n Default to bilinear interpolation for float arrays\n and nearest neighbour for int arrays\n\n Parameters\n ----------\n zoom : float\n Scale factor.\n orders : dict, optional\n Override default order of interpolation for member arrays.\n Format is {'name': order}, where order is an integer.\n\n Returns\n -------\n scaled : PictureFrame\n " zoom_base = ((zoom,) * self._fixed_dim) if (orders is None): orders = {} data = OrderedDict() for (k, v) in self._data.items(): if (k in orders): order = orders[k] elif issubclass(v.dtype.type, np.integer): order = 0 elif (v.dtype == np.bool): order = 0 else: order = 2 zoom = (zoom_base + ((1,) * (v.ndim - self._fixed_dim))) data[k] = interpolation.zoom(v, order=order, zoom=zoom) return PictureFrame(data, fixed_dim=self._fixed_dim)
Rescale all data arrays along constrained axes Default to bilinear interpolation for float arrays and nearest neighbour for int arrays Parameters ---------- zoom : float Scale factor. orders : dict, optional Override default order of interpolation for member arrays. Format is {'name': order}, where order is an integer. Returns ------- scaled : PictureFrame
pictureframe/core.py
zoom
yotam/pictureframe
2
python
def zoom(self, zoom, orders=None): "\n Rescale all data arrays along constrained axes\n Default to bilinear interpolation for float arrays\n and nearest neighbour for int arrays\n\n Parameters\n ----------\n zoom : float\n Scale factor.\n orders : dict, optional\n Override default order of interpolation for member arrays.\n Format is {'name': order}, where order is an integer.\n\n Returns\n -------\n scaled : PictureFrame\n " zoom_base = ((zoom,) * self._fixed_dim) if (orders is None): orders = {} data = OrderedDict() for (k, v) in self._data.items(): if (k in orders): order = orders[k] elif issubclass(v.dtype.type, np.integer): order = 0 elif (v.dtype == np.bool): order = 0 else: order = 2 zoom = (zoom_base + ((1,) * (v.ndim - self._fixed_dim))) data[k] = interpolation.zoom(v, order=order, zoom=zoom) return PictureFrame(data, fixed_dim=self._fixed_dim)
def zoom(self, zoom, orders=None): "\n Rescale all data arrays along constrained axes\n Default to bilinear interpolation for float arrays\n and nearest neighbour for int arrays\n\n Parameters\n ----------\n zoom : float\n Scale factor.\n orders : dict, optional\n Override default order of interpolation for member arrays.\n Format is {'name': order}, where order is an integer.\n\n Returns\n -------\n scaled : PictureFrame\n " zoom_base = ((zoom,) * self._fixed_dim) if (orders is None): orders = {} data = OrderedDict() for (k, v) in self._data.items(): if (k in orders): order = orders[k] elif issubclass(v.dtype.type, np.integer): order = 0 elif (v.dtype == np.bool): order = 0 else: order = 2 zoom = (zoom_base + ((1,) * (v.ndim - self._fixed_dim))) data[k] = interpolation.zoom(v, order=order, zoom=zoom) return PictureFrame(data, fixed_dim=self._fixed_dim)<|docstring|>Rescale all data arrays along constrained axes Default to bilinear interpolation for float arrays and nearest neighbour for int arrays Parameters ---------- zoom : float Scale factor. orders : dict, optional Override default order of interpolation for member arrays. Format is {'name': order}, where order is an integer. Returns ------- scaled : PictureFrame<|endoftext|>
7a5cbd8dc6061e5bd91a417f9a769941b0c53ede6e12f12e8408dfd437a5cb97
def groupby(self, by, yield_label=True): '\n Group PictureFrame by data array name or by array of labels.\n\n Parameters\n ----------\n by : string or ndarray\n Name of data array or array of label values.\n yield_label : bool, optional\n Returns label value and PictureFrame tuple if True,\n otherwise just the indexed PictureFrame\n\n Returns\n -------\n it : generator of PictureFrame or (label, PictureFrame)\n ' if isinstance(by, str): labels = self.__getitem__(by) else: if (by.ndim != self._fixed_dim): raise ValueError('Invalid dimensions for label array') if (by.shape != self._data_shape): raise ValueError('Invalid shape for label array') labels = by for label in np.unique(labels): sub_pf = self.__getitem__((labels == label)) if yield_label: (yield (label, sub_pf)) else: (yield sub_pf)
Group PictureFrame by data array name or by array of labels. Parameters ---------- by : string or ndarray Name of data array or array of label values. yield_label : bool, optional Returns label value and PictureFrame tuple if True, otherwise just the indexed PictureFrame Returns ------- it : generator of PictureFrame or (label, PictureFrame)
pictureframe/core.py
groupby
yotam/pictureframe
2
python
def groupby(self, by, yield_label=True): '\n Group PictureFrame by data array name or by array of labels.\n\n Parameters\n ----------\n by : string or ndarray\n Name of data array or array of label values.\n yield_label : bool, optional\n Returns label value and PictureFrame tuple if True,\n otherwise just the indexed PictureFrame\n\n Returns\n -------\n it : generator of PictureFrame or (label, PictureFrame)\n ' if isinstance(by, str): labels = self.__getitem__(by) else: if (by.ndim != self._fixed_dim): raise ValueError('Invalid dimensions for label array') if (by.shape != self._data_shape): raise ValueError('Invalid shape for label array') labels = by for label in np.unique(labels): sub_pf = self.__getitem__((labels == label)) if yield_label: (yield (label, sub_pf)) else: (yield sub_pf)
def groupby(self, by, yield_label=True): '\n Group PictureFrame by data array name or by array of labels.\n\n Parameters\n ----------\n by : string or ndarray\n Name of data array or array of label values.\n yield_label : bool, optional\n Returns label value and PictureFrame tuple if True,\n otherwise just the indexed PictureFrame\n\n Returns\n -------\n it : generator of PictureFrame or (label, PictureFrame)\n ' if isinstance(by, str): labels = self.__getitem__(by) else: if (by.ndim != self._fixed_dim): raise ValueError('Invalid dimensions for label array') if (by.shape != self._data_shape): raise ValueError('Invalid shape for label array') labels = by for label in np.unique(labels): sub_pf = self.__getitem__((labels == label)) if yield_label: (yield (label, sub_pf)) else: (yield sub_pf)<|docstring|>Group PictureFrame by data array name or by array of labels. Parameters ---------- by : string or ndarray Name of data array or array of label values. yield_label : bool, optional Returns label value and PictureFrame tuple if True, otherwise just the indexed PictureFrame Returns ------- it : generator of PictureFrame or (label, PictureFrame)<|endoftext|>
52f8937c39f6fb2ed21090afe6a5dc7f0f008660fee2b4209b9c771ba46b6dc6
def browse(self): '\n Debug method for interactive viewing of PictureFrame data.\n ' if (not (self._fixed_dim == 2)): raise NotImplementedError('Browsing only supported for image data') plt.ion() for (k, v) in self._data.items(): plt.clf() if (v.ndim == 2): plt.imshow(v, cmap='Greys_r') else: try: plt.imshow(v) except TypeError: pass plt.title(k) plt.waitforbuttonpress() plt.close() plt.ioff()
Debug method for interactive viewing of PictureFrame data.
pictureframe/core.py
browse
yotam/pictureframe
2
python
def browse(self): '\n \n ' if (not (self._fixed_dim == 2)): raise NotImplementedError('Browsing only supported for image data') plt.ion() for (k, v) in self._data.items(): plt.clf() if (v.ndim == 2): plt.imshow(v, cmap='Greys_r') else: try: plt.imshow(v) except TypeError: pass plt.title(k) plt.waitforbuttonpress() plt.close() plt.ioff()
def browse(self): '\n \n ' if (not (self._fixed_dim == 2)): raise NotImplementedError('Browsing only supported for image data') plt.ion() for (k, v) in self._data.items(): plt.clf() if (v.ndim == 2): plt.imshow(v, cmap='Greys_r') else: try: plt.imshow(v) except TypeError: pass plt.title(k) plt.waitforbuttonpress() plt.close() plt.ioff()<|docstring|>Debug method for interactive viewing of PictureFrame data.<|endoftext|>
ede01f0cc37b506ff2579b18ab9bf8a6359604b46de8fc4836e5ac7c4fbbcdf8
async def handler(self, request): '\n Handle all incoming POST requests.\n ' try: content_type = request.headers.get('content-type', '') if (not content_type.lower().startswith('application/xml')): raise errors.HTTPError(response_code=HTTPStatus.BAD_REQUEST, response_description=f"The Content-Type header must be application/xml; you provided {request.headers.get('content-type', '')}") content = (await request.read()) hooks.call('before_parse', content) message_tree = validate_xml_schema(content) (message_type, message_payload) = parse_message(content) if (message_type == 'oadrResponse'): raise errors.SendEmptyHTTPResponse() if (('vtn_id' in message_payload) and (message_payload['vtn_id'] is not None) and (message_payload['vtn_id'] != self.vtn_id)): raise errors.InvalidIdError(f"The supplied vtnID is invalid. It should be '{self.vtn_id}', you supplied {message_payload['vtn_id']}.") if ((message_type not in ('oadrCreatePartyRegistration', 'oadrQueryRegistration')) and ('ven_id' in message_payload) and hasattr(self, 'ven_lookup')): result = (await utils.await_if_required(self.ven_lookup(ven_id=message_payload['ven_id']))) if ((result is None) or (result.get('registration_id', None) is None)): raise errors.RequestReregistration(message_payload['ven_id']) if (request.secure and ('ven_id' in message_payload)): if hasattr(self, 'fingerprint_lookup'): (await authenticate_message(request, message_tree, message_payload, fingerprint_lookup=self.fingerprint_lookup)) elif hasattr(self, 'ven_lookup'): (await authenticate_message(request, message_tree, message_payload, ven_lookup=self.ven_lookup)) else: logger.error("Could not authenticate this VEN because you did not provide a 'ven_lookup' function. 
Please see https://openleadr.org/docs/server.html#signing-messages for info.") try: if (request.secure and (message_type == 'oadrCreatePartyRegistration')): message_payload['fingerprint'] = utils.get_cert_fingerprint_from_request(request) (response_type, response_payload) = (await self.handle_message(message_type, message_payload)) except Exception as err: logger.error(f'An exception occurred during the execution of your {self.__class__.__name__} handler: {err.__class__.__name__}: {err}') raise err if ('response' not in response_payload): response_payload['response'] = {'response_code': 200, 'response_description': 'OK', 'request_id': message_payload.get('request_id')} response_payload['vtn_id'] = self.vtn_id if ('ven_id' not in response_payload): response_payload['ven_id'] = message_payload.get('ven_id') except errors.RequestReregistration as err: response_type = 'oadrRequestReregistration' response_payload = {'ven_id': err.ven_id} msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') except errors.SendEmptyHTTPResponse: response = web.Response(text='', status=HTTPStatus.OK, content_type='application/xml') except errors.ProtocolError as err: (response_type, response_payload) = self.error_response(message_type, err.response_code, err.response_description) msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') except errors.HTTPError as err: response = web.Response(text=err.response_description, status=err.response_code) except XMLSyntaxError as err: logger.warning(f'XML schema validation of incoming message failed: {err}.') response = web.Response(text=f'XML failed validation: {err}', status=HTTPStatus.BAD_REQUEST) except errors.FingerprintMismatch as err: logger.warning(err) response = web.Response(text=str(err), status=HTTPStatus.FORBIDDEN) except InvalidSignature: 
logger.warning('Incoming message had invalid signature, ignoring.') response = web.Response(text='Invalid Signature', status=HTTPStatus.FORBIDDEN) except Exception as err: logger.error(f'The VTN server encountered an error: {err.__class__.__name__}: {err}') response = web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR) else: msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') hooks.call('before_respond', response.text) return response
Handle all incoming POST requests.
openleadr/service/vtn_service.py
handler
hbscharp/openleadr-python
75
python
async def handler(self, request): '\n \n ' try: content_type = request.headers.get('content-type', ) if (not content_type.lower().startswith('application/xml')): raise errors.HTTPError(response_code=HTTPStatus.BAD_REQUEST, response_description=f"The Content-Type header must be application/xml; you provided {request.headers.get('content-type', )}") content = (await request.read()) hooks.call('before_parse', content) message_tree = validate_xml_schema(content) (message_type, message_payload) = parse_message(content) if (message_type == 'oadrResponse'): raise errors.SendEmptyHTTPResponse() if (('vtn_id' in message_payload) and (message_payload['vtn_id'] is not None) and (message_payload['vtn_id'] != self.vtn_id)): raise errors.InvalidIdError(f"The supplied vtnID is invalid. It should be '{self.vtn_id}', you supplied {message_payload['vtn_id']}.") if ((message_type not in ('oadrCreatePartyRegistration', 'oadrQueryRegistration')) and ('ven_id' in message_payload) and hasattr(self, 'ven_lookup')): result = (await utils.await_if_required(self.ven_lookup(ven_id=message_payload['ven_id']))) if ((result is None) or (result.get('registration_id', None) is None)): raise errors.RequestReregistration(message_payload['ven_id']) if (request.secure and ('ven_id' in message_payload)): if hasattr(self, 'fingerprint_lookup'): (await authenticate_message(request, message_tree, message_payload, fingerprint_lookup=self.fingerprint_lookup)) elif hasattr(self, 'ven_lookup'): (await authenticate_message(request, message_tree, message_payload, ven_lookup=self.ven_lookup)) else: logger.error("Could not authenticate this VEN because you did not provide a 'ven_lookup' function. 
Please see https://openleadr.org/docs/server.html#signing-messages for info.") try: if (request.secure and (message_type == 'oadrCreatePartyRegistration')): message_payload['fingerprint'] = utils.get_cert_fingerprint_from_request(request) (response_type, response_payload) = (await self.handle_message(message_type, message_payload)) except Exception as err: logger.error(f'An exception occurred during the execution of your {self.__class__.__name__} handler: {err.__class__.__name__}: {err}') raise err if ('response' not in response_payload): response_payload['response'] = {'response_code': 200, 'response_description': 'OK', 'request_id': message_payload.get('request_id')} response_payload['vtn_id'] = self.vtn_id if ('ven_id' not in response_payload): response_payload['ven_id'] = message_payload.get('ven_id') except errors.RequestReregistration as err: response_type = 'oadrRequestReregistration' response_payload = {'ven_id': err.ven_id} msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') except errors.SendEmptyHTTPResponse: response = web.Response(text=, status=HTTPStatus.OK, content_type='application/xml') except errors.ProtocolError as err: (response_type, response_payload) = self.error_response(message_type, err.response_code, err.response_description) msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') except errors.HTTPError as err: response = web.Response(text=err.response_description, status=err.response_code) except XMLSyntaxError as err: logger.warning(f'XML schema validation of incoming message failed: {err}.') response = web.Response(text=f'XML failed validation: {err}', status=HTTPStatus.BAD_REQUEST) except errors.FingerprintMismatch as err: logger.warning(err) response = web.Response(text=str(err), status=HTTPStatus.FORBIDDEN) except InvalidSignature: 
logger.warning('Incoming message had invalid signature, ignoring.') response = web.Response(text='Invalid Signature', status=HTTPStatus.FORBIDDEN) except Exception as err: logger.error(f'The VTN server encountered an error: {err.__class__.__name__}: {err}') response = web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR) else: msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') hooks.call('before_respond', response.text) return response
async def handler(self, request): '\n \n ' try: content_type = request.headers.get('content-type', ) if (not content_type.lower().startswith('application/xml')): raise errors.HTTPError(response_code=HTTPStatus.BAD_REQUEST, response_description=f"The Content-Type header must be application/xml; you provided {request.headers.get('content-type', )}") content = (await request.read()) hooks.call('before_parse', content) message_tree = validate_xml_schema(content) (message_type, message_payload) = parse_message(content) if (message_type == 'oadrResponse'): raise errors.SendEmptyHTTPResponse() if (('vtn_id' in message_payload) and (message_payload['vtn_id'] is not None) and (message_payload['vtn_id'] != self.vtn_id)): raise errors.InvalidIdError(f"The supplied vtnID is invalid. It should be '{self.vtn_id}', you supplied {message_payload['vtn_id']}.") if ((message_type not in ('oadrCreatePartyRegistration', 'oadrQueryRegistration')) and ('ven_id' in message_payload) and hasattr(self, 'ven_lookup')): result = (await utils.await_if_required(self.ven_lookup(ven_id=message_payload['ven_id']))) if ((result is None) or (result.get('registration_id', None) is None)): raise errors.RequestReregistration(message_payload['ven_id']) if (request.secure and ('ven_id' in message_payload)): if hasattr(self, 'fingerprint_lookup'): (await authenticate_message(request, message_tree, message_payload, fingerprint_lookup=self.fingerprint_lookup)) elif hasattr(self, 'ven_lookup'): (await authenticate_message(request, message_tree, message_payload, ven_lookup=self.ven_lookup)) else: logger.error("Could not authenticate this VEN because you did not provide a 'ven_lookup' function. 
Please see https://openleadr.org/docs/server.html#signing-messages for info.") try: if (request.secure and (message_type == 'oadrCreatePartyRegistration')): message_payload['fingerprint'] = utils.get_cert_fingerprint_from_request(request) (response_type, response_payload) = (await self.handle_message(message_type, message_payload)) except Exception as err: logger.error(f'An exception occurred during the execution of your {self.__class__.__name__} handler: {err.__class__.__name__}: {err}') raise err if ('response' not in response_payload): response_payload['response'] = {'response_code': 200, 'response_description': 'OK', 'request_id': message_payload.get('request_id')} response_payload['vtn_id'] = self.vtn_id if ('ven_id' not in response_payload): response_payload['ven_id'] = message_payload.get('ven_id') except errors.RequestReregistration as err: response_type = 'oadrRequestReregistration' response_payload = {'ven_id': err.ven_id} msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') except errors.SendEmptyHTTPResponse: response = web.Response(text=, status=HTTPStatus.OK, content_type='application/xml') except errors.ProtocolError as err: (response_type, response_payload) = self.error_response(message_type, err.response_code, err.response_description) msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') except errors.HTTPError as err: response = web.Response(text=err.response_description, status=err.response_code) except XMLSyntaxError as err: logger.warning(f'XML schema validation of incoming message failed: {err}.') response = web.Response(text=f'XML failed validation: {err}', status=HTTPStatus.BAD_REQUEST) except errors.FingerprintMismatch as err: logger.warning(err) response = web.Response(text=str(err), status=HTTPStatus.FORBIDDEN) except InvalidSignature: 
logger.warning('Incoming message had invalid signature, ignoring.') response = web.Response(text='Invalid Signature', status=HTTPStatus.FORBIDDEN) except Exception as err: logger.error(f'The VTN server encountered an error: {err.__class__.__name__}: {err}') response = web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR) else: msg = self._create_message(response_type, **response_payload) response = web.Response(text=msg, status=HTTPStatus.OK, content_type='application/xml') hooks.call('before_respond', response.text) return response<|docstring|>Handle all incoming POST requests.<|endoftext|>
f2caad5e61f3139f81656c20d6582e3864a5d1ff16bae5b6df02500b7691feee
def ip4_from_int(ip): 'Convert :py:class:`int` to IPv4 string\n\n :param ip: int representing an IPv4\n :type ip: int\n :return: IP in dot-decimal notation\n :rtype: str\n ' return socket.inet_ntoa(struct.pack('>L', ip))
Convert :py:class:`int` to IPv4 string :param ip: int representing an IPv4 :type ip: int :return: IP in dot-decimal notation :rtype: str
steam/utils/__init__.py
ip4_from_int
sammiee5311/steam
727
python
def ip4_from_int(ip): 'Convert :py:class:`int` to IPv4 string\n\n :param ip: int representing an IPv4\n :type ip: int\n :return: IP in dot-decimal notation\n :rtype: str\n ' return socket.inet_ntoa(struct.pack('>L', ip))
def ip4_from_int(ip): 'Convert :py:class:`int` to IPv4 string\n\n :param ip: int representing an IPv4\n :type ip: int\n :return: IP in dot-decimal notation\n :rtype: str\n ' return socket.inet_ntoa(struct.pack('>L', ip))<|docstring|>Convert :py:class:`int` to IPv4 string :param ip: int representing an IPv4 :type ip: int :return: IP in dot-decimal notation :rtype: str<|endoftext|>
e71de3e20fceef1ef728b9fe48b40b9a32cc8c9cef58d4d7644e0304721812f8
def ip4_to_int(ip): 'Convert IPv4 string to :py:class:`int`\n\n :param ip: IPv4 in dot-decimal notation\n :type ip: str\n :rtype: int\n ' return struct.unpack('>L', socket.inet_aton(ip))[0]
Convert IPv4 string to :py:class:`int` :param ip: IPv4 in dot-decimal notation :type ip: str :rtype: int
steam/utils/__init__.py
ip4_to_int
sammiee5311/steam
727
python
def ip4_to_int(ip): 'Convert IPv4 string to :py:class:`int`\n\n :param ip: IPv4 in dot-decimal notation\n :type ip: str\n :rtype: int\n ' return struct.unpack('>L', socket.inet_aton(ip))[0]
def ip4_to_int(ip): 'Convert IPv4 string to :py:class:`int`\n\n :param ip: IPv4 in dot-decimal notation\n :type ip: str\n :rtype: int\n ' return struct.unpack('>L', socket.inet_aton(ip))[0]<|docstring|>Convert IPv4 string to :py:class:`int` :param ip: IPv4 in dot-decimal notation :type ip: str :rtype: int<|endoftext|>
d280da47b09e553f70b399826d66c6296aab51a5d47c570c4b8667a0d107bc60
def ip6_from_bytes(ip): 'Convert :py:class:`bytes` to IPv6 string\n\n :param ip: IPv6 in dot-decimal notation\n :type ip: bytes\n :rtype: str\n ' return socket.inet_ntop(socket.AF_INET6, ip)
Convert :py:class:`bytes` to IPv6 string :param ip: IPv6 in dot-decimal notation :type ip: bytes :rtype: str
steam/utils/__init__.py
ip6_from_bytes
sammiee5311/steam
727
python
def ip6_from_bytes(ip): 'Convert :py:class:`bytes` to IPv6 string\n\n :param ip: IPv6 in dot-decimal notation\n :type ip: bytes\n :rtype: str\n ' return socket.inet_ntop(socket.AF_INET6, ip)
def ip6_from_bytes(ip): 'Convert :py:class:`bytes` to IPv6 string\n\n :param ip: IPv6 in dot-decimal notation\n :type ip: bytes\n :rtype: str\n ' return socket.inet_ntop(socket.AF_INET6, ip)<|docstring|>Convert :py:class:`bytes` to IPv6 string :param ip: IPv6 in dot-decimal notation :type ip: bytes :rtype: str<|endoftext|>
d353c50cbbb6ba03796c61073a40657a79e6c691f088b9adfbe48fe50235353c
def ip6_to_bytes(ip): 'Convert IPv6 string to :py:class:`bytes`\n\n :param ip: IPv6 in dot-decimal notation\n :type ip: str\n :rtype: bytes\n ' return socket.inet_pton(socket.AF_INET6, ip)
Convert IPv6 string to :py:class:`bytes` :param ip: IPv6 in dot-decimal notation :type ip: str :rtype: bytes
steam/utils/__init__.py
ip6_to_bytes
sammiee5311/steam
727
python
def ip6_to_bytes(ip): 'Convert IPv6 string to :py:class:`bytes`\n\n :param ip: IPv6 in dot-decimal notation\n :type ip: str\n :rtype: bytes\n ' return socket.inet_pton(socket.AF_INET6, ip)
def ip6_to_bytes(ip): 'Convert IPv6 string to :py:class:`bytes`\n\n :param ip: IPv6 in dot-decimal notation\n :type ip: str\n :rtype: bytes\n ' return socket.inet_pton(socket.AF_INET6, ip)<|docstring|>Convert IPv6 string to :py:class:`bytes` :param ip: IPv6 in dot-decimal notation :type ip: str :rtype: bytes<|endoftext|>
9c05ceaa34633a652db01a3b3e3e675399e2d472f8c4ebc05f6d833fb285e1a3
def chunks(arr, size): 'Splits a list into chunks\n\n :param arr: list to split\n :type arr: :class:`list`\n :param size: number of elements in each chunk\n :type size: :class:`int`\n :return: generator object\n :rtype: :class:`generator`\n ' for i in _range(0, len(arr), size): (yield arr[i:(i + size)])
Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator`
steam/utils/__init__.py
chunks
sammiee5311/steam
727
python
def chunks(arr, size): 'Splits a list into chunks\n\n :param arr: list to split\n :type arr: :class:`list`\n :param size: number of elements in each chunk\n :type size: :class:`int`\n :return: generator object\n :rtype: :class:`generator`\n ' for i in _range(0, len(arr), size): (yield arr[i:(i + size)])
def chunks(arr, size): 'Splits a list into chunks\n\n :param arr: list to split\n :type arr: :class:`list`\n :param size: number of elements in each chunk\n :type size: :class:`int`\n :return: generator object\n :rtype: :class:`generator`\n ' for i in _range(0, len(arr), size): (yield arr[i:(i + size)])<|docstring|>Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator`<|endoftext|>
c87c3056689e3f68ba8f84614f9e118ca3c37d2adadb59da06b5ca3fe262790b
def create_user(self, stu_id, nickname=None, password=None, **extra_fields): '\n Create and save a user with the given stu_id and password.\n ' if (not stu_id): raise ValueError('The given stu_id must be set') if (not nickname): nickname = ('默认昵称' + stu_id[(- 4):]) user = self.model(stu_id=stu_id, nickname=nickname, **extra_fields) user.set_password(password) user.save(using=self._db) return user
Create and save a user with the given stu_id and password.
person/models.py
create_user
PeiGiZhu/NWU_ACM_MIS
0
python
def create_user(self, stu_id, nickname=None, password=None, **extra_fields): '\n \n ' if (not stu_id): raise ValueError('The given stu_id must be set') if (not nickname): nickname = ('默认昵称' + stu_id[(- 4):]) user = self.model(stu_id=stu_id, nickname=nickname, **extra_fields) user.set_password(password) user.save(using=self._db) return user
def create_user(self, stu_id, nickname=None, password=None, **extra_fields): '\n \n ' if (not stu_id): raise ValueError('The given stu_id must be set') if (not nickname): nickname = ('默认昵称' + stu_id[(- 4):]) user = self.model(stu_id=stu_id, nickname=nickname, **extra_fields) user.set_password(password) user.save(using=self._db) return user<|docstring|>Create and save a user with the given stu_id and password.<|endoftext|>
8520b88aab5966cbb1f3645b0b17a9ed659557167e02249707e930e1c2211b05
def __init__(self, sim_env, ecu_id): ' Constructor\n \n Input: ecu_id string id of the corresponding AbstractECU\n sim_env simpy.Environment environment of this component\n Output: -\n ' AbstractCommModule.__init__(self, sim_env) self._ecu_id = ecu_id self._jitter_in = 1 self.monitor_list = RefList() self._init_layers(self.sim_env, self.MessageClass) self._tags = ['AUTH_SEND_TIME_BEFORE_ENCRYPTION', 'AUTH_SEND_TIME_AFTER_ENCRYPTION', 'AUTH_RECEIVE_TIME_BEFORE_DECRYPTION', 'AUTH_RECEIVE_TIME_AFTER_DECRYPTION']
Constructor Input: ecu_id string id of the corresponding AbstractECU sim_env simpy.Environment environment of this component Output: -
Tutorial4AddSimpleTiming/layers/impl_comm_module_my_protocol.py
__init__
arturmrowca/IVNS
0
python
def __init__(self, sim_env, ecu_id): ' Constructor\n \n Input: ecu_id string id of the corresponding AbstractECU\n sim_env simpy.Environment environment of this component\n Output: -\n ' AbstractCommModule.__init__(self, sim_env) self._ecu_id = ecu_id self._jitter_in = 1 self.monitor_list = RefList() self._init_layers(self.sim_env, self.MessageClass) self._tags = ['AUTH_SEND_TIME_BEFORE_ENCRYPTION', 'AUTH_SEND_TIME_AFTER_ENCRYPTION', 'AUTH_RECEIVE_TIME_BEFORE_DECRYPTION', 'AUTH_RECEIVE_TIME_AFTER_DECRYPTION']
def __init__(self, sim_env, ecu_id): ' Constructor\n \n Input: ecu_id string id of the corresponding AbstractECU\n sim_env simpy.Environment environment of this component\n Output: -\n ' AbstractCommModule.__init__(self, sim_env) self._ecu_id = ecu_id self._jitter_in = 1 self.monitor_list = RefList() self._init_layers(self.sim_env, self.MessageClass) self._tags = ['AUTH_SEND_TIME_BEFORE_ENCRYPTION', 'AUTH_SEND_TIME_AFTER_ENCRYPTION', 'AUTH_RECEIVE_TIME_BEFORE_DECRYPTION', 'AUTH_RECEIVE_TIME_AFTER_DECRYPTION']<|docstring|>Constructor Input: ecu_id string id of the corresponding AbstractECU sim_env simpy.Environment environment of this component Output: -<|endoftext|>
8106f244617a5733016831a3d52b8e64233887167f827374191788a77bf2f2dc
def _init_layers(self, sim_env, MessageClass): ' Initializes the software layers \n \n Input: sim_env simpy.Environment environment of this component \n MessageClass AbstractBusMessage class of the messages how they are sent on the CAN Bus\n Output: - \n ' self.physical_lay = StdPhysicalLayer(sim_env) self.datalink_lay = StdDatalinkLayer(sim_env) self.transp_lay = SegmentTransportLayer(sim_env, MessageClass) self.datalink_lay.physical_lay = self.physical_lay self.transp_lay.datalink_lay = self.datalink_lay
Initializes the software layers Input: sim_env simpy.Environment environment of this component MessageClass AbstractBusMessage class of the messages how they are sent on the CAN Bus Output: -
Tutorial4AddSimpleTiming/layers/impl_comm_module_my_protocol.py
_init_layers
arturmrowca/IVNS
0
python
def _init_layers(self, sim_env, MessageClass): ' Initializes the software layers \n \n Input: sim_env simpy.Environment environment of this component \n MessageClass AbstractBusMessage class of the messages how they are sent on the CAN Bus\n Output: - \n ' self.physical_lay = StdPhysicalLayer(sim_env) self.datalink_lay = StdDatalinkLayer(sim_env) self.transp_lay = SegmentTransportLayer(sim_env, MessageClass) self.datalink_lay.physical_lay = self.physical_lay self.transp_lay.datalink_lay = self.datalink_lay
def _init_layers(self, sim_env, MessageClass): ' Initializes the software layers \n \n Input: sim_env simpy.Environment environment of this component \n MessageClass AbstractBusMessage class of the messages how they are sent on the CAN Bus\n Output: - \n ' self.physical_lay = StdPhysicalLayer(sim_env) self.datalink_lay = StdDatalinkLayer(sim_env) self.transp_lay = SegmentTransportLayer(sim_env, MessageClass) self.datalink_lay.physical_lay = self.physical_lay self.transp_lay.datalink_lay = self.datalink_lay<|docstring|>Initializes the software layers Input: sim_env simpy.Environment environment of this component MessageClass AbstractBusMessage class of the messages how they are sent on the CAN Bus Output: -<|endoftext|>
ca0226fbe2a0b5383a4ab17736060885aaf2b627ac9dba4aaf9404a4976d863c
def monitor_update(self): ' updates the monitor connected to this ecu\n \n Input: -\n Output: monitor_list RefList list of MonitorInputs\n ' items_1 = len(self.transp_lay.datalink_lay.controller.receive_buffer.items) items_2 = self.transp_lay.datalink_lay.transmit_buffer_size G().mon(self.monitor_list, MonitorInput(items_1, MonitorTags.BT_ECU_RECEIVE_BUFFER, self._ecu_id, self.sim_env.now)) G().mon(self.monitor_list, MonitorInput(items_2, MonitorTags.BT_ECU_TRANSMIT_BUFFER, self._ecu_id, self.sim_env.now)) self.monitor_list.clear_on_access() return self.monitor_list.get()
updates the monitor connected to this ecu Input: - Output: monitor_list RefList list of MonitorInputs
Tutorial4AddSimpleTiming/layers/impl_comm_module_my_protocol.py
monitor_update
arturmrowca/IVNS
0
python
def monitor_update(self): ' updates the monitor connected to this ecu\n \n Input: -\n Output: monitor_list RefList list of MonitorInputs\n ' items_1 = len(self.transp_lay.datalink_lay.controller.receive_buffer.items) items_2 = self.transp_lay.datalink_lay.transmit_buffer_size G().mon(self.monitor_list, MonitorInput(items_1, MonitorTags.BT_ECU_RECEIVE_BUFFER, self._ecu_id, self.sim_env.now)) G().mon(self.monitor_list, MonitorInput(items_2, MonitorTags.BT_ECU_TRANSMIT_BUFFER, self._ecu_id, self.sim_env.now)) self.monitor_list.clear_on_access() return self.monitor_list.get()
def monitor_update(self): ' updates the monitor connected to this ecu\n \n Input: -\n Output: monitor_list RefList list of MonitorInputs\n ' items_1 = len(self.transp_lay.datalink_lay.controller.receive_buffer.items) items_2 = self.transp_lay.datalink_lay.transmit_buffer_size G().mon(self.monitor_list, MonitorInput(items_1, MonitorTags.BT_ECU_RECEIVE_BUFFER, self._ecu_id, self.sim_env.now)) G().mon(self.monitor_list, MonitorInput(items_2, MonitorTags.BT_ECU_TRANSMIT_BUFFER, self._ecu_id, self.sim_env.now)) self.monitor_list.clear_on_access() return self.monitor_list.get()<|docstring|>updates the monitor connected to this ecu Input: - Output: monitor_list RefList list of MonitorInputs<|endoftext|>
d9a65fc9ca0a19fa449dae0efcbc6c5808c1b2ed2a1f4869e943fd11c9421658
def modified_lines_from_udiff(udiff): 'Extract from a udiff an iterator of tuples of (start, end) line\n numbers.' chunks = re.split('\n@@ [^\n]+\n', udiff)[1:] line_numbers = re.findall('@@\\s[+-]\\d+,\\d+ \\+(\\d+)', udiff) line_numbers = list(map(int, line_numbers)) for (c, start) in zip(chunks, line_numbers): ilines = enumerate((line for line in c.splitlines() if (not line.startswith('-'))), start=start) added_lines = [i for (i, line) in ilines if line.startswith('+')] if added_lines: (yield (added_lines[0], added_lines[(- 1)]))
Extract from a udiff an iterator of tuples of (start, end) line numbers.
pep8radius/diff.py
modified_lines_from_udiff
GoodRx/pep8radius
1
python
def modified_lines_from_udiff(udiff): 'Extract from a udiff an iterator of tuples of (start, end) line\n numbers.' chunks = re.split('\n@@ [^\n]+\n', udiff)[1:] line_numbers = re.findall('@@\\s[+-]\\d+,\\d+ \\+(\\d+)', udiff) line_numbers = list(map(int, line_numbers)) for (c, start) in zip(chunks, line_numbers): ilines = enumerate((line for line in c.splitlines() if (not line.startswith('-'))), start=start) added_lines = [i for (i, line) in ilines if line.startswith('+')] if added_lines: (yield (added_lines[0], added_lines[(- 1)]))
def modified_lines_from_udiff(udiff): 'Extract from a udiff an iterator of tuples of (start, end) line\n numbers.' chunks = re.split('\n@@ [^\n]+\n', udiff)[1:] line_numbers = re.findall('@@\\s[+-]\\d+,\\d+ \\+(\\d+)', udiff) line_numbers = list(map(int, line_numbers)) for (c, start) in zip(chunks, line_numbers): ilines = enumerate((line for line in c.splitlines() if (not line.startswith('-'))), start=start) added_lines = [i for (i, line) in ilines if line.startswith('+')] if added_lines: (yield (added_lines[0], added_lines[(- 1)]))<|docstring|>Extract from a udiff an iterator of tuples of (start, end) line numbers.<|endoftext|>
11545c3379fd3bf69f89057ccbfe86b869e72a0c51cf320944662998b8af8160
def udiff_lines_fixed(u): 'Count lines fixed (removed) in udiff.' removed_changes = re.findall('\n\\-', u) return len(removed_changes)
Count lines fixed (removed) in udiff.
pep8radius/diff.py
udiff_lines_fixed
GoodRx/pep8radius
1
python
def udiff_lines_fixed(u): removed_changes = re.findall('\n\\-', u) return len(removed_changes)
def udiff_lines_fixed(u): removed_changes = re.findall('\n\\-', u) return len(removed_changes)<|docstring|>Count lines fixed (removed) in udiff.<|endoftext|>
c4801be0e5bec9af76eece7843dd02a0d903c5e9a597fc9606b51f9a49dfa42f
def get_diff(original, fixed, file_name, original_label='original', fixed_label='fixed'): 'Return text of unified diff between original and fixed.' (original, fixed) = (original.splitlines(True), fixed.splitlines(True)) newline = '\n' from difflib import unified_diff diff = unified_diff(original, fixed, os.path.join(original_label, file_name), os.path.join(fixed_label, file_name), lineterm=newline) text = '' for line in diff: text += line if (not line.endswith(newline)): text += ((newline + '\\ No newline at end of file') + newline) return text
Return text of unified diff between original and fixed.
pep8radius/diff.py
get_diff
GoodRx/pep8radius
1
python
def get_diff(original, fixed, file_name, original_label='original', fixed_label='fixed'): (original, fixed) = (original.splitlines(True), fixed.splitlines(True)) newline = '\n' from difflib import unified_diff diff = unified_diff(original, fixed, os.path.join(original_label, file_name), os.path.join(fixed_label, file_name), lineterm=newline) text = for line in diff: text += line if (not line.endswith(newline)): text += ((newline + '\\ No newline at end of file') + newline) return text
def get_diff(original, fixed, file_name, original_label='original', fixed_label='fixed'): (original, fixed) = (original.splitlines(True), fixed.splitlines(True)) newline = '\n' from difflib import unified_diff diff = unified_diff(original, fixed, os.path.join(original_label, file_name), os.path.join(fixed_label, file_name), lineterm=newline) text = for line in diff: text += line if (not line.endswith(newline)): text += ((newline + '\\ No newline at end of file') + newline) return text<|docstring|>Return text of unified diff between original and fixed.<|endoftext|>
1eddc3f5f22b04758fbca16609ecbd9dc3e913acb8dfbae0aec69239d4bdb1db
def print_diff(diff, color=True): 'Pretty printing for a diff, if color then we use a simple color scheme\n (red for removed lines, green for added lines).' import colorama if (not diff): return if (not color): colorama.init = (lambda autoreset: None) colorama.Fore.RED = '' colorama.Back.RED = '' colorama.Fore.GREEN = '' colorama.deinit = (lambda : None) colorama.init(autoreset=True) for line in diff.splitlines(): if (line.startswith('+') and (not line.startswith('+++ '))): print((colorama.Fore.GREEN + line)) elif (line.startswith('-') and (not line.startswith('--- '))): split_whitespace = re.split('(\\s+)$', line) if (len(split_whitespace) > 1): (line, trailing, _) = split_whitespace else: (line, trailing) = (split_whitespace[0], '') print((colorama.Fore.RED + line), end='') print((colorama.Back.RED + trailing)) elif (line == '\\ No newline at end of file'): print((colorama.Fore.RED + line)) else: print(line) colorama.deinit()
Pretty printing for a diff, if color then we use a simple color scheme (red for removed lines, green for added lines).
pep8radius/diff.py
print_diff
GoodRx/pep8radius
1
python
def print_diff(diff, color=True): 'Pretty printing for a diff, if color then we use a simple color scheme\n (red for removed lines, green for added lines).' import colorama if (not diff): return if (not color): colorama.init = (lambda autoreset: None) colorama.Fore.RED = colorama.Back.RED = colorama.Fore.GREEN = colorama.deinit = (lambda : None) colorama.init(autoreset=True) for line in diff.splitlines(): if (line.startswith('+') and (not line.startswith('+++ '))): print((colorama.Fore.GREEN + line)) elif (line.startswith('-') and (not line.startswith('--- '))): split_whitespace = re.split('(\\s+)$', line) if (len(split_whitespace) > 1): (line, trailing, _) = split_whitespace else: (line, trailing) = (split_whitespace[0], ) print((colorama.Fore.RED + line), end=) print((colorama.Back.RED + trailing)) elif (line == '\\ No newline at end of file'): print((colorama.Fore.RED + line)) else: print(line) colorama.deinit()
def print_diff(diff, color=True): 'Pretty printing for a diff, if color then we use a simple color scheme\n (red for removed lines, green for added lines).' import colorama if (not diff): return if (not color): colorama.init = (lambda autoreset: None) colorama.Fore.RED = colorama.Back.RED = colorama.Fore.GREEN = colorama.deinit = (lambda : None) colorama.init(autoreset=True) for line in diff.splitlines(): if (line.startswith('+') and (not line.startswith('+++ '))): print((colorama.Fore.GREEN + line)) elif (line.startswith('-') and (not line.startswith('--- '))): split_whitespace = re.split('(\\s+)$', line) if (len(split_whitespace) > 1): (line, trailing, _) = split_whitespace else: (line, trailing) = (split_whitespace[0], ) print((colorama.Fore.RED + line), end=) print((colorama.Back.RED + trailing)) elif (line == '\\ No newline at end of file'): print((colorama.Fore.RED + line)) else: print(line) colorama.deinit()<|docstring|>Pretty printing for a diff, if color then we use a simple color scheme (red for removed lines, green for added lines).<|endoftext|>
f384449a8f0bc263790fa3f0464f5dab64d3e4e17cf579d055af543d002e33dd
@given(seed) def test_1q_random(self, seed): 'Checks one qubit decompositions' unitary = random_unitary(2, seed=seed) self.check_one_qubit_euler_angles(unitary) self.check_one_qubit_euler_angles(unitary, 'U3') self.check_one_qubit_euler_angles(unitary, 'U1X') self.check_one_qubit_euler_angles(unitary, 'PSX') self.check_one_qubit_euler_angles(unitary, 'ZSX') self.check_one_qubit_euler_angles(unitary, 'ZYZ') self.check_one_qubit_euler_angles(unitary, 'ZXZ') self.check_one_qubit_euler_angles(unitary, 'XYX') self.check_one_qubit_euler_angles(unitary, 'RR')
Checks one qubit decompositions
test/randomized/test_synthesis.py
test_1q_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@given(seed) def test_1q_random(self, seed): unitary = random_unitary(2, seed=seed) self.check_one_qubit_euler_angles(unitary) self.check_one_qubit_euler_angles(unitary, 'U3') self.check_one_qubit_euler_angles(unitary, 'U1X') self.check_one_qubit_euler_angles(unitary, 'PSX') self.check_one_qubit_euler_angles(unitary, 'ZSX') self.check_one_qubit_euler_angles(unitary, 'ZYZ') self.check_one_qubit_euler_angles(unitary, 'ZXZ') self.check_one_qubit_euler_angles(unitary, 'XYX') self.check_one_qubit_euler_angles(unitary, 'RR')
@given(seed) def test_1q_random(self, seed): unitary = random_unitary(2, seed=seed) self.check_one_qubit_euler_angles(unitary) self.check_one_qubit_euler_angles(unitary, 'U3') self.check_one_qubit_euler_angles(unitary, 'U1X') self.check_one_qubit_euler_angles(unitary, 'PSX') self.check_one_qubit_euler_angles(unitary, 'ZSX') self.check_one_qubit_euler_angles(unitary, 'ZYZ') self.check_one_qubit_euler_angles(unitary, 'ZXZ') self.check_one_qubit_euler_angles(unitary, 'XYX') self.check_one_qubit_euler_angles(unitary, 'RR')<|docstring|>Checks one qubit decompositions<|endoftext|>
88732f657ac7df0a9c28d6f1f014a2350b870541da776b38ef1790fc61e57dd3
@settings(deadline=None) @given(seed) def test_2q_random(self, seed): 'Checks two qubit decompositions' unitary = random_unitary(4, seed=seed) self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)
Checks two qubit decompositions
test/randomized/test_synthesis.py
test_2q_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@settings(deadline=None) @given(seed) def test_2q_random(self, seed): unitary = random_unitary(4, seed=seed) self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)
@settings(deadline=None) @given(seed) def test_2q_random(self, seed): unitary = random_unitary(4, seed=seed) self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)<|docstring|>Checks two qubit decompositions<|endoftext|>
9a6b277c348d669072683ae531262d4675947ee30fae9e208bd5e77da66b7772
@given(strategies.tuples(*([seed] * 5))) def test_exact_supercontrolled_decompose_random(self, seeds): 'Exact decomposition for random supercontrolled basis and random target' k1 = np.kron(random_unitary(2, seed=seeds[0]).data, random_unitary(2, seed=seeds[1]).data) k2 = np.kron(random_unitary(2, seed=seeds[2]).data, random_unitary(2, seed=seeds[3]).data) basis_unitary = ((k1 @ Ud((np.pi / 4), 0, 0)) @ k2) decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary)) self.check_exact_decomposition(random_unitary(4, seed=seeds[4]).data, decomposer)
Exact decomposition for random supercontrolled basis and random target
test/randomized/test_synthesis.py
test_exact_supercontrolled_decompose_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@given(strategies.tuples(*([seed] * 5))) def test_exact_supercontrolled_decompose_random(self, seeds): k1 = np.kron(random_unitary(2, seed=seeds[0]).data, random_unitary(2, seed=seeds[1]).data) k2 = np.kron(random_unitary(2, seed=seeds[2]).data, random_unitary(2, seed=seeds[3]).data) basis_unitary = ((k1 @ Ud((np.pi / 4), 0, 0)) @ k2) decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary)) self.check_exact_decomposition(random_unitary(4, seed=seeds[4]).data, decomposer)
@given(strategies.tuples(*([seed] * 5))) def test_exact_supercontrolled_decompose_random(self, seeds): k1 = np.kron(random_unitary(2, seed=seeds[0]).data, random_unitary(2, seed=seeds[1]).data) k2 = np.kron(random_unitary(2, seed=seeds[2]).data, random_unitary(2, seed=seeds[3]).data) basis_unitary = ((k1 @ Ud((np.pi / 4), 0, 0)) @ k2) decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary)) self.check_exact_decomposition(random_unitary(4, seed=seeds[4]).data, decomposer)<|docstring|>Exact decomposition for random supercontrolled basis and random target<|endoftext|>
19de9e1a8f9a780556797eeb35c61838d75382fc1f24a5b8a731a8390ec44409
@given(strategies.tuples(*([rotation] * 6)), seed) def test_cx_equivalence_0cx_random(self, rnd, seed): 'Check random circuits with 0 cx gates locally equivalent to identity.' qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)
Check random circuits with 0 cx gates locally equivalent to identity.
test/randomized/test_synthesis.py
test_cx_equivalence_0cx_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@given(strategies.tuples(*([rotation] * 6)), seed) def test_cx_equivalence_0cx_random(self, rnd, seed): qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)
@given(strategies.tuples(*([rotation] * 6)), seed) def test_cx_equivalence_0cx_random(self, rnd, seed): qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)<|docstring|>Check random circuits with 0 cx gates locally equivalent to identity.<|endoftext|>
73d19686263f95c625e346f5f6bd519e893b8f30a067ea969fbe46ea3394e46e
@given(strategies.tuples(*([rotation] * 12)), seed) def test_cx_equivalence_1cx_random(self, rnd, seed): 'Check random circuits with 1 cx gates locally equivalent to a cx.' qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)
Check random circuits with 1 cx gates locally equivalent to a cx.
test/randomized/test_synthesis.py
test_cx_equivalence_1cx_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@given(strategies.tuples(*([rotation] * 12)), seed) def test_cx_equivalence_1cx_random(self, rnd, seed): qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)
@given(strategies.tuples(*([rotation] * 12)), seed) def test_cx_equivalence_1cx_random(self, rnd, seed): qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)<|docstring|>Check random circuits with 1 cx gates locally equivalent to a cx.<|endoftext|>
21e5a280ca5ce073f6d9d5559a52aba5da35e143e257ccb6c6ac76271bedb460
@given(strategies.tuples(*([rotation] * 18)), seed) def test_cx_equivalence_2cx_random(self, rnd, seed): 'Check random circuits with 2 cx gates locally equivalent to some circuit with 2 cx.' qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) qc.cx(qr[0], qr[1]) qc.u(rnd[12], rnd[13], rnd[14], qr[0]) qc.u(rnd[15], rnd[16], rnd[17], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 2)
Check random circuits with 2 cx gates locally equivalent to some circuit with 2 cx.
test/randomized/test_synthesis.py
test_cx_equivalence_2cx_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@given(strategies.tuples(*([rotation] * 18)), seed) def test_cx_equivalence_2cx_random(self, rnd, seed): qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) qc.cx(qr[0], qr[1]) qc.u(rnd[12], rnd[13], rnd[14], qr[0]) qc.u(rnd[15], rnd[16], rnd[17], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 2)
@given(strategies.tuples(*([rotation] * 18)), seed) def test_cx_equivalence_2cx_random(self, rnd, seed): qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) qc.cx(qr[0], qr[1]) qc.u(rnd[12], rnd[13], rnd[14], qr[0]) qc.u(rnd[15], rnd[16], rnd[17], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 2)<|docstring|>Check random circuits with 2 cx gates locally equivalent to some circuit with 2 cx.<|endoftext|>
ec58273c87218040ff167567f78317e07c86df3b94e212f8451e470be34197d9
@given(strategies.tuples(*([rotation] * 24)), seed) def test_cx_equivalence_3cx_random(self, rnd, seed): 'Check random circuits with 3 cx gates are outside the 0, 1, and 2 qubit regions.' qr = QuantumRegister(2, name='q') qc = QuantumCircuit(qr) qc.u(rnd[0], rnd[1], rnd[2], qr[0]) qc.u(rnd[3], rnd[4], rnd[5], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[6], rnd[7], rnd[8], qr[0]) qc.u(rnd[9], rnd[10], rnd[11], qr[1]) qc.cx(qr[0], qr[1]) qc.u(rnd[12], rnd[13], rnd[14], qr[0]) qc.u(rnd[15], rnd[16], rnd[17], qr[1]) qc.cx(qr[1], qr[0]) qc.u(rnd[18], rnd[19], rnd[20], qr[0]) qc.u(rnd[21], rnd[22], rnd[23], qr[1]) sim = UnitarySimulatorPy() unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary() self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 3)
Check random circuits with 3 cx gates are outside the 0, 1, and 2 qubit regions.
test/randomized/test_synthesis.py
test_cx_equivalence_3cx_random
biblio-techers/Qiskit-Fall-Fest-2021
1,599
python
@given(strategies.tuples(*([rotation] * 24)), seed)
def test_cx_equivalence_3cx_random(self, rnd, seed):
    """Check random circuits with 3 cx gates are outside the 0, 1, and 2 qubit regions."""
    qr = QuantumRegister(2, name='q')
    qc = QuantumCircuit(qr)

    def u_layer(base):
        # One layer of independent single-qubit rotations on both qubits,
        # consuming six consecutive random angles starting at `base`.
        qc.u(rnd[base], rnd[base + 1], rnd[base + 2], qr[0])
        qc.u(rnd[base + 3], rnd[base + 4], rnd[base + 5], qr[1])

    # Alternate rotation layers with three CNOTs of alternating orientation.
    u_layer(0)
    qc.cx(qr[1], qr[0])
    u_layer(6)
    qc.cx(qr[0], qr[1])
    u_layer(12)
    qc.cx(qr[1], qr[0])
    u_layer(18)

    sim = UnitarySimulatorPy()
    unitary = execute(qc, sim, seed_simulator=seed).result().get_unitary()
    # A circuit built with exactly 3 CNOTs must decompose with 3 basis gates.
    self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 3)
e8b15b50023750edc29f76697234ee0c81341eca653d55f7db9f490b1ac14ed0
def fetch_wordrep(subsample=None, rng=None):
    """
    Fetch MSR WordRep dataset for testing both syntactic and semantic analogies.

    Parameters
    ----------
    subsample : float in (0, 1], optional
        Fraction of word pairs to keep; None keeps the full dataset.
    rng : int, RandomState or None, optional
        Source of randomness, used only when `subsample` is given.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word pairs
        'category': name of category
        'category_high_level': name of high level category (wikipedia-dict/wordnet)
        'wikipedia_categories': set of Wikipedia/dictionary category names
        'wordnet_categories': set of WordNet category names

    References
    ----------
    Gao, Bin and Bian, Jiang and Liu, Tie-Yan,
    "Wordrep: A benchmark for research on learning word representations", 2014

    Notes
    -----
    This dataset is too big to calculate and store all word analogy quadruples,
    which is why it returns word pairs.
    """
    path = _fetch_file(url='https://www.dropbox.com/sh/5k78h9gllvc44vt/AAALLQq-Bge605OIMlmGBbNJa?dl=1',
                       data_dir='analogy',
                       uncompress=True,
                       move='EN-WORDREP/EN-WORDREP.zip',
                       verbose=0)

    wikipedia_dict = glob.glob(os.path.join(path, 'Pairs_from_Wikipedia_and_Dictionary/*.txt'))
    wordnet = glob.glob(os.path.join(path, 'Pairs_from_WordNet/*.txt'))

    word_pairs = []
    category = []
    category_high_level = []
    files = (wikipedia_dict + wordnet)
    for file_name in files:
        c = os.path.basename(file_name).split('.')[0]
        # Strip the numeric prefix from the file stem, e.g. "1-Antonym" -> "Antonym".
        c = c[(c.index('-') + 1):]
        with open(file_name, 'r') as f:
            for l in f.read().splitlines():
                word_pairs.append(standardize_string(l).split())
                category.append(c)
                category_high_level.append('wikipedia-dict' if (file_name in wikipedia_dict) else 'wordnet')

    if subsample:
        assert (0 <= subsample <= 1.0)
        rng = check_random_state(rng)
        ids = rng.choice(range(len(word_pairs)), int(subsample * len(word_pairs)), replace=False)
        word_pairs = [word_pairs[i] for i in ids]
        category = [category[i] for i in ids]
        category_high_level = [category_high_level[i] for i in ids]

    wordnet_categories = {'Antonym', 'Attribute', 'Causes', 'DerivedFrom', 'Entails',
                          'HasContext', 'InstanceOf', 'IsA', 'MadeOf', 'MemberOf',
                          'PartOf', 'RelatedTo', 'SimilarTo'}

    wikipedia_categories = {'adjective-to-adverb', 'all-capital-cities', 'city-in-state',
                            'comparative', 'currency', 'man-woman', 'nationality-adjective',
                            'past-tense', 'plural-nouns', 'plural-verbs',
                            'present-participle', 'superlative'}

    # BUG FIX: the two keyword arguments were swapped (`wikipedia_categories`
    # carried the WordNet set and vice versa); each attribute now carries the
    # set its name promises.
    return Bunch(category_high_level=np.array(category_high_level),
                 X=np.array(word_pairs),
                 category=np.array(category),
                 wikipedia_categories=wikipedia_categories,
                 wordnet_categories=wordnet_categories)
Fetch MSR WordRep dataset for testing both syntactic and semantic dataset Returns ------- data : sklearn.datasets.base.Bunch dictionary-like object. Keys of interest: 'X': matrix of word pairs 'y': vector of answers 'category': name of category 'category_high_level': name of high level category (semantic/syntactic) References ---------- Gao, Bin and Bian, Jiang and Liu, Tie-Yan, "Wordrep: A benchmark for research on learning word representations", 2014 Notes ----- This dataset is too big to calculate and store all word analogy quadruples, this is why it returns word pairs
web/datasets/analogy.py
fetch_wordrep
jihaepat/word-embeddings-benchmarks
416
python
def fetch_wordrep(subsample=None, rng=None):
    """
    Fetch MSR WordRep dataset for testing both syntactic and semantic analogies.

    Parameters
    ----------
    subsample : float in (0, 1], optional
        Fraction of word pairs to keep; None keeps the full dataset.
    rng : int, RandomState or None, optional
        Source of randomness, used only when `subsample` is given.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word pairs
        'category': name of category
        'category_high_level': name of high level category (wikipedia-dict/wordnet)
        'wikipedia_categories': set of Wikipedia/dictionary category names
        'wordnet_categories': set of WordNet category names

    References
    ----------
    Gao, Bin and Bian, Jiang and Liu, Tie-Yan,
    "Wordrep: A benchmark for research on learning word representations", 2014

    Notes
    -----
    This dataset is too big to calculate and store all word analogy quadruples,
    which is why it returns word pairs.
    """
    path = _fetch_file(url='https://www.dropbox.com/sh/5k78h9gllvc44vt/AAALLQq-Bge605OIMlmGBbNJa?dl=1',
                       data_dir='analogy',
                       uncompress=True,
                       move='EN-WORDREP/EN-WORDREP.zip',
                       verbose=0)

    wikipedia_dict = glob.glob(os.path.join(path, 'Pairs_from_Wikipedia_and_Dictionary/*.txt'))
    wordnet = glob.glob(os.path.join(path, 'Pairs_from_WordNet/*.txt'))

    word_pairs = []
    category = []
    category_high_level = []
    files = (wikipedia_dict + wordnet)
    for file_name in files:
        c = os.path.basename(file_name).split('.')[0]
        # Strip the numeric prefix from the file stem, e.g. "1-Antonym" -> "Antonym".
        c = c[(c.index('-') + 1):]
        with open(file_name, 'r') as f:
            for l in f.read().splitlines():
                word_pairs.append(standardize_string(l).split())
                category.append(c)
                category_high_level.append('wikipedia-dict' if (file_name in wikipedia_dict) else 'wordnet')

    if subsample:
        assert (0 <= subsample <= 1.0)
        rng = check_random_state(rng)
        ids = rng.choice(range(len(word_pairs)), int(subsample * len(word_pairs)), replace=False)
        word_pairs = [word_pairs[i] for i in ids]
        category = [category[i] for i in ids]
        category_high_level = [category_high_level[i] for i in ids]

    wordnet_categories = {'Antonym', 'Attribute', 'Causes', 'DerivedFrom', 'Entails',
                          'HasContext', 'InstanceOf', 'IsA', 'MadeOf', 'MemberOf',
                          'PartOf', 'RelatedTo', 'SimilarTo'}

    wikipedia_categories = {'adjective-to-adverb', 'all-capital-cities', 'city-in-state',
                            'comparative', 'currency', 'man-woman', 'nationality-adjective',
                            'past-tense', 'plural-nouns', 'plural-verbs',
                            'present-participle', 'superlative'}

    # BUG FIX: the two keyword arguments were swapped (`wikipedia_categories`
    # carried the WordNet set and vice versa); each attribute now carries the
    # set its name promises.
    return Bunch(category_high_level=np.array(category_high_level),
                 X=np.array(word_pairs),
                 category=np.array(category),
                 wikipedia_categories=wikipedia_categories,
                 wordnet_categories=wordnet_categories)
e6d9285a27037b07c60ba2bc887093af35b3563296e9530ec2d43176ed5590b5
def fetch_google_analogy():
    """
    Fetch Google dataset for testing both semantic and syntactic analogies.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word questions
        'y': vector of answers
        'category': name of category
        'category_high_level': name of high level category (semantic/syntactic)

    References
    ----------
    Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff,
    "Distributed representations of words and phrases and their compositionality", 2013

    Notes
    -----
    This dataset is a subset of WordRep dataset.
    """
    url = 'https://www.dropbox.com/s/eujtyfb5zem1mim/EN-GOOGLE.txt?dl=1'
    with open(_fetch_file(url, 'analogy/EN-GOOGLE', verbose=0), 'r') as source:
        lines = source.read().splitlines()

    questions, answers, category = [], [], []
    current_cat = None
    for line in lines:
        if line.startswith(':'):
            # Section header like ": capital-world" -- switch the active category.
            current_cat = line.lower().split()[1]
        else:
            tokens = standardize_string(line).split()
            questions.append(tokens[0:3])
            answers.append(tokens[3])
            category.append(current_cat)

    expected = {'gram3-comparative', 'gram8-plural', 'capital-common-countries',
                'city-in-state', 'family', 'gram9-plural-verbs', 'gram2-opposite',
                'currency', 'gram4-superlative', 'gram6-nationality-adjective',
                'gram7-past-tense', 'gram5-present-participle', 'capital-world',
                'gram1-adjective-to-adverb'}
    assert set(category) == expected

    # Categories prefixed with "gram" are syntactic; everything else is semantic.
    category_high_level = ['syntactic' if c.startswith('gram') else 'semantic'
                           for c in category]

    return Bunch(X=np.vstack(questions).astype('object'),
                 y=np.hstack(answers).astype('object'),
                 category=np.hstack(category).astype('object'),
                 category_high_level=np.hstack(category_high_level).astype('object'))
Fetch Google dataset for testing both semantic and syntactic analogies. Returns ------- data : sklearn.datasets.base.Bunch dictionary-like object. Keys of interest: 'X': matrix of word questions 'y': vector of answers 'category': name of category 'category_high_level': name of high level category (semantic/syntactic) References ---------- Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff, "Distributed representations of words and phrases and their compositionality", 2013 Notes ----- This dataset is a subset of WordRep dataset.
web/datasets/analogy.py
fetch_google_analogy
jihaepat/word-embeddings-benchmarks
416
python
def fetch_google_analogy():
    """
    Fetch Google dataset for testing both semantic and syntactic analogies.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word questions
        'y': vector of answers
        'category': name of category
        'category_high_level': name of high level category (semantic/syntactic)

    References
    ----------
    Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff,
    "Distributed representations of words and phrases and their compositionality", 2013

    Notes
    -----
    This dataset is a subset of WordRep dataset.
    """
    url = 'https://www.dropbox.com/s/eujtyfb5zem1mim/EN-GOOGLE.txt?dl=1'
    with open(_fetch_file(url, 'analogy/EN-GOOGLE', verbose=0), 'r') as source:
        lines = source.read().splitlines()

    questions, answers, category = [], [], []
    current_cat = None
    for line in lines:
        if line.startswith(':'):
            # Section header like ": capital-world" -- switch the active category.
            current_cat = line.lower().split()[1]
        else:
            tokens = standardize_string(line).split()
            questions.append(tokens[0:3])
            answers.append(tokens[3])
            category.append(current_cat)

    expected = {'gram3-comparative', 'gram8-plural', 'capital-common-countries',
                'city-in-state', 'family', 'gram9-plural-verbs', 'gram2-opposite',
                'currency', 'gram4-superlative', 'gram6-nationality-adjective',
                'gram7-past-tense', 'gram5-present-participle', 'capital-world',
                'gram1-adjective-to-adverb'}
    assert set(category) == expected

    # Categories prefixed with "gram" are syntactic; everything else is semantic.
    category_high_level = ['syntactic' if c.startswith('gram') else 'semantic'
                           for c in category]

    return Bunch(X=np.vstack(questions).astype('object'),
                 y=np.hstack(answers).astype('object'),
                 category=np.hstack(category).astype('object'),
                 category_high_level=np.hstack(category_high_level).astype('object'))
bc2b2f7c0506dfb1de65a95f18d97c19c2dc10d96876d57b38ea8ed783420a32
def fetch_msr_analogy():
    """
    Fetch MSR dataset for testing performance on syntactic analogies.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word questions
        'y': vector of answers
        'category': name of category
        'category_high_level': name of high level category (noun/adjective/verb)

    References
    ----------
    Originally published at http://research.microsoft.com/en-us/projects/rnn/.

    Notes
    -----
    Authors description: "more precisely, we tagged 267M words of newspaper text
    with Treebank POS tags (Marcus et al., 1993). We then selected 100 of the most
    frequent comparative adjectives (words labeled JJR); 100 of the most frequent
    plural nouns (NNS); 100 of the most frequent possessive nouns (NN POS); and
    100 of the most frequent base form verbs (VB). We then systematically
    generated analogy questions by randomly matching each of the 100 words with
    5 other words from the same category, and creating variants.
    """
    url = 'https://www.dropbox.com/s/ne0fib302jqbatw/EN-MSR.txt?dl=1'
    with open(_fetch_file(url, 'analogy/EN-MSR', verbose=0), 'r') as source:
        lines = source.read().splitlines()

    questions, answers, category = [], [], []
    for line in lines:
        # Line layout: word1 word2 word3 CATEGORY answer
        tokens = standardize_string(line).split()
        questions.append(tokens[0:3])
        answers.append(tokens[4])
        category.append(tokens[3])

    def _high_level(cat):
        # The POS prefix of the category tag decides the coarse grouping.
        if cat.startswith('VB'):
            return 'verb'
        if cat.startswith('NN'):
            return 'noun'
        return 'adjective'

    category_high_level = [_high_level(c) for c in category]

    assert set(c.upper() for c in category) == {
        'VBD_VBZ', 'VB_VBD', 'VBZ_VBD', 'VBZ_VB', 'NNPOS_NN', 'JJR_JJS',
        'JJS_JJR', 'NNS_NN', 'JJR_JJ', 'NN_NNS', 'VB_VBZ', 'VBD_VB',
        'JJS_JJ', 'NN_NNPOS', 'JJ_JJS', 'JJ_JJR'}

    return Bunch(X=np.vstack(questions).astype('object'),
                 y=np.hstack(answers).astype('object'),
                 category=np.hstack(category).astype('object'),
                 category_high_level=np.hstack(category_high_level).astype('object'))
Fetch MSR dataset for testing performance on syntactic analogies Returns ------- data : sklearn.datasets.base.Bunch dictionary-like object. Keys of interest: 'X': matrix of word questions 'y': vector of answers 'category': name of category 'category_high_level': name of high level category (noun/adjective/verb) References ---------- Originally published at http://research.microsoft.com/en-us/projects/rnn/. Notes ----- Authors description: "more precisely, we tagged 267M words of newspaper text with Treebank POS tags (Marcus et al., 1993). We then selected 100 of the most frequent comparative adjectives (words labeled JJR); 100 of the most frequent plural nouns (NNS); 100 of the most frequent possessive nouns (NN POS); and 100 of the most frequent base form verbs (VB). We then systematically generated analogy questions by randomly matching each of the 100 words with 5 other words from the same category, and creating variants.
web/datasets/analogy.py
fetch_msr_analogy
jihaepat/word-embeddings-benchmarks
416
python
def fetch_msr_analogy():
    """
    Fetch MSR dataset for testing performance on syntactic analogies.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X': matrix of word questions
        'y': vector of answers
        'category': name of category
        'category_high_level': name of high level category (noun/adjective/verb)

    References
    ----------
    Originally published at http://research.microsoft.com/en-us/projects/rnn/.

    Notes
    -----
    Authors description: "more precisely, we tagged 267M words of newspaper text
    with Treebank POS tags (Marcus et al., 1993). We then selected 100 of the most
    frequent comparative adjectives (words labeled JJR); 100 of the most frequent
    plural nouns (NNS); 100 of the most frequent possessive nouns (NN POS); and
    100 of the most frequent base form verbs (VB). We then systematically
    generated analogy questions by randomly matching each of the 100 words with
    5 other words from the same category, and creating variants.
    """
    url = 'https://www.dropbox.com/s/ne0fib302jqbatw/EN-MSR.txt?dl=1'
    with open(_fetch_file(url, 'analogy/EN-MSR', verbose=0), 'r') as source:
        lines = source.read().splitlines()

    questions, answers, category = [], [], []
    for line in lines:
        # Line layout: word1 word2 word3 CATEGORY answer
        tokens = standardize_string(line).split()
        questions.append(tokens[0:3])
        answers.append(tokens[4])
        category.append(tokens[3])

    def _high_level(cat):
        # The POS prefix of the category tag decides the coarse grouping.
        if cat.startswith('VB'):
            return 'verb'
        if cat.startswith('NN'):
            return 'noun'
        return 'adjective'

    category_high_level = [_high_level(c) for c in category]

    assert set(c.upper() for c in category) == {
        'VBD_VBZ', 'VB_VBD', 'VBZ_VBD', 'VBZ_VB', 'NNPOS_NN', 'JJR_JJS',
        'JJS_JJR', 'NNS_NN', 'JJR_JJ', 'NN_NNS', 'VB_VBZ', 'VBD_VB',
        'JJS_JJ', 'NN_NNPOS', 'JJ_JJS', 'JJ_JJR'}

    return Bunch(X=np.vstack(questions).astype('object'),
                 y=np.hstack(answers).astype('object'),
                 category=np.hstack(category).astype('object'),
                 category_high_level=np.hstack(category_high_level).astype('object'))
ba16900f84ffb06ad2536f4382734de2ebe82ce7a6949d1ad317053cb42e6c7f
def fetch_semeval_2012_2(which='all', which_scoring='golden'):
    """
    Fetch dataset used for SEMEVAL 2012 task 2 competition.

    Parameters
    ----------
    which : "all", "train" or "test"
    which_scoring : "golden" or "platinium" (see Notes)

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        dictionary-like object. Keys of interest:
        'X_prot': dictionary keyed on category; matrix of prototype word pairs
        'X': dictionary keyed on category; matrix of question word pairs
        'y': dictionary keyed on category; scores aligned with 'X'
        'categories_names': dictionary keyed on category; human readable name
        'categories_descriptions': dictionary keyed on category; human readable description

    References
    ----------
    DA Jurgens et al.,
    "Measuring degrees of relational similarity. In *SEM 2012: The First Joint
    Conference on Lexical and Computational Semantics", 2012

    Notes
    -----
    The competition itself was scored with the "golden" labels; the organisers
    released improved "platinium" labels afterwards.

    The task is, given two pairs of words, A:B and C:D, determine the degree to
    which the semantic relations between A and B are similar to those between
    C and D, recognizing the continuous range of degrees of relational similarity.
    """
    assert which in ['all', 'train', 'test']
    assert which_scoring in ['golden', 'platinium']

    path = _fetch_file(url='https://www.dropbox.com/sh/aarqsfnumx3d8ds/AAB05Mu2HdypP0pudGrNjooaa?dl=1',
                       data_dir='analogy',
                       uncompress=True,
                       move='EN-SEMVAL-2012-2/EN-SEMVAL-2012-2.zip',
                       verbose=0)

    # Data files, excluding the "_meta" sidecars.
    train_files = (set(glob.glob(os.path.join(path, 'train*.txt')))
                   - set(glob.glob(os.path.join(path, 'train*_meta.txt'))))
    test_files = (set(glob.glob(os.path.join(path, 'test*.txt')))
                  - set(glob.glob(os.path.join(path, 'test*_meta.txt'))))

    if which == 'train':
        files = train_files
    elif which == 'test':
        files = test_files
    elif which == 'all':
        files = train_files.union(test_files)

    questions = defaultdict(list)
    prototypes = {}
    golden_scores = {}
    platinium_scores = {}
    scores = {'platinium': platinium_scores, 'golden': golden_scores}
    categories_names = {}
    categories_descriptions = {}

    for file_name in files:
        # Sidecar meta file: second line is a CSV record (id, relation, names, description).
        with open(file_name[0:-4] + '_meta.txt') as meta_handle:
            meta = meta_handle.read().splitlines()[1].split(',')
        # The "pl-" prefixed sibling carries the platinium labels; the bare file is golden.
        with open(os.path.dirname(file_name) + '/pl-' + os.path.basename(file_name)) as pl_handle:
            platinium = pl_handle.read().splitlines()
        with open(file_name) as golden_handle:
            golden = golden_handle.read().splitlines()

        assert platinium[0] == golden[0], ('Incorrect file for ', file_name)

        c = meta[0] + '_' + meta[1]
        categories_names[c] = meta[2] + '_' + meta[3]
        categories_descriptions[c] = meta[4]

        # First line lists the prototype pairs, e.g. "a:b, c:d." -> [['a','b'], ['c','d']].
        prototypes[c] = [pair.split(':') for pair in
                         platinium[0].replace(': ', ':').replace(' ', ',').replace('.', '').split(',')]
        golden_scores[c] = {}
        platinium_scores[c] = {}
        questions_raw = []
        for record in platinium[1:]:
            word_pair, score = record.split()
            questions_raw.append(word_pair)
            questions[c].append([standardize_string(w) for w in word_pair.split(':')])
            platinium_scores[c][word_pair] = score
        for record in golden[1:]:
            word_pair, score = record.split()
            golden_scores[c][word_pair] = score

        # Flatten both score dicts into lists ordered like the platinium questions.
        platinium_scores[c] = [platinium_scores[c][w] for w in questions_raw]
        golden_scores[c] = [golden_scores[c][w] for w in questions_raw]

    return Bunch(X_prot=_change_list_to_np(prototypes),
                 X=_change_list_to_np(questions),
                 y=scores[which_scoring],
                 categories_names=categories_names,
                 categories_descriptions=categories_descriptions)
Fetch dataset used for SEMEVAL 2012 task 2 competition Parameters ------- which : "all", "train" or "test" which_scoring: "golden" or "platinium" (see Notes) Returns ------- data : sklearn.datasets.base.Bunch dictionary-like object. Keys of interest: 'X_prot': dictionary keyed on category. Each entry is a matrix of prototype word pairs (see Notes) 'X': dictionary keyed on category. Each entry is a matrix of question word pairs 'y': dictionary keyed on category. Each entry is a dictionary word pair -> score 'categories_names': dictionary keyed on category. Each entry is a human readable name of category. 'categories_descriptions': dictionary keyed on category. Each entry is a human readable description of category. References ---------- DA Jurgens et al., "Measuring degrees of relational similarity. In *SEM 2012: The First Joint Conference on Lexical and Computational Semantics", 2012 Notes ----- Dataset used in competition was scored as in golden scoring (which_scoring) parameter, however organiser have release improved labels afterwards (platinium scoring) The task is, given two pairs of words, A:B and C:D, determine the degree to which the semantic relations between A and B are similar to those between C and D. Unlike the more familiar task of semantic relation identification, which assigns each word pair to a discrete semantic relation class, this task recognizes the continuous range of degrees of relational similarity. The challenge is to determine the degrees of relational similarity between a given reference word pair and a variety of other pairs, mostly in the same general semantic relation class as the reference pair.
web/datasets/analogy.py
fetch_semeval_2012_2
jihaepat/word-embeddings-benchmarks
416
python
def fetch_semeval_2012_2(which='all', which_scoring='golden'): '\n Fetch dataset used for SEMEVAL 2012 task 2 competition\n\n Parameters\n -------\n which : "all", "train" or "test"\n which_scoring: "golden" or "platinium" (see Notes)\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n dictionary-like object. Keys of interest:\n \'X_prot\': dictionary keyed on category. Each entry is a matrix of prototype word pairs (see Notes)\n \'X\': dictionary keyed on category. Each entry is a matrix of question word pairs\n \'y\': dictionary keyed on category. Each entry is a dictionary word pair -> score\n\n \'categories_names\': dictionary keyed on category. Each entry is a human readable name of\n category.\n \'categories_descriptions\': dictionary keyed on category. Each entry is a human readable description of\n category.\n\n References\n ----------\n DA Jurgens et al.,\n "Measuring degrees of relational similarity. In *SEM 2012: The First Joint Conference on Lexical\n and Computational Semantics", 2012\n\n Notes\n -----\n Dataset used in competition was scored as in golden scoring (which_scoring) parameter, however\n organiser have release improved labels afterwards (platinium scoring)\n\n The task is, given two pairs of words, A:B and C:D, determine the degree to which the semantic relations between\n A and B are similar to those between C and D. Unlike the more familiar task of semantic relation identification,\n which assigns each word pair to a discrete semantic relation class, this task recognizes the continuous range of\n degrees of relational similarity. 
The challenge is to determine the degrees of relational similarity between a\n given reference word pair and a variety of other pairs, mostly in the same general semantic relation class as the\n reference pair.\n ' assert (which in ['all', 'train', 'test']) assert (which_scoring in ['golden', 'platinium']) path = _fetch_file(url='https://www.dropbox.com/sh/aarqsfnumx3d8ds/AAB05Mu2HdypP0pudGrNjooaa?dl=1', data_dir='analogy', uncompress=True, move='EN-SEMVAL-2012-2/EN-SEMVAL-2012-2.zip', verbose=0) train_files = (set(glob.glob(os.path.join(path, 'train*.txt'))) - set(glob.glob(os.path.join(path, 'train*_meta.txt')))) test_files = (set(glob.glob(os.path.join(path, 'test*.txt'))) - set(glob.glob(os.path.join(path, 'test*_meta.txt')))) if (which == 'train'): files = train_files elif (which == 'test'): files = test_files elif (which == 'all'): files = train_files.union(test_files) questions = defaultdict(list) prototypes = {} golden_scores = {} platinium_scores = {} scores = {'platinium': platinium_scores, 'golden': golden_scores} categories_names = {} categories_descriptions = {} for f in files: with open((f[0:(- 4)] + '_meta.txt')) as meta_f: meta = meta_f.read().splitlines()[1].split(',') with open(((os.path.dirname(f) + '/pl-') + os.path.basename(f))) as f_pl: platinium = f_pl.read().splitlines() with open(f) as f_gl: golden = f_gl.read().splitlines() assert (platinium[0] == golden[0]), ('Incorrect file for ', f) c = ((meta[0] + '_') + meta[1]) categories_names[c] = ((meta[2] + '_') + meta[3]) categories_descriptions[c] = meta[4] prototypes[c] = [l.split(':') for l in platinium[0].replace(': ', ':').replace(' ', ',').replace('.', ).split(',')] golden_scores[c] = {} platinium_scores[c] = {} questions_raw = [] for line_pl in platinium[1:]: (word_pair, score) = line_pl.split() questions_raw.append(word_pair) questions[c].append([standardize_string(w) for w in word_pair.split(':')]) platinium_scores[c][word_pair] = score for line_g in golden[1:]: (word_pair, score) = 
line_g.split() golden_scores[c][word_pair] = score platinium_scores[c] = [platinium_scores[c][w] for w in questions_raw] golden_scores[c] = [golden_scores[c][w] for w in questions_raw] return Bunch(X_prot=_change_list_to_np(prototypes), X=_change_list_to_np(questions), y=scores[which_scoring], categories_names=categories_names, categories_descriptions=categories_descriptions)
def fetch_semeval_2012_2(which='all', which_scoring='golden'): '\n Fetch dataset used for SEMEVAL 2012 task 2 competition\n\n Parameters\n -------\n which : "all", "train" or "test"\n which_scoring: "golden" or "platinium" (see Notes)\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n dictionary-like object. Keys of interest:\n \'X_prot\': dictionary keyed on category. Each entry is a matrix of prototype word pairs (see Notes)\n \'X\': dictionary keyed on category. Each entry is a matrix of question word pairs\n \'y\': dictionary keyed on category. Each entry is a dictionary word pair -> score\n\n \'categories_names\': dictionary keyed on category. Each entry is a human readable name of\n category.\n \'categories_descriptions\': dictionary keyed on category. Each entry is a human readable description of\n category.\n\n References\n ----------\n DA Jurgens et al.,\n "Measuring degrees of relational similarity. In *SEM 2012: The First Joint Conference on Lexical\n and Computational Semantics", 2012\n\n Notes\n -----\n Dataset used in competition was scored as in golden scoring (which_scoring) parameter, however\n organiser have release improved labels afterwards (platinium scoring)\n\n The task is, given two pairs of words, A:B and C:D, determine the degree to which the semantic relations between\n A and B are similar to those between C and D. Unlike the more familiar task of semantic relation identification,\n which assigns each word pair to a discrete semantic relation class, this task recognizes the continuous range of\n degrees of relational similarity. 
The challenge is to determine the degrees of relational similarity between a\n given reference word pair and a variety of other pairs, mostly in the same general semantic relation class as the\n reference pair.\n ' assert (which in ['all', 'train', 'test']) assert (which_scoring in ['golden', 'platinium']) path = _fetch_file(url='https://www.dropbox.com/sh/aarqsfnumx3d8ds/AAB05Mu2HdypP0pudGrNjooaa?dl=1', data_dir='analogy', uncompress=True, move='EN-SEMVAL-2012-2/EN-SEMVAL-2012-2.zip', verbose=0) train_files = (set(glob.glob(os.path.join(path, 'train*.txt'))) - set(glob.glob(os.path.join(path, 'train*_meta.txt')))) test_files = (set(glob.glob(os.path.join(path, 'test*.txt'))) - set(glob.glob(os.path.join(path, 'test*_meta.txt')))) if (which == 'train'): files = train_files elif (which == 'test'): files = test_files elif (which == 'all'): files = train_files.union(test_files) questions = defaultdict(list) prototypes = {} golden_scores = {} platinium_scores = {} scores = {'platinium': platinium_scores, 'golden': golden_scores} categories_names = {} categories_descriptions = {} for f in files: with open((f[0:(- 4)] + '_meta.txt')) as meta_f: meta = meta_f.read().splitlines()[1].split(',') with open(((os.path.dirname(f) + '/pl-') + os.path.basename(f))) as f_pl: platinium = f_pl.read().splitlines() with open(f) as f_gl: golden = f_gl.read().splitlines() assert (platinium[0] == golden[0]), ('Incorrect file for ', f) c = ((meta[0] + '_') + meta[1]) categories_names[c] = ((meta[2] + '_') + meta[3]) categories_descriptions[c] = meta[4] prototypes[c] = [l.split(':') for l in platinium[0].replace(': ', ':').replace(' ', ',').replace('.', ).split(',')] golden_scores[c] = {} platinium_scores[c] = {} questions_raw = [] for line_pl in platinium[1:]: (word_pair, score) = line_pl.split() questions_raw.append(word_pair) questions[c].append([standardize_string(w) for w in word_pair.split(':')]) platinium_scores[c][word_pair] = score for line_g in golden[1:]: (word_pair, score) = 
line_g.split() golden_scores[c][word_pair] = score platinium_scores[c] = [platinium_scores[c][w] for w in questions_raw] golden_scores[c] = [golden_scores[c][w] for w in questions_raw] return Bunch(X_prot=_change_list_to_np(prototypes), X=_change_list_to_np(questions), y=scores[which_scoring], categories_names=categories_names, categories_descriptions=categories_descriptions)<|docstring|>Fetch dataset used for SEMEVAL 2012 task 2 competition Parameters ------- which : "all", "train" or "test" which_scoring: "golden" or "platinium" (see Notes) Returns ------- data : sklearn.datasets.base.Bunch dictionary-like object. Keys of interest: 'X_prot': dictionary keyed on category. Each entry is a matrix of prototype word pairs (see Notes) 'X': dictionary keyed on category. Each entry is a matrix of question word pairs 'y': dictionary keyed on category. Each entry is a dictionary word pair -> score 'categories_names': dictionary keyed on category. Each entry is a human readable name of category. 'categories_descriptions': dictionary keyed on category. Each entry is a human readable description of category. References ---------- DA Jurgens et al., "Measuring degrees of relational similarity. In *SEM 2012: The First Joint Conference on Lexical and Computational Semantics", 2012 Notes ----- Dataset used in competition was scored as in golden scoring (which_scoring) parameter, however organiser have release improved labels afterwards (platinium scoring) The task is, given two pairs of words, A:B and C:D, determine the degree to which the semantic relations between A and B are similar to those between C and D. Unlike the more familiar task of semantic relation identification, which assigns each word pair to a discrete semantic relation class, this task recognizes the continuous range of degrees of relational similarity. 
The challenge is to determine the degrees of relational similarity between a given reference word pair and a variety of other pairs, mostly in the same general semantic relation class as the reference pair.<|endoftext|>
624a34c0fe587763d1adcef387356f6d3365181274494a9cb3ec8a96637e93c5
@registries.Registry.defaults('sample_mode') def _default_list_sample_mode(): ' By default we do not sample from lists backed types ' return False
By default we do not sample from lists backed types
datacraft/defaults.py
_default_list_sample_mode
bbux-dev/datagen
0
python
@registries.Registry.defaults('sample_mode') def _default_list_sample_mode(): ' ' return False
@registries.Registry.defaults('sample_mode') def _default_list_sample_mode(): ' ' return False<|docstring|>By default we do not sample from lists backed types<|endoftext|>
ec9ebc0e3325252f0847303e428f8d5564e17cd12e5ed8d42a79741e5c500161
@registries.Registry.defaults('unicode_join_with') @registries.Registry.defaults('char_class_join_with') @registries.Registry.defaults('combine_join_with') def _default_char_class_join_with(): ' default join for char_class and combine types ' return ''
default join for char_class and combine types
datacraft/defaults.py
_default_char_class_join_with
bbux-dev/datagen
0
python
@registries.Registry.defaults('unicode_join_with') @registries.Registry.defaults('char_class_join_with') @registries.Registry.defaults('combine_join_with') def _default_char_class_join_with(): ' ' return
@registries.Registry.defaults('unicode_join_with') @registries.Registry.defaults('char_class_join_with') @registries.Registry.defaults('combine_join_with') def _default_char_class_join_with(): ' ' return <|docstring|>default join for char_class and combine types<|endoftext|>
4d3274299f98abc1d5636f1093aa8fbc6ce0e61349169b8289ad351dc354707c
@registries.Registry.defaults('combine_as_list') @registries.Registry.defaults('geo_as_list') def _default_as_list_false(): ' default as list for combine and geo types ' return False
default as list for combine and geo types
datacraft/defaults.py
_default_as_list_false
bbux-dev/datagen
0
python
@registries.Registry.defaults('combine_as_list') @registries.Registry.defaults('geo_as_list') def _default_as_list_false(): ' ' return False
@registries.Registry.defaults('combine_as_list') @registries.Registry.defaults('geo_as_list') def _default_as_list_false(): ' ' return False<|docstring|>default as list for combine and geo types<|endoftext|>
d5c3cdd8caad4f6330186054ff91d8ed5fccfb06b3042aa62af5d3de29a7655b
@registries.Registry.defaults('geo_lat_first') def _default_geo_lat_first(): ' default lat first for geo types ' return False
default lat first for geo types
datacraft/defaults.py
_default_geo_lat_first
bbux-dev/datagen
0
python
@registries.Registry.defaults('geo_lat_first') def _default_geo_lat_first(): ' ' return False
@registries.Registry.defaults('geo_lat_first') def _default_geo_lat_first(): ' ' return False<|docstring|>default lat first for geo types<|endoftext|>
b9b385aa91f8b64e63c26554700f7dc5a28464dabb949631ba2d16655f5529a2
@registries.Registry.defaults('geo_join_with') def _default_geo_join_with(): ' default join for geo types ' return ','
default join for geo types
datacraft/defaults.py
_default_geo_join_with
bbux-dev/datagen
0
python
@registries.Registry.defaults('geo_join_with') def _default_geo_join_with(): ' ' return ','
@registries.Registry.defaults('geo_join_with') def _default_geo_join_with(): ' ' return ','<|docstring|>default join for geo types<|endoftext|>
f96fa90ccb9161c4bb5ce443045bb7544176a863c4b42b4e35a872e2285fe595
@registries.Registry.defaults('date_stddev_days') def _default_date_stddev_days(): ' default date stddev days ' return 15
default date stddev days
datacraft/defaults.py
_default_date_stddev_days
bbux-dev/datagen
0
python
@registries.Registry.defaults('date_stddev_days') def _default_date_stddev_days(): ' ' return 15
@registries.Registry.defaults('date_stddev_days') def _default_date_stddev_days(): ' ' return 15<|docstring|>default date stddev days<|endoftext|>
0fc7dc4624c4430e055b7f10aa4c1d568b338c6ea90f617de342bbd9daf63f93
@registries.Registry.defaults('date_format') def _default_date_format(): ' default date format ' return '%d-%m-%Y'
default date format
datacraft/defaults.py
_default_date_format
bbux-dev/datagen
0
python
@registries.Registry.defaults('date_format') def _default_date_format(): ' ' return '%d-%m-%Y'
@registries.Registry.defaults('date_format') def _default_date_format(): ' ' return '%d-%m-%Y'<|docstring|>default date format<|endoftext|>
3a25eed4fd4c22edf1276c48c498d8b2c592ce21b8e478e3aa63222229d8bdbd
@registries.Registry.defaults('date_duration_days') def _default_date_duration_days(): ' default date duration days ' return 30
default date duration days
datacraft/defaults.py
_default_date_duration_days
bbux-dev/datagen
0
python
@registries.Registry.defaults('date_duration_days') def _default_date_duration_days(): ' ' return 30
@registries.Registry.defaults('date_duration_days') def _default_date_duration_days(): ' ' return 30<|docstring|>default date duration days<|endoftext|>
431ef185045877b8c55bd85acf7064269f95c405a95594eba1996a8b5fe87cfc
@registries.Registry.defaults('geo_precision') def _default_geo_type_precision(): ' default digits after decimal for geo types ' return 4
default digits after decimal for geo types
datacraft/defaults.py
_default_geo_type_precision
bbux-dev/datagen
0
python
@registries.Registry.defaults('geo_precision') def _default_geo_type_precision(): ' ' return 4
@registries.Registry.defaults('geo_precision') def _default_geo_type_precision(): ' ' return 4<|docstring|>default digits after decimal for geo types<|endoftext|>
bfff461c950d36f9d01c6f48f7f562cd63644580993d27e6914d055cd9d80558
@registries.Registry.defaults('json_indent') def _default_json_indent(): ' default spaces to indent when using json-pretty formatter ' return 4
default spaces to indent when using json-pretty formatter
datacraft/defaults.py
_default_json_indent
bbux-dev/datagen
0
python
@registries.Registry.defaults('json_indent') def _default_json_indent(): ' ' return 4
@registries.Registry.defaults('json_indent') def _default_json_indent(): ' ' return 4<|docstring|>default spaces to indent when using json-pretty formatter<|endoftext|>
30d1b46609f0a6e616743e3a7ea14fa287f21aa7051a3addea3c37795bfb7bd9
@registries.Registry.defaults('large_csv_size_mb') def _default_large_csv_size(): ' default size for what constitutes a large csv file ' return _LARGE_CSV_SIZE_MB
default size for what constitutes a large csv file
datacraft/defaults.py
_default_large_csv_size
bbux-dev/datagen
0
python
@registries.Registry.defaults('large_csv_size_mb') def _default_large_csv_size(): ' ' return _LARGE_CSV_SIZE_MB
@registries.Registry.defaults('large_csv_size_mb') def _default_large_csv_size(): ' ' return _LARGE_CSV_SIZE_MB<|docstring|>default size for what constitutes a large csv file<|endoftext|>
a089c579df8e80fd16bb0571cf4265d1f5c3f37c3235cacd9cb766905df9e346
@registries.Registry.defaults('data_dir') def _default_data_dir(): ' default location for data directory ' return './data'
default location for data directory
datacraft/defaults.py
_default_data_dir
bbux-dev/datagen
0
python
@registries.Registry.defaults('data_dir') def _default_data_dir(): ' ' return './data'
@registries.Registry.defaults('data_dir') def _default_data_dir(): ' ' return './data'<|docstring|>default location for data directory<|endoftext|>
96b41f45ed18f626682911eba6090b39f95296604c9582f5f67d499300469ba4
@registries.Registry.defaults('csv_file') def _default_csv_file(): ' default name for csv files ' return 'data.csv'
default name for csv files
datacraft/defaults.py
_default_csv_file
bbux-dev/datagen
0
python
@registries.Registry.defaults('csv_file') def _default_csv_file(): ' ' return 'data.csv'
@registries.Registry.defaults('csv_file') def _default_csv_file(): ' ' return 'data.csv'<|docstring|>default name for csv files<|endoftext|>
1170b35931899163dd33c18a36e4c4b48d29d0c57d51e6f35794f4eb15bc3558
@registries.Registry.defaults('mac_addr_separator') def _default_mac_address_separator(): ' default mac address separator ' return ':'
default mac address separator
datacraft/defaults.py
_default_mac_address_separator
bbux-dev/datagen
0
python
@registries.Registry.defaults('mac_addr_separator') def _default_mac_address_separator(): ' ' return ':'
@registries.Registry.defaults('mac_addr_separator') def _default_mac_address_separator(): ' ' return ':'<|docstring|>default mac address separator<|endoftext|>
bce8c28ed2ea2b6dfa90acdf3209e7df19d1d948e6b7a079d1771f76a2e634eb
@registries.Registry.defaults('outfile_prefix') def _default_outfile_prefix(): ' default output file prefix ' return 'generated'
default output file prefix
datacraft/defaults.py
_default_outfile_prefix
bbux-dev/datagen
0
python
@registries.Registry.defaults('outfile_prefix') def _default_outfile_prefix(): ' ' return 'generated'
@registries.Registry.defaults('outfile_prefix') def _default_outfile_prefix(): ' ' return 'generated'<|docstring|>default output file prefix<|endoftext|>
656076ff17391875d963e185d1c0ef71284781a52a8ac1cff916edcac6cdf3ed
@registries.Registry.defaults('outfile_extension') def _default_outfile_extension(): ' default output file extension ' return ''
default output file extension
datacraft/defaults.py
_default_outfile_extension
bbux-dev/datagen
0
python
@registries.Registry.defaults('outfile_extension') def _default_outfile_extension(): ' ' return
@registries.Registry.defaults('outfile_extension') def _default_outfile_extension(): ' ' return <|docstring|>default output file extension<|endoftext|>
e25735c9db8a8de0db93166f6eaf3e29c8ab6cd74c3380bd0440a39d43a541e1
@registries.Registry.defaults('log_level') def _default_log_level(): ' default logging level ' return logging.INFO
default logging level
datacraft/defaults.py
_default_log_level
bbux-dev/datagen
0
python
@registries.Registry.defaults('log_level') def _default_log_level(): ' ' return logging.INFO
@registries.Registry.defaults('log_level') def _default_log_level(): ' ' return logging.INFO<|docstring|>default logging level<|endoftext|>
6acf2a011420d58184e8a3a906130f82ef51abac72964c1cc012c142852ede60
@registries.Registry.defaults('strict_mode') def _default_strict_mode(): ' default strict schema checking enabled ' return False
default strict schema checking enabled
datacraft/defaults.py
_default_strict_mode
bbux-dev/datagen
0
python
@registries.Registry.defaults('strict_mode') def _default_strict_mode(): ' ' return False
@registries.Registry.defaults('strict_mode') def _default_strict_mode(): ' ' return False<|docstring|>default strict schema checking enabled<|endoftext|>
262abb06f50cbead9ad390ee5026485b62556e4269c000737f6c702b21490b14
@registries.Registry.defaults('exclude_internal') def _default_exclude_internal(): ' default if internal fields should be excluded from output ' return False
default if internal fields should be excluded from output
datacraft/defaults.py
_default_exclude_internal
bbux-dev/datagen
0
python
@registries.Registry.defaults('exclude_internal') def _default_exclude_internal(): ' ' return False
@registries.Registry.defaults('exclude_internal') def _default_exclude_internal(): ' ' return False<|docstring|>default if internal fields should be excluded from output<|endoftext|>
007d06b9fe96d1065a20699531f752a67cc9c641420a75d98a454413917fc209
@registries.Registry.defaults('sample_lists') def _default_sample_lists(): ' default if lists should be sampled ' return False
default if lists should be sampled
datacraft/defaults.py
_default_sample_lists
bbux-dev/datagen
0
python
@registries.Registry.defaults('sample_lists') def _default_sample_lists(): ' ' return False
@registries.Registry.defaults('sample_lists') def _default_sample_lists(): ' ' return False<|docstring|>default if lists should be sampled<|endoftext|>
ea8b4c5d093fc7fda69c6d8ad5cc91bea6d8847748d5546dc7e861053ed42738
@registries.Registry.defaults('uuid_variant') def _default_uuid_variant(): ' default uuid variant ' return 4
default uuid variant
datacraft/defaults.py
_default_uuid_variant
bbux-dev/datagen
0
python
@registries.Registry.defaults('uuid_variant') def _default_uuid_variant(): ' ' return 4
@registries.Registry.defaults('uuid_variant') def _default_uuid_variant(): ' ' return 4<|docstring|>default uuid variant<|endoftext|>
2d0c6c653193a2919c10bbc2c947d03076ed81818090036380d1e0b7aec98581
@registries.Registry.defaults('format_json_ascii') def _default_format_json_ascii(): ' if the JSON formatted data should be ascii ' return False
if the JSON formatted data should be ascii
datacraft/defaults.py
_default_format_json_ascii
bbux-dev/datagen
0
python
@registries.Registry.defaults('format_json_ascii') def _default_format_json_ascii(): ' ' return False
@registries.Registry.defaults('format_json_ascii') def _default_format_json_ascii(): ' ' return False<|docstring|>if the JSON formatted data should be ascii<|endoftext|>
e248abd778e49cd3c37ee6ca8538f6bb3dd6addac32f8333508cf743f92b293b
def __init__(self, configuration, workflow): '\n Parses the configuration file and loads CONFIG_ADDRESS and CONFIG_PORT\n ' super(NetworkAgent, self).__init__() self._mid = None self._pid = None idx_semicolon = configuration.find(':') assert (idx_semicolon >= 0) self.address = configuration[:idx_semicolon] if (not self.address): self.address = '0.0.0.0' self.port = int(configuration[(idx_semicolon + 1):]) self.debug('Network configuration: %s:%d', self.address, self.port) self._workflow = workflow self._launcher = workflow.workflow
Parses the configuration file and loads CONFIG_ADDRESS and CONFIG_PORT
veles/network_common.py
__init__
gkuznetsov/veles
1,007
python
def __init__(self, configuration, workflow): '\n \n ' super(NetworkAgent, self).__init__() self._mid = None self._pid = None idx_semicolon = configuration.find(':') assert (idx_semicolon >= 0) self.address = configuration[:idx_semicolon] if (not self.address): self.address = '0.0.0.0' self.port = int(configuration[(idx_semicolon + 1):]) self.debug('Network configuration: %s:%d', self.address, self.port) self._workflow = workflow self._launcher = workflow.workflow
def __init__(self, configuration, workflow): '\n \n ' super(NetworkAgent, self).__init__() self._mid = None self._pid = None idx_semicolon = configuration.find(':') assert (idx_semicolon >= 0) self.address = configuration[:idx_semicolon] if (not self.address): self.address = '0.0.0.0' self.port = int(configuration[(idx_semicolon + 1):]) self.debug('Network configuration: %s:%d', self.address, self.port) self._workflow = workflow self._launcher = workflow.workflow<|docstring|>Parses the configuration file and loads CONFIG_ADDRESS and CONFIG_PORT<|endoftext|>
595d0d72b93100ff9c959bb79c26ef7366112ec90de949b4a9c4061c2ae0919c
def __init__(self, UUID, cipherAlgorithm, macAlgorithm): ' A stub constructor for the xbmcdrm CryptoSession class '
A stub constructor for the xbmcdrm CryptoSession class
test/xbmcdrm.py
__init__
Spider-24/plugin.video.netflix
5
python
def __init__(self, UUID, cipherAlgorithm, macAlgorithm): ' '
def __init__(self, UUID, cipherAlgorithm, macAlgorithm): ' '<|docstring|>A stub constructor for the xbmcdrm CryptoSession class<|endoftext|>
a9c8042dd361af5e14639ea71e735cec8098948b502e837d3a6ad2958aacf787
def Decrypt(self, cipherKeyId, input, iv): ' A stub implementation for the xbmcdrm CryptoSession class Decrypt() method ' return ''
A stub implementation for the xbmcdrm CryptoSession class Decrypt() method
test/xbmcdrm.py
Decrypt
Spider-24/plugin.video.netflix
5
python
def Decrypt(self, cipherKeyId, input, iv): ' ' return
def Decrypt(self, cipherKeyId, input, iv): ' ' return <|docstring|>A stub implementation for the xbmcdrm CryptoSession class Decrypt() method<|endoftext|>
76390ad471b2a5c653d9102d23dbeb58e01e9dd382ca0725dacb1214223321de
def Encrypt(self, cipherKeyId, input, iv): ' A stub implementation for the xbmcdrm CryptoSession class Encrypt() method ' return ''
A stub implementation for the xbmcdrm CryptoSession class Encrypt() method
test/xbmcdrm.py
Encrypt
Spider-24/plugin.video.netflix
5
python
def Encrypt(self, cipherKeyId, input, iv): ' ' return
def Encrypt(self, cipherKeyId, input, iv): ' ' return <|docstring|>A stub implementation for the xbmcdrm CryptoSession class Encrypt() method<|endoftext|>
94f13ba4e6524b86f3f58ebc62c27755bb76136eab93e40f0313767f0697238c
def GetKeyRequest(self, init, mimeType, offlineKey, optionalParameters): ' A stub implementation for the xbmcdrm CryptoSession class GetKeyRequest() method ' return
A stub implementation for the xbmcdrm CryptoSession class GetKeyRequest() method
test/xbmcdrm.py
GetKeyRequest
Spider-24/plugin.video.netflix
5
python
def GetKeyRequest(self, init, mimeType, offlineKey, optionalParameters): ' ' return
def GetKeyRequest(self, init, mimeType, offlineKey, optionalParameters): ' ' return<|docstring|>A stub implementation for the xbmcdrm CryptoSession class GetKeyRequest() method<|endoftext|>
fe2f9e25f407d9dea84a0f25684747d96542531d6cc5391cfaca8921eb0d8a70
def GetPropertyString(self, name): ' A stub implementation for the xbmcdrm CryptoSession class GetPropertyString() method ' return
A stub implementation for the xbmcdrm CryptoSession class GetPropertyString() method
test/xbmcdrm.py
GetPropertyString
Spider-24/plugin.video.netflix
5
python
def GetPropertyString(self, name): ' ' return
def GetPropertyString(self, name): ' ' return<|docstring|>A stub implementation for the xbmcdrm CryptoSession class GetPropertyString() method<|endoftext|>
b9ab99ea057603a2dfd4b490ad560f01b18624afd789e420277e40d854c0e5ac
def ProvideKeyResponse(self, response): ' A stub implementation for the xbmcdrm CryptoSession class ProvideKeyResponse() method ' return
A stub implementation for the xbmcdrm CryptoSession class ProvideKeyResponse() method
test/xbmcdrm.py
ProvideKeyResponse
Spider-24/plugin.video.netflix
5
python
def ProvideKeyResponse(self, response): ' ' return
def ProvideKeyResponse(self, response): ' ' return<|docstring|>A stub implementation for the xbmcdrm CryptoSession class ProvideKeyResponse() method<|endoftext|>
cc9d084c0d860b4e07296c47b6a714472f3df95c01361cb51b70843cda33e4e5
def RemoveKeys(self): ' A stub implementation for the xbmcdrm CryptoSession class RemoveKeys() method '
A stub implementation for the xbmcdrm CryptoSession class RemoveKeys() method
test/xbmcdrm.py
RemoveKeys
Spider-24/plugin.video.netflix
5
python
def RemoveKeys(self): ' '
def RemoveKeys(self): ' '<|docstring|>A stub implementation for the xbmcdrm CryptoSession class RemoveKeys() method<|endoftext|>
7c73ec16a4c014137bde715e801babda6df8e664acee7a261a335aed4ea24387
def RestoreKeys(self, keySetId): ' A stub implementation for the xbmcdrm CryptoSession class RestoreKeys() method '
A stub implementation for the xbmcdrm CryptoSession class RestoreKeys() method
test/xbmcdrm.py
RestoreKeys
Spider-24/plugin.video.netflix
5
python
def RestoreKeys(self, keySetId): ' '
def RestoreKeys(self, keySetId): ' '<|docstring|>A stub implementation for the xbmcdrm CryptoSession class RestoreKeys() method<|endoftext|>
a02b07318c24050bd469a7c09923d4e3378dea2009d290c68a2aae966587dc6c
def SetPropertyString(self, name, value): ' A stub implementation for the xbmcdrm CryptoSession class SetPropertyString() method ' return value
A stub implementation for the xbmcdrm CryptoSession class SetPropertyString() method
test/xbmcdrm.py
SetPropertyString
Spider-24/plugin.video.netflix
5
python
def SetPropertyString(self, name, value): ' ' return value
def SetPropertyString(self, name, value): ' ' return value<|docstring|>A stub implementation for the xbmcdrm CryptoSession class SetPropertyString() method<|endoftext|>
8e751ffcefef30617c83779586066ffca6b5d8b89d178f49bb76f2a034650021
def Sign(self, macKeyId, message): ' A stub implementation for the xbmcdrm CryptoSession class Sign() method ' return b''
A stub implementation for the xbmcdrm CryptoSession class Sign() method
test/xbmcdrm.py
Sign
Spider-24/plugin.video.netflix
5
python
def Sign(self, macKeyId, message): ' ' return b
def Sign(self, macKeyId, message): ' ' return b<|docstring|>A stub implementation for the xbmcdrm CryptoSession class Sign() method<|endoftext|>
8fd07b82af60e0191074b9eb6e71a22caf46dba9516aef4870ab461a825a33bb
def Verify(self, macKeyId, message, signature): ' A stub implementation for the xbmcdrm CryptoSession class Verify() method ' return True
A stub implementation for the xbmcdrm CryptoSession class Verify() method
test/xbmcdrm.py
Verify
Spider-24/plugin.video.netflix
5
python
def Verify(self, macKeyId, message, signature): ' ' return True
def Verify(self, macKeyId, message, signature): ' ' return True<|docstring|>A stub implementation for the xbmcdrm CryptoSession class Verify() method<|endoftext|>
dc39bff5ad0695d7716554b9cfc2776cb726dafbf75d90e711fafd9b8b2a7795
def normalise_encoding(encoding): 'Replace encoding name with normalised variant.' encoding = encoding.lower().replace('_', '-') return _ENCODING_ALIASES.get(encoding, encoding)
Replace encoding name with normalised variant.
monobit/encoding.py
normalise_encoding
trevorld/monobit
0
python
def normalise_encoding(encoding): encoding = encoding.lower().replace('_', '-') return _ENCODING_ALIASES.get(encoding, encoding)
def normalise_encoding(encoding): encoding = encoding.lower().replace('_', '-') return _ENCODING_ALIASES.get(encoding, encoding)<|docstring|>Replace encoding name with normalised variant.<|endoftext|>
1131a86f463eb737a3dcad280fd46d4874f00a8368c5c6d7b5069ca7b283e401
def get_encoder(encoding_name, default=''): 'Find an encoding by name and return codec.' encoding_name = (encoding_name or default) if encoding_name: encoding_name = encoding_name.lower().replace('-', '_') if (normalise_encoding(encoding_name) == 'unicode'): return Unicode try: return Codepage(encoding_name) except LookupError: pass try: return Codec(encoding_name) except LookupError: pass logging.debug('Unknown encoding `%s`.', encoding_name) return None
Find an encoding by name and return codec.
monobit/encoding.py
get_encoder
trevorld/monobit
0
python
def get_encoder(encoding_name, default=): encoding_name = (encoding_name or default) if encoding_name: encoding_name = encoding_name.lower().replace('-', '_') if (normalise_encoding(encoding_name) == 'unicode'): return Unicode try: return Codepage(encoding_name) except LookupError: pass try: return Codec(encoding_name) except LookupError: pass logging.debug('Unknown encoding `%s`.', encoding_name) return None
def get_encoder(encoding_name, default=): encoding_name = (encoding_name or default) if encoding_name: encoding_name = encoding_name.lower().replace('-', '_') if (normalise_encoding(encoding_name) == 'unicode'): return Unicode try: return Codepage(encoding_name) except LookupError: pass try: return Codec(encoding_name) except LookupError: pass logging.debug('Unknown encoding `%s`.', encoding_name) return None<|docstring|>Find an encoding by name and return codec.<|endoftext|>
1a8b9b971b1de8eca30a316f1d630e23078d7634eff5618f04e1a4363b278784
def __init__(self, encoding): 'Set up codec.' b'x'.decode(encoding) 'x'.encode(encoding) self._encoding = encoding
Set up codec.
monobit/encoding.py
__init__
trevorld/monobit
0
python
def __init__(self, encoding): b'x'.decode(encoding) 'x'.encode(encoding) self._encoding = encoding
def __init__(self, encoding): b'x'.decode(encoding) 'x'.encode(encoding) self._encoding = encoding<|docstring|>Set up codec.<|endoftext|>
83ac5bdcd6fa9ee31bf94f794c245f6edccc1ee58073ed58c83e6c9769609de2
def chr(self, ordinal): 'Convert ordinal to character, return empty string if missing.' if (ordinal is None): return '' byte = bytes([int(ordinal)]) char = byte.decode(self._encoding, errors='ignore') return char
Convert ordinal to character, return empty string if missing.
monobit/encoding.py
chr
trevorld/monobit
0
python
def chr(self, ordinal): if (ordinal is None): return byte = bytes([int(ordinal)]) char = byte.decode(self._encoding, errors='ignore') return char
def chr(self, ordinal): if (ordinal is None): return byte = bytes([int(ordinal)]) char = byte.decode(self._encoding, errors='ignore') return char<|docstring|>Convert ordinal to character, return empty string if missing.<|endoftext|>
79c63fa589f86ea1697afe085ab7fa207f761310b28133f6ed15b163c9610d81
def ord(self, char): 'Convert character to ordinal, return None if missing.' byte = char.encode(self._encoding, errors='ignore') if (not byte): return None return byte[0]
Convert character to ordinal, return None if missing.
monobit/encoding.py
ord
trevorld/monobit
0
python
def ord(self, char): byte = char.encode(self._encoding, errors='ignore') if (not byte): return None return byte[0]
def ord(self, char): byte = char.encode(self._encoding, errors='ignore') if (not byte): return None return byte[0]<|docstring|>Convert character to ordinal, return None if missing.<|endoftext|>
a57b7a35fdf577b851404576389e91483baa071389a02cfc2b07542984c7379d
def __init__(self, codepage_name): 'Read a codepage file and convert to codepage dict.' codepage_name = codepage_name.lower().replace('_', '-') if (codepage_name in self._registered): with open(self._registered[codepage_name], 'rb') as custom_cp: data = custom_cp.read() else: try: data = pkgutil.get_data(__name__, 'codepages/{}.ucp'.format(codepage_name)) except EnvironmentError: data = None if (data is None): raise LookupError(codepage_name) self._mapping = self._mapping_from_ucp_data(data) self._inv_mapping = {_v: _k for (_k, _v) in self._mapping.items()}
Read a codepage file and convert to codepage dict.
monobit/encoding.py
__init__
trevorld/monobit
0
python
def __init__(self, codepage_name): codepage_name = codepage_name.lower().replace('_', '-') if (codepage_name in self._registered): with open(self._registered[codepage_name], 'rb') as custom_cp: data = custom_cp.read() else: try: data = pkgutil.get_data(__name__, 'codepages/{}.ucp'.format(codepage_name)) except EnvironmentError: data = None if (data is None): raise LookupError(codepage_name) self._mapping = self._mapping_from_ucp_data(data) self._inv_mapping = {_v: _k for (_k, _v) in self._mapping.items()}
def __init__(self, codepage_name): codepage_name = codepage_name.lower().replace('_', '-') if (codepage_name in self._registered): with open(self._registered[codepage_name], 'rb') as custom_cp: data = custom_cp.read() else: try: data = pkgutil.get_data(__name__, 'codepages/{}.ucp'.format(codepage_name)) except EnvironmentError: data = None if (data is None): raise LookupError(codepage_name) self._mapping = self._mapping_from_ucp_data(data) self._inv_mapping = {_v: _k for (_k, _v) in self._mapping.items()}<|docstring|>Read a codepage file and convert to codepage dict.<|endoftext|>
f0cc18eea55be1cb7e887105377929340b6578b525ef4eddea562dd6b38a5298
def _mapping_from_ucp_data(self, data): 'Extract codepage mapping from ucp file data (as bytes).' mapping = {} for line in data.decode('utf-8-sig').splitlines(): if ((not line) or (line[0] == '#')): continue splitline = line.split('#')[0].split(':') if (len(splitline) < 2): continue try: cp_point = int(splitline[0].strip(), 16) mapping[cp_point] = ''.join((chr(int(ucs_str.strip(), 16)) for ucs_str in splitline[1].split(','))) except (ValueError, TypeError): logging.warning('Could not parse line in codepage file: %s', repr(line)) return mapping
Extract codepage mapping from ucp file data (as bytes).
monobit/encoding.py
_mapping_from_ucp_data
trevorld/monobit
0
python
def _mapping_from_ucp_data(self, data): mapping = {} for line in data.decode('utf-8-sig').splitlines(): if ((not line) or (line[0] == '#')): continue splitline = line.split('#')[0].split(':') if (len(splitline) < 2): continue try: cp_point = int(splitline[0].strip(), 16) mapping[cp_point] = .join((chr(int(ucs_str.strip(), 16)) for ucs_str in splitline[1].split(','))) except (ValueError, TypeError): logging.warning('Could not parse line in codepage file: %s', repr(line)) return mapping
def _mapping_from_ucp_data(self, data): mapping = {} for line in data.decode('utf-8-sig').splitlines(): if ((not line) or (line[0] == '#')): continue splitline = line.split('#')[0].split(':') if (len(splitline) < 2): continue try: cp_point = int(splitline[0].strip(), 16) mapping[cp_point] = .join((chr(int(ucs_str.strip(), 16)) for ucs_str in splitline[1].split(','))) except (ValueError, TypeError): logging.warning('Could not parse line in codepage file: %s', repr(line)) return mapping<|docstring|>Extract codepage mapping from ucp file data (as bytes).<|endoftext|>
b528e601a96b83e1b5a88b3a3d07f1e30b2a19225e7020512a8458b3be77e6ce
def chr(self, ordinal): 'Convert ordinal to character, return empty string if missing.' try: return self._mapping[int(ordinal)] except (KeyError, TypeError) as e: return ''
Convert ordinal to character, return empty string if missing.
monobit/encoding.py
chr
trevorld/monobit
0
python
def chr(self, ordinal): try: return self._mapping[int(ordinal)] except (KeyError, TypeError) as e: return
def chr(self, ordinal): try: return self._mapping[int(ordinal)] except (KeyError, TypeError) as e: return <|docstring|>Convert ordinal to character, return empty string if missing.<|endoftext|>
33608f10d22b51b7ffd11a5fcdb616d6c9f81394c0e5074550546402974242a4
def ord(self, char): 'Convert character to ordinal, return None if missing.' try: return self._inv_mapping[char] except KeyError as e: return None
Convert character to ordinal, return None if missing.
monobit/encoding.py
ord
trevorld/monobit
0
python
def ord(self, char): try: return self._inv_mapping[char] except KeyError as e: return None
def ord(self, char): try: return self._inv_mapping[char] except KeyError as e: return None<|docstring|>Convert character to ordinal, return None if missing.<|endoftext|>
20d0f3255a3596f8faca83d5365384da3413d76f1f0177f53f925cdb7054e319
@classmethod def override(cls, name, filename): 'Override an existing codepage or register an unknown one.' cls._registered[name] = filename
Override an existing codepage or register an unknown one.
monobit/encoding.py
override
trevorld/monobit
0
python
@classmethod def override(cls, name, filename): cls._registered[name] = filename
@classmethod def override(cls, name, filename): cls._registered[name] = filename<|docstring|>Override an existing codepage or register an unknown one.<|endoftext|>
a423b71b5852d032c11a5405f8f55f9a6ccd3b431edc3f7808587433602b642d
@staticmethod def chr(ordinal): 'Convert ordinal to character.' if (ordinal is None): return '' return chr(int(ordinal))
Convert ordinal to character.
monobit/encoding.py
chr
trevorld/monobit
0
python
@staticmethod def chr(ordinal): if (ordinal is None): return return chr(int(ordinal))
@staticmethod def chr(ordinal): if (ordinal is None): return return chr(int(ordinal))<|docstring|>Convert ordinal to character.<|endoftext|>
7597a4fd94eb3d3a05fb446871f0f440cc82fb9e6c01bb75809179066415ad81
@staticmethod def ord(char): 'Convert character to ordinal.' if (len(char) != 1): return None return ord(char)
Convert character to ordinal.
monobit/encoding.py
ord
trevorld/monobit
0
python
@staticmethod def ord(char): if (len(char) != 1): return None return ord(char)
@staticmethod def ord(char): if (len(char) != 1): return None return ord(char)<|docstring|>Convert character to ordinal.<|endoftext|>
40d6b1f20378b9fcd89934a7108794d234b0d213831210e84941885fbae7a6b3
def external_sort(infile, outfile, sep, key=1): 'Externally sort and make unique csv files using built-in GNU Coreutils.\n\n Args:\n infile (str): the csv file to sort\n outfile (str): the location to write the sorted csv file\n sep (str): the separator for the columns\n key (int): the column containing the id to sort on\n\n Returns:\n (str): the path to the outfile created\n\n References:\n GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html\n\n ' try: sortoptions = ['sort', '-t', sep, '-k', str(key), '-s', '-u', '-o', outfile, infile] subprocess.run(sortoptions, check=True) return outfile except: logging.exception('Input files could not be sorterd')
Externally sort and make unique csv files using built-in GNU Coreutils. Args: infile (str): the csv file to sort outfile (str): the location to write the sorted csv file sep (str): the separator for the columns key (int): the column containing the id to sort on Returns: (str): the path to the outfile created References: GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html
vica/tfrecord_maker.py
external_sort
USDA-ARS-GBRU/vica
4
python
def external_sort(infile, outfile, sep, key=1): 'Externally sort and make unique csv files using built-in GNU Coreutils.\n\n Args:\n infile (str): the csv file to sort\n outfile (str): the location to write the sorted csv file\n sep (str): the separator for the columns\n key (int): the column containing the id to sort on\n\n Returns:\n (str): the path to the outfile created\n\n References:\n GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html\n\n ' try: sortoptions = ['sort', '-t', sep, '-k', str(key), '-s', '-u', '-o', outfile, infile] subprocess.run(sortoptions, check=True) return outfile except: logging.exception('Input files could not be sorterd')
def external_sort(infile, outfile, sep, key=1): 'Externally sort and make unique csv files using built-in GNU Coreutils.\n\n Args:\n infile (str): the csv file to sort\n outfile (str): the location to write the sorted csv file\n sep (str): the separator for the columns\n key (int): the column containing the id to sort on\n\n Returns:\n (str): the path to the outfile created\n\n References:\n GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html\n\n ' try: sortoptions = ['sort', '-t', sep, '-k', str(key), '-s', '-u', '-o', outfile, infile] subprocess.run(sortoptions, check=True) return outfile except: logging.exception('Input files could not be sorterd')<|docstring|>Externally sort and make unique csv files using built-in GNU Coreutils. Args: infile (str): the csv file to sort outfile (str): the location to write the sorted csv file sep (str): the separator for the columns key (int): the column containing the id to sort on Returns: (str): the path to the outfile created References: GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html<|endoftext|>
c97927c6612f7143a61dfcf2a1eb216492d4de5b22d8e3e890433553950eab30
def join(kmerfile, codonfile, minhashfile, dtemp): 'Externally join with built-in GNU Coreutils in the order\n label, kmers, codons ,minhash\n\n Args:\n kmerfile (str): Kmer csv file\n codonfile (str): Codon csv file\n minhashfile (str): Minhash csv file\n dtemp (str): the path to a temporary directory\n\n Returns:\n (str) the path of the merged file created\n\n References:\n GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html\n\n ' kcfile = os.path.join(dtemp, 'kcfile.csv') mergefile = os.path.join(dtemp, 'mergefile.csv') try: with open(kcfile, 'w') as kcf: options = ['join', '-t', ',', '-1', '1', '-2', '1', kmerfile, codonfile] subprocess.run(options, check=True, stdout=kcf) with open(mergefile, 'w') as mf: options2 = ['join', '-t', ',', '-1', '1', '-2', '1', kcfile, minhashfile] subprocess.run(options2, check=True, stdout=mf) os.remove(kcfile) return mergefile except RuntimeError: logging.exception('Could not merge csv files using unix join command')
Externally join with built-in GNU Coreutils in the order label, kmers, codons ,minhash Args: kmerfile (str): Kmer csv file codonfile (str): Codon csv file minhashfile (str): Minhash csv file dtemp (str): the path to a temporary directory Returns: (str) the path of the merged file created References: GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html
vica/tfrecord_maker.py
join
USDA-ARS-GBRU/vica
4
python
def join(kmerfile, codonfile, minhashfile, dtemp): 'Externally join with built-in GNU Coreutils in the order\n label, kmers, codons ,minhash\n\n Args:\n kmerfile (str): Kmer csv file\n codonfile (str): Codon csv file\n minhashfile (str): Minhash csv file\n dtemp (str): the path to a temporary directory\n\n Returns:\n (str) the path of the merged file created\n\n References:\n GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html\n\n ' kcfile = os.path.join(dtemp, 'kcfile.csv') mergefile = os.path.join(dtemp, 'mergefile.csv') try: with open(kcfile, 'w') as kcf: options = ['join', '-t', ',', '-1', '1', '-2', '1', kmerfile, codonfile] subprocess.run(options, check=True, stdout=kcf) with open(mergefile, 'w') as mf: options2 = ['join', '-t', ',', '-1', '1', '-2', '1', kcfile, minhashfile] subprocess.run(options2, check=True, stdout=mf) os.remove(kcfile) return mergefile except RuntimeError: logging.exception('Could not merge csv files using unix join command')
def join(kmerfile, codonfile, minhashfile, dtemp): 'Externally join with built-in GNU Coreutils in the order\n label, kmers, codons ,minhash\n\n Args:\n kmerfile (str): Kmer csv file\n codonfile (str): Codon csv file\n minhashfile (str): Minhash csv file\n dtemp (str): the path to a temporary directory\n\n Returns:\n (str) the path of the merged file created\n\n References:\n GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html\n\n ' kcfile = os.path.join(dtemp, 'kcfile.csv') mergefile = os.path.join(dtemp, 'mergefile.csv') try: with open(kcfile, 'w') as kcf: options = ['join', '-t', ',', '-1', '1', '-2', '1', kmerfile, codonfile] subprocess.run(options, check=True, stdout=kcf) with open(mergefile, 'w') as mf: options2 = ['join', '-t', ',', '-1', '1', '-2', '1', kcfile, minhashfile] subprocess.run(options2, check=True, stdout=mf) os.remove(kcfile) return mergefile except RuntimeError: logging.exception('Could not merge csv files using unix join command')<|docstring|>Externally join with built-in GNU Coreutils in the order label, kmers, codons ,minhash Args: kmerfile (str): Kmer csv file codonfile (str): Codon csv file minhashfile (str): Minhash csv file dtemp (str): the path to a temporary directory Returns: (str) the path of the merged file created References: GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html<|endoftext|>
3aaf815538f65e8acc2c98ba56edec1d7f2bcc9761e7c3d99d44817e5ef6e2ab
def count_features(**kwargs): 'Given key-value pairs of fileypes: file locations return a dictionary\n of filetypes: feature lengths.\n\n Args:\n **bar (**kwargs): Key-Value pairs of file_type: file_path\n\n Returns:\n (dict): A dict with {file_type (str): feature_length (int)}\n\n ' featuredict = {} for (key, val) in kwargs.items(): with open(val, 'r') as f: features = (len(f.readline().strip().split(',')) - 1) featuredict[key] = features return featuredict
Given key-value pairs of fileypes: file locations return a dictionary of filetypes: feature lengths. Args: **bar (**kwargs): Key-Value pairs of file_type: file_path Returns: (dict): A dict with {file_type (str): feature_length (int)}
vica/tfrecord_maker.py
count_features
USDA-ARS-GBRU/vica
4
python
def count_features(**kwargs): 'Given key-value pairs of fileypes: file locations return a dictionary\n of filetypes: feature lengths.\n\n Args:\n **bar (**kwargs): Key-Value pairs of file_type: file_path\n\n Returns:\n (dict): A dict with {file_type (str): feature_length (int)}\n\n ' featuredict = {} for (key, val) in kwargs.items(): with open(val, 'r') as f: features = (len(f.readline().strip().split(',')) - 1) featuredict[key] = features return featuredict
def count_features(**kwargs): 'Given key-value pairs of fileypes: file locations return a dictionary\n of filetypes: feature lengths.\n\n Args:\n **bar (**kwargs): Key-Value pairs of file_type: file_path\n\n Returns:\n (dict): A dict with {file_type (str): feature_length (int)}\n\n ' featuredict = {} for (key, val) in kwargs.items(): with open(val, 'r') as f: features = (len(f.readline().strip().split(',')) - 1) featuredict[key] = features return featuredict<|docstring|>Given key-value pairs of fileypes: file locations return a dictionary of filetypes: feature lengths. Args: **bar (**kwargs): Key-Value pairs of file_type: file_path Returns: (dict): A dict with {file_type (str): feature_length (int)}<|endoftext|>
de80f069e79f3817465afdbbdcc1fe42e82ae6eef85aebfc8e95c0907795d98d
def _csv_to_tfrecords(kmerfile, codonfile, minhashfile, mergefile, tfrecordfile, label): 'Convert csv files of features created by Vica into a TFRecords file.\n\n Args:\n kmerfile (str): a csv file contianing ilr transformed kmer count data\n codonfile (str): a csv file contianing ilr transformed codon count data\n minhashfile (str): a csv file containing scores for selected\n phylogenetic levels generated by the vica.minhash module\n mergefile (str): a merged CSV file created by `vica.tfrecord_maker.join`\n tfrecordfile (str): the location to write the TTRecords files\n label (int): an integer label to add to each TFRecords example. Use\n one sequential integer for each class.\n\n Returns:\n None\n\n ' writer = tf.python_io.TFRecordWriter(tfrecordfile) features = count_features(kmers=kmerfile, codons=codonfile, minhash=minhashfile) kstart = 1 kend = (features['kmers'] + 1) cend = (kend + features['codons']) i = 0 with open(mergefile, 'r') as mergedata: for (i, lstring) in enumerate(mergedata, 1): line = lstring.strip().split(',') lab = line[0] kdat = np.array(line[kstart:kend], dtype='float32') cdat = np.array(line[kend:cend], dtype='float32') mdat = np.array(line[cend:], dtype='float32') example = tf.train.Example(features=tf.train.Features(feature={'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(lab)])), 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])), 'kmer': tf.train.Feature(float_list=tf.train.FloatList(value=kdat)), 'codon': tf.train.Feature(float_list=tf.train.FloatList(value=cdat)), 'minhash': tf.train.Feature(float_list=tf.train.FloatList(value=mdat))})) writer.write(example.SerializeToString()) writer.close() logging.info('Successfully converted {} records to to TFrecord format'.format(i))
Convert csv files of features created by Vica into a TFRecords file. Args: kmerfile (str): a csv file contianing ilr transformed kmer count data codonfile (str): a csv file contianing ilr transformed codon count data minhashfile (str): a csv file containing scores for selected phylogenetic levels generated by the vica.minhash module mergefile (str): a merged CSV file created by `vica.tfrecord_maker.join` tfrecordfile (str): the location to write the TTRecords files label (int): an integer label to add to each TFRecords example. Use one sequential integer for each class. Returns: None
vica/tfrecord_maker.py
_csv_to_tfrecords
USDA-ARS-GBRU/vica
4
python
def _csv_to_tfrecords(kmerfile, codonfile, minhashfile, mergefile, tfrecordfile, label): 'Convert csv files of features created by Vica into a TFRecords file.\n\n Args:\n kmerfile (str): a csv file contianing ilr transformed kmer count data\n codonfile (str): a csv file contianing ilr transformed codon count data\n minhashfile (str): a csv file containing scores for selected\n phylogenetic levels generated by the vica.minhash module\n mergefile (str): a merged CSV file created by `vica.tfrecord_maker.join`\n tfrecordfile (str): the location to write the TTRecords files\n label (int): an integer label to add to each TFRecords example. Use\n one sequential integer for each class.\n\n Returns:\n None\n\n ' writer = tf.python_io.TFRecordWriter(tfrecordfile) features = count_features(kmers=kmerfile, codons=codonfile, minhash=minhashfile) kstart = 1 kend = (features['kmers'] + 1) cend = (kend + features['codons']) i = 0 with open(mergefile, 'r') as mergedata: for (i, lstring) in enumerate(mergedata, 1): line = lstring.strip().split(',') lab = line[0] kdat = np.array(line[kstart:kend], dtype='float32') cdat = np.array(line[kend:cend], dtype='float32') mdat = np.array(line[cend:], dtype='float32') example = tf.train.Example(features=tf.train.Features(feature={'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(lab)])), 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])), 'kmer': tf.train.Feature(float_list=tf.train.FloatList(value=kdat)), 'codon': tf.train.Feature(float_list=tf.train.FloatList(value=cdat)), 'minhash': tf.train.Feature(float_list=tf.train.FloatList(value=mdat))})) writer.write(example.SerializeToString()) writer.close() logging.info('Successfully converted {} records to to TFrecord format'.format(i))
def _csv_to_tfrecords(kmerfile, codonfile, minhashfile, mergefile, tfrecordfile, label): 'Convert csv files of features created by Vica into a TFRecords file.\n\n Args:\n kmerfile (str): a csv file contianing ilr transformed kmer count data\n codonfile (str): a csv file contianing ilr transformed codon count data\n minhashfile (str): a csv file containing scores for selected\n phylogenetic levels generated by the vica.minhash module\n mergefile (str): a merged CSV file created by `vica.tfrecord_maker.join`\n tfrecordfile (str): the location to write the TTRecords files\n label (int): an integer label to add to each TFRecords example. Use\n one sequential integer for each class.\n\n Returns:\n None\n\n ' writer = tf.python_io.TFRecordWriter(tfrecordfile) features = count_features(kmers=kmerfile, codons=codonfile, minhash=minhashfile) kstart = 1 kend = (features['kmers'] + 1) cend = (kend + features['codons']) i = 0 with open(mergefile, 'r') as mergedata: for (i, lstring) in enumerate(mergedata, 1): line = lstring.strip().split(',') lab = line[0] kdat = np.array(line[kstart:kend], dtype='float32') cdat = np.array(line[kend:cend], dtype='float32') mdat = np.array(line[cend:], dtype='float32') example = tf.train.Example(features=tf.train.Features(feature={'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(lab)])), 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])), 'kmer': tf.train.Feature(float_list=tf.train.FloatList(value=kdat)), 'codon': tf.train.Feature(float_list=tf.train.FloatList(value=cdat)), 'minhash': tf.train.Feature(float_list=tf.train.FloatList(value=mdat))})) writer.write(example.SerializeToString()) writer.close() logging.info('Successfully converted {} records to to TFrecord format'.format(i))<|docstring|>Convert csv files of features created by Vica into a TFRecords file. 
Args: kmerfile (str): a csv file contianing ilr transformed kmer count data codonfile (str): a csv file contianing ilr transformed codon count data minhashfile (str): a csv file containing scores for selected phylogenetic levels generated by the vica.minhash module mergefile (str): a merged CSV file created by `vica.tfrecord_maker.join` tfrecordfile (str): the location to write the TTRecords files label (int): an integer label to add to each TFRecords example. Use one sequential integer for each class. Returns: None<|endoftext|>
8dc2586d29463e2f9ed3f69d9cdf055cac7fb791421424acb177a21fd8959012
def convert_to_tfrecords(dtemp, kmerfile, codonfile, minhashfile, tfrecordfile, label, sort=False): 'Combines features files created by Vica into a TFRecords file.\n\n Args:\n dtemp (str): a temporary directory path\n kmerfile (str): a csv file containing ilr transformed kmer count data\n codonfile (str): a csv file containing ilr transformed codon count data\n minhashfile (str): a csv file containing scores for selected\n phylogenetic levels generated by the vica.minhash module\n tfrecordfile (str): the location to write the TTRecords files\n label (int): an integer label to add to each TFRecords example. Use\n one sequential integer for each class.\n sort (bool): Whether to externally sot the files brior to attempting to\n merge them. Doing so allows missing data lines in files to be\n fixed. Requires GNU Utils sort, which is standard on POSIX systems.\n\n Returns:\n None\n\n ' if sort: ksorted = os.path.join(dtemp, 'kmer_sorted.csv') csorted = os.path.join(dtemp, 'codon_sorted.csv') msorted = os.path.join(dtemp, 'minhash_sorted.csv') mergefile = os.path.join(dtemp, 'mergefile.csv') external_sort(infile=kmerfile, outfile=ksorted, sep=',') external_sort(infile=codonfile, outfile=csorted, sep=',') external_sort(infile=minhashfile, outfile=msorted, sep=',') else: ksorted = kmerfile csorted = codonfile msorted = minhashfile mergefile = join(kmerfile=ksorted, codonfile=csorted, minhashfile=msorted, dtemp=dtemp) _csv_to_tfrecords(kmerfile=ksorted, codonfile=csorted, minhashfile=msorted, mergefile=mergefile, tfrecordfile=tfrecordfile, label=label)
Combines features files created by Vica into a TFRecords file. Args: dtemp (str): a temporary directory path kmerfile (str): a csv file containing ilr transformed kmer count data codonfile (str): a csv file containing ilr transformed codon count data minhashfile (str): a csv file containing scores for selected phylogenetic levels generated by the vica.minhash module tfrecordfile (str): the location to write the TTRecords files label (int): an integer label to add to each TFRecords example. Use one sequential integer for each class. sort (bool): Whether to externally sot the files brior to attempting to merge them. Doing so allows missing data lines in files to be fixed. Requires GNU Utils sort, which is standard on POSIX systems. Returns: None
vica/tfrecord_maker.py
convert_to_tfrecords
USDA-ARS-GBRU/vica
4
python
def convert_to_tfrecords(dtemp, kmerfile, codonfile, minhashfile, tfrecordfile, label, sort=False): 'Combines features files created by Vica into a TFRecords file.\n\n Args:\n dtemp (str): a temporary directory path\n kmerfile (str): a csv file containing ilr transformed kmer count data\n codonfile (str): a csv file containing ilr transformed codon count data\n minhashfile (str): a csv file containing scores for selected\n phylogenetic levels generated by the vica.minhash module\n tfrecordfile (str): the location to write the TTRecords files\n label (int): an integer label to add to each TFRecords example. Use\n one sequential integer for each class.\n sort (bool): Whether to externally sot the files brior to attempting to\n merge them. Doing so allows missing data lines in files to be\n fixed. Requires GNU Utils sort, which is standard on POSIX systems.\n\n Returns:\n None\n\n ' if sort: ksorted = os.path.join(dtemp, 'kmer_sorted.csv') csorted = os.path.join(dtemp, 'codon_sorted.csv') msorted = os.path.join(dtemp, 'minhash_sorted.csv') mergefile = os.path.join(dtemp, 'mergefile.csv') external_sort(infile=kmerfile, outfile=ksorted, sep=',') external_sort(infile=codonfile, outfile=csorted, sep=',') external_sort(infile=minhashfile, outfile=msorted, sep=',') else: ksorted = kmerfile csorted = codonfile msorted = minhashfile mergefile = join(kmerfile=ksorted, codonfile=csorted, minhashfile=msorted, dtemp=dtemp) _csv_to_tfrecords(kmerfile=ksorted, codonfile=csorted, minhashfile=msorted, mergefile=mergefile, tfrecordfile=tfrecordfile, label=label)
def convert_to_tfrecords(dtemp, kmerfile, codonfile, minhashfile, tfrecordfile, label, sort=False): 'Combines features files created by Vica into a TFRecords file.\n\n Args:\n dtemp (str): a temporary directory path\n kmerfile (str): a csv file containing ilr transformed kmer count data\n codonfile (str): a csv file containing ilr transformed codon count data\n minhashfile (str): a csv file containing scores for selected\n phylogenetic levels generated by the vica.minhash module\n tfrecordfile (str): the location to write the TTRecords files\n label (int): an integer label to add to each TFRecords example. Use\n one sequential integer for each class.\n sort (bool): Whether to externally sot the files brior to attempting to\n merge them. Doing so allows missing data lines in files to be\n fixed. Requires GNU Utils sort, which is standard on POSIX systems.\n\n Returns:\n None\n\n ' if sort: ksorted = os.path.join(dtemp, 'kmer_sorted.csv') csorted = os.path.join(dtemp, 'codon_sorted.csv') msorted = os.path.join(dtemp, 'minhash_sorted.csv') mergefile = os.path.join(dtemp, 'mergefile.csv') external_sort(infile=kmerfile, outfile=ksorted, sep=',') external_sort(infile=codonfile, outfile=csorted, sep=',') external_sort(infile=minhashfile, outfile=msorted, sep=',') else: ksorted = kmerfile csorted = codonfile msorted = minhashfile mergefile = join(kmerfile=ksorted, codonfile=csorted, minhashfile=msorted, dtemp=dtemp) _csv_to_tfrecords(kmerfile=ksorted, codonfile=csorted, minhashfile=msorted, mergefile=mergefile, tfrecordfile=tfrecordfile, label=label)<|docstring|>Combines features files created by Vica into a TFRecords file. 
Args: dtemp (str): a temporary directory path kmerfile (str): a csv file containing ilr transformed kmer count data codonfile (str): a csv file containing ilr transformed codon count data minhashfile (str): a csv file containing scores for selected phylogenetic levels generated by the vica.minhash module tfrecordfile (str): the location to write the TTRecords files label (int): an integer label to add to each TFRecords example. Use one sequential integer for each class. sort (bool): Whether to externally sot the files brior to attempting to merge them. Doing so allows missing data lines in files to be fixed. Requires GNU Utils sort, which is standard on POSIX systems. Returns: None<|endoftext|>
1686aa9a11fe2e1868becb850b2e199eaf7582cb36ac4f1dbe11576f9427d7be
def query(db_session: Session, qualifier: str=None, qualifier_begin_date: str=None, qualifier_end_date: str=None, station_timezone: int=None, station_network_type: str=None, belongs_to: str=None, limit: int=25, offset: int=0) -> List[stationqualifier_schema.StationQualifier]: '\n This function builds a query based on the given parameter and returns `limit` numbers of `station_qualifier` row skipping\n `offset` number of rows\n ' try: q = db_session.query(models.Stationqualifier) if (qualifier is not None): q = q.filter_by(qualifier=qualifier) if (qualifier_begin_date is not None): q = q.filter_by(qualifierBeginDate=qualifier_begin_date) if (qualifier_end_date is not None): q = q.filter_by(qualifierEndDate=qualifier_end_date) if (station_timezone is not None): q = q.filter_by(stationTimeZone=station_timezone) if (station_network_type is not None): q = q.filter_by(stationNetworkType=station_network_type) if (belongs_to is not None): q = q.filter_by(belongsTo=belongs_to) return [stationqualifier_schema.StationQualifier.from_orm(s) for s in q.offset(offset).limit(limit).all()] except Exception as e: logger.exception(e) raise FailedGettingStationQualifierList('Failed getting station_qualifier list.')
This function builds a query based on the given parameter and returns `limit` numbers of `station_qualifier` row skipping `offset` number of rows
src/apps/climsoft/services/stationqualifier_service.py
query
opencdms/opencdms-api
3
python
def query(db_session: Session, qualifier: str=None, qualifier_begin_date: str=None, qualifier_end_date: str=None, station_timezone: int=None, station_network_type: str=None, belongs_to: str=None, limit: int=25, offset: int=0) -> List[stationqualifier_schema.StationQualifier]: '\n This function builds a query based on the given parameter and returns `limit` numbers of `station_qualifier` row skipping\n `offset` number of rows\n ' try: q = db_session.query(models.Stationqualifier) if (qualifier is not None): q = q.filter_by(qualifier=qualifier) if (qualifier_begin_date is not None): q = q.filter_by(qualifierBeginDate=qualifier_begin_date) if (qualifier_end_date is not None): q = q.filter_by(qualifierEndDate=qualifier_end_date) if (station_timezone is not None): q = q.filter_by(stationTimeZone=station_timezone) if (station_network_type is not None): q = q.filter_by(stationNetworkType=station_network_type) if (belongs_to is not None): q = q.filter_by(belongsTo=belongs_to) return [stationqualifier_schema.StationQualifier.from_orm(s) for s in q.offset(offset).limit(limit).all()] except Exception as e: logger.exception(e) raise FailedGettingStationQualifierList('Failed getting station_qualifier list.')
def query(db_session: Session, qualifier: str=None, qualifier_begin_date: str=None, qualifier_end_date: str=None, station_timezone: int=None, station_network_type: str=None, belongs_to: str=None, limit: int=25, offset: int=0) -> List[stationqualifier_schema.StationQualifier]: '\n This function builds a query based on the given parameter and returns `limit` numbers of `station_qualifier` row skipping\n `offset` number of rows\n ' try: q = db_session.query(models.Stationqualifier) if (qualifier is not None): q = q.filter_by(qualifier=qualifier) if (qualifier_begin_date is not None): q = q.filter_by(qualifierBeginDate=qualifier_begin_date) if (qualifier_end_date is not None): q = q.filter_by(qualifierEndDate=qualifier_end_date) if (station_timezone is not None): q = q.filter_by(stationTimeZone=station_timezone) if (station_network_type is not None): q = q.filter_by(stationNetworkType=station_network_type) if (belongs_to is not None): q = q.filter_by(belongsTo=belongs_to) return [stationqualifier_schema.StationQualifier.from_orm(s) for s in q.offset(offset).limit(limit).all()] except Exception as e: logger.exception(e) raise FailedGettingStationQualifierList('Failed getting station_qualifier list.')<|docstring|>This function builds a query based on the given parameter and returns `limit` numbers of `station_qualifier` row skipping `offset` number of rows<|endoftext|>
1a6bce0e6c56d1bf456fbb9109189512c1a3c3ffb6a30b3d0591db85b51f68f9
def from_didl_string(string): "Convert a unicode xml string to a list of `DIDLObjects <DidlObject>`.\n\n Args:\n string (str): A unicode string containing an XML representation of one\n or more DIDL-Lite items (in the form ``'<DIDL-Lite ...>\n ...</DIDL-Lite>'``)\n\n Returns:\n list: A list of one or more instances of `DidlObject` or a subclass\n " items = [] root = XML.fromstring(string.encode('utf-8')) for elt in root: if (elt.tag.endswith('item') or elt.tag.endswith('container')): item_class = elt.findtext(ns_tag('upnp', 'class')) cls = didl_class_to_soco_class(item_class) item = cls.from_element(elt) item = attempt_datastructure_upgrade(item) items.append(item) else: raise DIDLMetadataError(('Illegal child of DIDL element: <%s>' % elt.tag)) _LOG.debug('Created data structures: %.20s (CUT) from Didl string "%.20s" (CUT)', items, string) return items
Convert a unicode xml string to a list of `DIDLObjects <DidlObject>`. Args: string (str): A unicode string containing an XML representation of one or more DIDL-Lite items (in the form ``'<DIDL-Lite ...> ...</DIDL-Lite>'``) Returns: list: A list of one or more instances of `DidlObject` or a subclass
soco/data_structures_entry.py
from_didl_string
busterbeam/SoCo
1,149
python
def from_didl_string(string): "Convert a unicode xml string to a list of `DIDLObjects <DidlObject>`.\n\n Args:\n string (str): A unicode string containing an XML representation of one\n or more DIDL-Lite items (in the form ``'<DIDL-Lite ...>\n ...</DIDL-Lite>'``)\n\n Returns:\n list: A list of one or more instances of `DidlObject` or a subclass\n " items = [] root = XML.fromstring(string.encode('utf-8')) for elt in root: if (elt.tag.endswith('item') or elt.tag.endswith('container')): item_class = elt.findtext(ns_tag('upnp', 'class')) cls = didl_class_to_soco_class(item_class) item = cls.from_element(elt) item = attempt_datastructure_upgrade(item) items.append(item) else: raise DIDLMetadataError(('Illegal child of DIDL element: <%s>' % elt.tag)) _LOG.debug('Created data structures: %.20s (CUT) from Didl string "%.20s" (CUT)', items, string) return items
def from_didl_string(string): "Convert a unicode xml string to a list of `DIDLObjects <DidlObject>`.\n\n Args:\n string (str): A unicode string containing an XML representation of one\n or more DIDL-Lite items (in the form ``'<DIDL-Lite ...>\n ...</DIDL-Lite>'``)\n\n Returns:\n list: A list of one or more instances of `DidlObject` or a subclass\n " items = [] root = XML.fromstring(string.encode('utf-8')) for elt in root: if (elt.tag.endswith('item') or elt.tag.endswith('container')): item_class = elt.findtext(ns_tag('upnp', 'class')) cls = didl_class_to_soco_class(item_class) item = cls.from_element(elt) item = attempt_datastructure_upgrade(item) items.append(item) else: raise DIDLMetadataError(('Illegal child of DIDL element: <%s>' % elt.tag)) _LOG.debug('Created data structures: %.20s (CUT) from Didl string "%.20s" (CUT)', items, string) return items<|docstring|>Convert a unicode xml string to a list of `DIDLObjects <DidlObject>`. Args: string (str): A unicode string containing an XML representation of one or more DIDL-Lite items (in the form ``'<DIDL-Lite ...> ...</DIDL-Lite>'``) Returns: list: A list of one or more instances of `DidlObject` or a subclass<|endoftext|>
d3391566b155ff97531c9bdc52ca772dc3d8be7d32d5c47d73876212a3830ea3
def attempt_datastructure_upgrade(didl_item): 'Attempt to upgrade a didl_item to a music services data structure\n if it originates from a music services\n\n ' try: resource = didl_item.resources[0] except IndexError: _LOG.debug('Upgrade not possible, no resources') return didl_item if (resource.uri and resource.uri.startswith('x-sonos-http')): uri = resource.uri path = urlparse(uri).path path = path.rsplit('.', 1)[0] item_id = '11111111{}'.format(path) metadata = {} for (key, value) in didl_item.to_dict().items(): if (key not in metadata): metadata[key] = value try: cls = get_class(DIDL_NAME_TO_QUALIFIED_MS_NAME[didl_item.__class__.__name__]) except KeyError: class_name = didl_item.__class__.__name__ message = 'DATA STRUCTURE UPGRADE FAIL. Unable to upgrade music library data structure to music service data structure because an entry is missing for %s in DIDL_NAME_TO_QUALIFIED_MS_NAME.' if (class_name in DIDL_UPGRADE_NAMES_TO_IGNORE): _LOG.debug(message, class_name) else: _LOG.warning(message, class_name) return didl_item upgraded_item = cls(item_id=item_id, desc=desc_from_uri(resource.uri), resources=didl_item.resources, uri=uri, metadata_dict=metadata) _LOG.debug('Item %s upgraded to %s', didl_item, upgraded_item) return upgraded_item _LOG.debug('Upgrade not necessary') return didl_item
Attempt to upgrade a didl_item to a music services data structure if it originates from a music services
soco/data_structures_entry.py
attempt_datastructure_upgrade
busterbeam/SoCo
1,149
python
def attempt_datastructure_upgrade(didl_item): 'Attempt to upgrade a didl_item to a music services data structure\n if it originates from a music services\n\n ' try: resource = didl_item.resources[0] except IndexError: _LOG.debug('Upgrade not possible, no resources') return didl_item if (resource.uri and resource.uri.startswith('x-sonos-http')): uri = resource.uri path = urlparse(uri).path path = path.rsplit('.', 1)[0] item_id = '11111111{}'.format(path) metadata = {} for (key, value) in didl_item.to_dict().items(): if (key not in metadata): metadata[key] = value try: cls = get_class(DIDL_NAME_TO_QUALIFIED_MS_NAME[didl_item.__class__.__name__]) except KeyError: class_name = didl_item.__class__.__name__ message = 'DATA STRUCTURE UPGRADE FAIL. Unable to upgrade music library data structure to music service data structure because an entry is missing for %s in DIDL_NAME_TO_QUALIFIED_MS_NAME.' if (class_name in DIDL_UPGRADE_NAMES_TO_IGNORE): _LOG.debug(message, class_name) else: _LOG.warning(message, class_name) return didl_item upgraded_item = cls(item_id=item_id, desc=desc_from_uri(resource.uri), resources=didl_item.resources, uri=uri, metadata_dict=metadata) _LOG.debug('Item %s upgraded to %s', didl_item, upgraded_item) return upgraded_item _LOG.debug('Upgrade not necessary') return didl_item
def attempt_datastructure_upgrade(didl_item): 'Attempt to upgrade a didl_item to a music services data structure\n if it originates from a music services\n\n ' try: resource = didl_item.resources[0] except IndexError: _LOG.debug('Upgrade not possible, no resources') return didl_item if (resource.uri and resource.uri.startswith('x-sonos-http')): uri = resource.uri path = urlparse(uri).path path = path.rsplit('.', 1)[0] item_id = '11111111{}'.format(path) metadata = {} for (key, value) in didl_item.to_dict().items(): if (key not in metadata): metadata[key] = value try: cls = get_class(DIDL_NAME_TO_QUALIFIED_MS_NAME[didl_item.__class__.__name__]) except KeyError: class_name = didl_item.__class__.__name__ message = 'DATA STRUCTURE UPGRADE FAIL. Unable to upgrade music library data structure to music service data structure because an entry is missing for %s in DIDL_NAME_TO_QUALIFIED_MS_NAME.' if (class_name in DIDL_UPGRADE_NAMES_TO_IGNORE): _LOG.debug(message, class_name) else: _LOG.warning(message, class_name) return didl_item upgraded_item = cls(item_id=item_id, desc=desc_from_uri(resource.uri), resources=didl_item.resources, uri=uri, metadata_dict=metadata) _LOG.debug('Item %s upgraded to %s', didl_item, upgraded_item) return upgraded_item _LOG.debug('Upgrade not necessary') return didl_item<|docstring|>Attempt to upgrade a didl_item to a music services data structure if it originates from a music services<|endoftext|>
91603962caa3416603b8d365cb833cfe65ac4fa3bdcd0f1b6dd43b5bb674695f
def sortedSquares(self, A): '\n :type A: List[int]\n :rtype: List[int]\n ' return self.mergeSort([(x ** 2) for x in A])
:type A: List[int] :rtype: List[int]
leetcode/arrays/squares_sorted_array.py
sortedSquares
Gaurav-Pande/DataStructures
5
python
def sortedSquares(self, A): '\n :type A: List[int]\n :rtype: List[int]\n ' return self.mergeSort([(x ** 2) for x in A])
def sortedSquares(self, A): '\n :type A: List[int]\n :rtype: List[int]\n ' return self.mergeSort([(x ** 2) for x in A])<|docstring|>:type A: List[int] :rtype: List[int]<|endoftext|>
2fa500c831a5724a0d9f13892d53b7cde483a6b25ed7e6f14edd29da1082b1bb
def planarfit(indirpf, rawfile, outfile, pfmat='pfitmatrix.csv', pf0file='pfitdata0.csv', pf1file='pfitdata1.csv', pf2file='pfitdata2.csv', histsteps=50, plot=False): '\n Extracts raw wind speeds from the raw flux file of EddyFlux as input for\n EDDYPFit. When EDDYFit is finished, the script loads the results and\n do plots. If user is satisfied, results are saved.\n\n\n Definition\n ----------\n planarfit(indirpf, rawfile, outfile, pfmat=\'pfitmatrix.csv\',\n pf0file=\'pfitdata0.csv\', pf1file=\'pfitdata1.csv\',\n pf2file=\'pfitdata2.csv\', histsteps=50):\n\n Input\n -----\n indirpf str, path of the folder where results will be saved\n rawfile str, path of the file with raw wind speeds from EddyFlux\n outfile str, name of the output file\n\n\n Optional Input\n --------------\n pfmat str, name of the pfitmatix file, default: \'pfitmatrix.csv\'\n pf0file str, name of the original wind speed file of EDDYPFit, default: \'pfitdata0.csv\'\n pf1file str, name of the one plane fit wind speed file of EDDYPFit, default: \'pfitdata1.csv\'\n pf2file str, name of the sectorial fit wind speed file of EDDYPFit, default: \'pfitdata2.csv\'\n histstep int, histogram steps for plotting (default=50)\n\n\n Output\n ------\n X_pfit.pdf plot with planar fit\n X_uvw.csv file with raw wind speeds\n X_wd.pdf plot with wind rose\n X_wdis.pdf plot with wind speed distributions\n\n\n License\n -------\n This file is part of the JAMS Python package, distributed under the MIT\n License. 
The JAMS Python package originates from the former UFZ Python library,\n Department of Computational Hydrosystems, Helmholtz Centre for Environmental\n Research - UFZ, Leipzig, Germany.\n\n Copyright (c) 2014 Arndt Piayda\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the "Software"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n\n History\n -------\n Written, AP, Aug 2014\n ' uvw = np.array(fread(('%s' % rawfile), skip=1, cskip=13, nc=3)) wdhor = np.array(fread(('%s' % rawfile), skip=1, cskip=20, nc=1)) header = np.array(sread(('%s' % rawfile), cskip=13, nc=3), dtype='|S5') alpha = ((120.0 * np.pi) / 180.0) uvw_trans = np.copy(uvw) uvw_trans[(:, 0)] = np.where(((uvw[(:, 0)] != (- 9999)) & (uvw[(:, 1)] != (- 9999))), ((uvw[(:, 0)] * np.cos(alpha)) + (uvw[(:, 1)] * np.sin(alpha))), (- 9999)) uvw_trans[(:, 1)] = np.where(((uvw[(:, 0)] != (- 9999)) & (uvw[(:, 1)] != (- 9999))), (((- uvw[(:, 0)]) * np.sin(alpha)) + (uvw[(:, 1)] * np.cos(alpha))), (- 9999)) file1 = open(('%s/%s_uvw.csv' % (indirpf, outfile[:(- 4)])), 'w') output = csv.writer(file1) output.writerow(header[0]) for i in xrange(np.shape(uvw_trans)[0]): output.writerow(uvw_trans[i]) file1.close() print("Do EddyPFit with the 'uvw.csv' file now!") ui1 = raw_input('Ready or quit (y/n)?: ').lower() if (ui1 != 'y'): sys.exit() header0 = np.array(sread(('%s/%s' % (indirpf, pf0file))), dtype='|S1') uvw0 = np.array(fread(('%s/%s' % (indirpf, pf0file)), skip=1), dtype=np.float) uvw0_trans = np.copy(uvw0) uvw0_trans[(:, 0)] = ((uvw0[(:, 0)] * np.cos(alpha)) - (uvw0[(:, 1)] * np.sin(alpha))) uvw0_trans[(:, 1)] = ((uvw0[(:, 0)] * np.sin(alpha)) + (uvw0[(:, 1)] * np.cos(alpha))) header1 = np.array(sread(('%s/%s' % (indirpf, pf1file))), dtype='|S1') uvw1 = np.array(fread(('%s/%s' % (indirpf, pf1file)), skip=1), dtype=np.float) uvw1_trans = np.copy(uvw1) uvw1_trans[(:, 0)] = ((uvw1[(:, 0)] * np.cos(alpha)) - (uvw1[(:, 1)] * np.sin(alpha))) uvw1_trans[(:, 1)] = ((uvw1[(:, 0)] * np.sin(alpha)) + (uvw1[(:, 1)] * np.cos(alpha))) header2 = np.array(sread(('%s/%s' % (indirpf, pf2file))), 
dtype='|S1') uvw2 = np.array(fread(('%s/%s' % (indirpf, pf2file)), skip=1), dtype=np.float) uvw2_trans = np.copy(uvw2) uvw2_trans[(:, 0)] = ((uvw2[(:, 0)] * np.cos(alpha)) - (uvw2[(:, 1)] * np.sin(alpha))) uvw2_trans[(:, 1)] = ((uvw2[(:, 0)] * np.sin(alpha)) + (uvw2[(:, 1)] * np.cos(alpha))) if plot: import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.mlab import griddata import matplotlib.cm as cm import matplotlib.mlab as mlab import matplotlib.backends.backend_pdf as pdf x0 = np.linspace(np.min(np.minimum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), np.max(np.maximum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), 500) y0 = np.linspace(np.min(np.minimum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), np.max(np.maximum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), 500) x1 = np.linspace(np.min(np.minimum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), np.max(np.maximum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), 500) y1 = np.linspace(np.min(np.minimum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), np.max(np.maximum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), 500) x2 = np.linspace(np.min(np.minimum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), np.max(np.maximum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), 500) y2 = np.linspace(np.min(np.minimum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), np.max(np.maximum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), 500) z0 = griddata(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)], uvw0_trans[(:, 2)], x0, y0, interp='linear') z1 = griddata(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)], uvw1_trans[(:, 2)], x1, y1, interp='linear') z2 = griddata(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)], uvw2_trans[(:, 2)], x2, y2, interp='linear') fig1 = plt.figure(1, figsize=(6, 13)) sub1 = fig1.add_subplot(311, aspect=1) fillings = sub1.contourf(x0, y0, z0, 20, cmap=plt.cm.jet) scat = sub1.scatter(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)], marker='o', c='b', s=0.2, zorder=10) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub1.get_xlim() 
sub1.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub1.get_ylim() sub1.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub1.set_title('Original wind components\nwith point data') cbar.set_label('w [m/s]') plt.ylabel('v [m/s]') sub2 = fig1.add_subplot(312, aspect=1) fillings = sub2.contourf(x1, y1, z1, 20, cmap=plt.cm.jet) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub2.get_xlim() sub2.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub2.get_ylim() sub2.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub2.set_title('One plane') cbar.set_label('w [m/s]') plt.ylabel('v [m/s]') sub3 = fig1.add_subplot(313, aspect=1) fillings = sub3.contourf(x2, y2, z2, 20, cmap=plt.cm.jet) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub3.get_xlim() sub3.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub3.get_ylim() sub3.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub3.set_title('Sectorial') cbar.set_label('w [m/s]') plt.xlabel('u [m/s]') plt.ylabel('v [m/s]') mi = np.min(np.array([np.min(uvw0_trans[(:, 2)]), np.min(uvw1_trans[(:, 2)]), np.min(uvw2_trans[(:, 2)])])) ma = np.max(np.array([np.max(uvw0_trans[(:, 2)]), np.max(uvw1_trans[(:, 2)]), np.max(uvw2_trans[(:, 2)])])) steps = np.abs(((ma - mi) / histsteps)) bins = np.arange(mi, (ma + steps), steps) fig2 = plt.figure(2, figsize=(6, 13)) fig2.subplots_adjust(hspace=0.3) sub4 = fig2.add_subplot(311) (n0, bins0, patches0) = sub4.hist(uvw0_trans[(:, 2)], bins, color='b', histtype='bar') ylimits = sub4.get_ylim() sub4.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.ylabel('count') plt.title(('Original w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw0_trans[(:, 2)]), np.var(uvw0_trans[(:, 2)]), skew(uvw0_trans[(:, 2)])))) sub5 = fig2.add_subplot(312) (n1, bins1, patches1) = sub5.hist(uvw1_trans[(:, 2)], 
bins, color='g', histtype='bar') ylimits = sub5.get_ylim() sub5.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.ylabel('count') plt.title(('One plane w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw1_trans[(:, 2)]), np.var(uvw1_trans[(:, 2)]), skew(uvw1_trans[(:, 2)])))) sub6 = fig2.add_subplot(313) (n2, bins2, patches2) = sub6.hist(uvw2_trans[(:, 2)], bins, color='r', histtype='bar') ylimits = sub6.get_ylim() sub6.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.xlabel('Classes [m/s]') plt.ylabel('count') plt.title(('Sectorial w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw2_trans[(:, 2)]), np.var(uvw2_trans[(:, 2)]), skew(uvw2_trans[(:, 2)])))) fig3 = plt.figure(3, figsize=(6, 6)) pol = fig3.add_subplot(111, polar=True) (hist, bin_edges) = np.histogram(wdhor, bins=36, range=(0, 360)) x = (90 - np.arange(5, 365, 10)) x = [((i * pi) / 180.0) for i in x] pol.bar(x, hist, width=((10 * pi) / 180)) pol.set_xticklabels(['$\\sf{90\\degree}$', '$\\sf{45\\degree}$', '$\\sf{0\\degree}$', '$\\sf{315\\degree}$', '$\\sf{270\\degree}$', '$\\sf{225\\degree}$', '$\\sf{180\\degree}$', '$\\sf{135\\degree}$'], fontsize=15) plt.title('Horizontal wind direction frequency') plt.show() print('Satisfied with the fit?\ny will save the figures, n will exit without saving!') ui2 = raw_input('(y/n)?: ').lower() if (ui2 != 'y'): sys.exit() if plot: pp1 = pdf.PdfPages(('%s/%s_pfit.pdf' % (indirpf, outfile[:(- 4)]))) pp2 = pdf.PdfPages(('%s/%s_wdis.pdf' % (indirpf, outfile[:(- 4)]))) pp3 = pdf.PdfPages(('%s/%s_wd.pdf' % (indirpf, outfile[:(- 4)]))) fig1.savefig(pp1, format='pdf') fig2.savefig(pp2, format='pdf') fig3.savefig(pp3, format='pdf') pp1.close() pp2.close() pp3.close() print('Rename EddyPFit files?') ui3 = raw_input('(y/n)?: ').lower() if (ui3 != 'y'): sys.exit() os.rename(('%s/%s' % (indirpf, pf0file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf0file[:(- 4)]))) os.rename(('%s/%s' 
% (indirpf, pf1file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf1file[:(- 4)]))) os.rename(('%s/%s' % (indirpf, pf2file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf2file[:(- 4)]))) os.rename(('%s/%s' % (indirpf, pfmat)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pfmat[:(- 4)])))
Extracts raw wind speeds from the raw flux file of EddyFlux as input for EDDYPFit. When EDDYFit is finished, the script loads the results and do plots. If user is satisfied, results are saved. Definition ---------- planarfit(indirpf, rawfile, outfile, pfmat='pfitmatrix.csv', pf0file='pfitdata0.csv', pf1file='pfitdata1.csv', pf2file='pfitdata2.csv', histsteps=50): Input ----- indirpf str, path of the folder where results will be saved rawfile str, path of the file with raw wind speeds from EddyFlux outfile str, name of the output file Optional Input -------------- pfmat str, name of the pfitmatix file, default: 'pfitmatrix.csv' pf0file str, name of the original wind speed file of EDDYPFit, default: 'pfitdata0.csv' pf1file str, name of the one plane fit wind speed file of EDDYPFit, default: 'pfitdata1.csv' pf2file str, name of the sectorial fit wind speed file of EDDYPFit, default: 'pfitdata2.csv' histstep int, histogram steps for plotting (default=50) Output ------ X_pfit.pdf plot with planar fit X_uvw.csv file with raw wind speeds X_wd.pdf plot with wind rose X_wdis.pdf plot with wind speed distributions License ------- This file is part of the JAMS Python package, distributed under the MIT License. The JAMS Python package originates from the former UFZ Python library, Department of Computational Hydrosystems, Helmholtz Centre for Environmental Research - UFZ, Leipzig, Germany. Copyright (c) 2014 Arndt Piayda Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. History ------- Written, AP, Aug 2014
jams/eddybox/planarfit.py
planarfit
MuellerSeb/jams_python
9
python
def planarfit(indirpf, rawfile, outfile, pfmat='pfitmatrix.csv', pf0file='pfitdata0.csv', pf1file='pfitdata1.csv', pf2file='pfitdata2.csv', histsteps=50, plot=False): '\n Extracts raw wind speeds from the raw flux file of EddyFlux as input for\n EDDYPFit. When EDDYFit is finished, the script loads the results and\n do plots. If user is satisfied, results are saved.\n\n\n Definition\n ----------\n planarfit(indirpf, rawfile, outfile, pfmat=\'pfitmatrix.csv\',\n pf0file=\'pfitdata0.csv\', pf1file=\'pfitdata1.csv\',\n pf2file=\'pfitdata2.csv\', histsteps=50):\n\n Input\n -----\n indirpf str, path of the folder where results will be saved\n rawfile str, path of the file with raw wind speeds from EddyFlux\n outfile str, name of the output file\n\n\n Optional Input\n --------------\n pfmat str, name of the pfitmatix file, default: \'pfitmatrix.csv\'\n pf0file str, name of the original wind speed file of EDDYPFit, default: \'pfitdata0.csv\'\n pf1file str, name of the one plane fit wind speed file of EDDYPFit, default: \'pfitdata1.csv\'\n pf2file str, name of the sectorial fit wind speed file of EDDYPFit, default: \'pfitdata2.csv\'\n histstep int, histogram steps for plotting (default=50)\n\n\n Output\n ------\n X_pfit.pdf plot with planar fit\n X_uvw.csv file with raw wind speeds\n X_wd.pdf plot with wind rose\n X_wdis.pdf plot with wind speed distributions\n\n\n License\n -------\n This file is part of the JAMS Python package, distributed under the MIT\n License. 
The JAMS Python package originates from the former UFZ Python library,\n Department of Computational Hydrosystems, Helmholtz Centre for Environmental\n Research - UFZ, Leipzig, Germany.\n\n Copyright (c) 2014 Arndt Piayda\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the "Software"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n\n History\n -------\n Written, AP, Aug 2014\n ' uvw = np.array(fread(('%s' % rawfile), skip=1, cskip=13, nc=3)) wdhor = np.array(fread(('%s' % rawfile), skip=1, cskip=20, nc=1)) header = np.array(sread(('%s' % rawfile), cskip=13, nc=3), dtype='|S5') alpha = ((120.0 * np.pi) / 180.0) uvw_trans = np.copy(uvw) uvw_trans[(:, 0)] = np.where(((uvw[(:, 0)] != (- 9999)) & (uvw[(:, 1)] != (- 9999))), ((uvw[(:, 0)] * np.cos(alpha)) + (uvw[(:, 1)] * np.sin(alpha))), (- 9999)) uvw_trans[(:, 1)] = np.where(((uvw[(:, 0)] != (- 9999)) & (uvw[(:, 1)] != (- 9999))), (((- uvw[(:, 0)]) * np.sin(alpha)) + (uvw[(:, 1)] * np.cos(alpha))), (- 9999)) file1 = open(('%s/%s_uvw.csv' % (indirpf, outfile[:(- 4)])), 'w') output = csv.writer(file1) output.writerow(header[0]) for i in xrange(np.shape(uvw_trans)[0]): output.writerow(uvw_trans[i]) file1.close() print("Do EddyPFit with the 'uvw.csv' file now!") ui1 = raw_input('Ready or quit (y/n)?: ').lower() if (ui1 != 'y'): sys.exit() header0 = np.array(sread(('%s/%s' % (indirpf, pf0file))), dtype='|S1') uvw0 = np.array(fread(('%s/%s' % (indirpf, pf0file)), skip=1), dtype=np.float) uvw0_trans = np.copy(uvw0) uvw0_trans[(:, 0)] = ((uvw0[(:, 0)] * np.cos(alpha)) - (uvw0[(:, 1)] * np.sin(alpha))) uvw0_trans[(:, 1)] = ((uvw0[(:, 0)] * np.sin(alpha)) + (uvw0[(:, 1)] * np.cos(alpha))) header1 = np.array(sread(('%s/%s' % (indirpf, pf1file))), dtype='|S1') uvw1 = np.array(fread(('%s/%s' % (indirpf, pf1file)), skip=1), dtype=np.float) uvw1_trans = np.copy(uvw1) uvw1_trans[(:, 0)] = ((uvw1[(:, 0)] * np.cos(alpha)) - (uvw1[(:, 1)] * np.sin(alpha))) uvw1_trans[(:, 1)] = ((uvw1[(:, 0)] * np.sin(alpha)) + (uvw1[(:, 1)] * np.cos(alpha))) header2 = np.array(sread(('%s/%s' % (indirpf, pf2file))), 
dtype='|S1') uvw2 = np.array(fread(('%s/%s' % (indirpf, pf2file)), skip=1), dtype=np.float) uvw2_trans = np.copy(uvw2) uvw2_trans[(:, 0)] = ((uvw2[(:, 0)] * np.cos(alpha)) - (uvw2[(:, 1)] * np.sin(alpha))) uvw2_trans[(:, 1)] = ((uvw2[(:, 0)] * np.sin(alpha)) + (uvw2[(:, 1)] * np.cos(alpha))) if plot: import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.mlab import griddata import matplotlib.cm as cm import matplotlib.mlab as mlab import matplotlib.backends.backend_pdf as pdf x0 = np.linspace(np.min(np.minimum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), np.max(np.maximum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), 500) y0 = np.linspace(np.min(np.minimum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), np.max(np.maximum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), 500) x1 = np.linspace(np.min(np.minimum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), np.max(np.maximum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), 500) y1 = np.linspace(np.min(np.minimum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), np.max(np.maximum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), 500) x2 = np.linspace(np.min(np.minimum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), np.max(np.maximum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), 500) y2 = np.linspace(np.min(np.minimum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), np.max(np.maximum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), 500) z0 = griddata(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)], uvw0_trans[(:, 2)], x0, y0, interp='linear') z1 = griddata(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)], uvw1_trans[(:, 2)], x1, y1, interp='linear') z2 = griddata(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)], uvw2_trans[(:, 2)], x2, y2, interp='linear') fig1 = plt.figure(1, figsize=(6, 13)) sub1 = fig1.add_subplot(311, aspect=1) fillings = sub1.contourf(x0, y0, z0, 20, cmap=plt.cm.jet) scat = sub1.scatter(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)], marker='o', c='b', s=0.2, zorder=10) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub1.get_xlim() 
sub1.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub1.get_ylim() sub1.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub1.set_title('Original wind components\nwith point data') cbar.set_label('w [m/s]') plt.ylabel('v [m/s]') sub2 = fig1.add_subplot(312, aspect=1) fillings = sub2.contourf(x1, y1, z1, 20, cmap=plt.cm.jet) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub2.get_xlim() sub2.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub2.get_ylim() sub2.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub2.set_title('One plane') cbar.set_label('w [m/s]') plt.ylabel('v [m/s]') sub3 = fig1.add_subplot(313, aspect=1) fillings = sub3.contourf(x2, y2, z2, 20, cmap=plt.cm.jet) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub3.get_xlim() sub3.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub3.get_ylim() sub3.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub3.set_title('Sectorial') cbar.set_label('w [m/s]') plt.xlabel('u [m/s]') plt.ylabel('v [m/s]') mi = np.min(np.array([np.min(uvw0_trans[(:, 2)]), np.min(uvw1_trans[(:, 2)]), np.min(uvw2_trans[(:, 2)])])) ma = np.max(np.array([np.max(uvw0_trans[(:, 2)]), np.max(uvw1_trans[(:, 2)]), np.max(uvw2_trans[(:, 2)])])) steps = np.abs(((ma - mi) / histsteps)) bins = np.arange(mi, (ma + steps), steps) fig2 = plt.figure(2, figsize=(6, 13)) fig2.subplots_adjust(hspace=0.3) sub4 = fig2.add_subplot(311) (n0, bins0, patches0) = sub4.hist(uvw0_trans[(:, 2)], bins, color='b', histtype='bar') ylimits = sub4.get_ylim() sub4.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.ylabel('count') plt.title(('Original w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw0_trans[(:, 2)]), np.var(uvw0_trans[(:, 2)]), skew(uvw0_trans[(:, 2)])))) sub5 = fig2.add_subplot(312) (n1, bins1, patches1) = sub5.hist(uvw1_trans[(:, 2)], 
bins, color='g', histtype='bar') ylimits = sub5.get_ylim() sub5.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.ylabel('count') plt.title(('One plane w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw1_trans[(:, 2)]), np.var(uvw1_trans[(:, 2)]), skew(uvw1_trans[(:, 2)])))) sub6 = fig2.add_subplot(313) (n2, bins2, patches2) = sub6.hist(uvw2_trans[(:, 2)], bins, color='r', histtype='bar') ylimits = sub6.get_ylim() sub6.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.xlabel('Classes [m/s]') plt.ylabel('count') plt.title(('Sectorial w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw2_trans[(:, 2)]), np.var(uvw2_trans[(:, 2)]), skew(uvw2_trans[(:, 2)])))) fig3 = plt.figure(3, figsize=(6, 6)) pol = fig3.add_subplot(111, polar=True) (hist, bin_edges) = np.histogram(wdhor, bins=36, range=(0, 360)) x = (90 - np.arange(5, 365, 10)) x = [((i * pi) / 180.0) for i in x] pol.bar(x, hist, width=((10 * pi) / 180)) pol.set_xticklabels(['$\\sf{90\\degree}$', '$\\sf{45\\degree}$', '$\\sf{0\\degree}$', '$\\sf{315\\degree}$', '$\\sf{270\\degree}$', '$\\sf{225\\degree}$', '$\\sf{180\\degree}$', '$\\sf{135\\degree}$'], fontsize=15) plt.title('Horizontal wind direction frequency') plt.show() print('Satisfied with the fit?\ny will save the figures, n will exit without saving!') ui2 = raw_input('(y/n)?: ').lower() if (ui2 != 'y'): sys.exit() if plot: pp1 = pdf.PdfPages(('%s/%s_pfit.pdf' % (indirpf, outfile[:(- 4)]))) pp2 = pdf.PdfPages(('%s/%s_wdis.pdf' % (indirpf, outfile[:(- 4)]))) pp3 = pdf.PdfPages(('%s/%s_wd.pdf' % (indirpf, outfile[:(- 4)]))) fig1.savefig(pp1, format='pdf') fig2.savefig(pp2, format='pdf') fig3.savefig(pp3, format='pdf') pp1.close() pp2.close() pp3.close() print('Rename EddyPFit files?') ui3 = raw_input('(y/n)?: ').lower() if (ui3 != 'y'): sys.exit() os.rename(('%s/%s' % (indirpf, pf0file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf0file[:(- 4)]))) os.rename(('%s/%s' 
% (indirpf, pf1file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf1file[:(- 4)]))) os.rename(('%s/%s' % (indirpf, pf2file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf2file[:(- 4)]))) os.rename(('%s/%s' % (indirpf, pfmat)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pfmat[:(- 4)])))
def planarfit(indirpf, rawfile, outfile, pfmat='pfitmatrix.csv', pf0file='pfitdata0.csv', pf1file='pfitdata1.csv', pf2file='pfitdata2.csv', histsteps=50, plot=False): '\n Extracts raw wind speeds from the raw flux file of EddyFlux as input for\n EDDYPFit. When EDDYFit is finished, the script loads the results and\n do plots. If user is satisfied, results are saved.\n\n\n Definition\n ----------\n planarfit(indirpf, rawfile, outfile, pfmat=\'pfitmatrix.csv\',\n pf0file=\'pfitdata0.csv\', pf1file=\'pfitdata1.csv\',\n pf2file=\'pfitdata2.csv\', histsteps=50):\n\n Input\n -----\n indirpf str, path of the folder where results will be saved\n rawfile str, path of the file with raw wind speeds from EddyFlux\n outfile str, name of the output file\n\n\n Optional Input\n --------------\n pfmat str, name of the pfitmatix file, default: \'pfitmatrix.csv\'\n pf0file str, name of the original wind speed file of EDDYPFit, default: \'pfitdata0.csv\'\n pf1file str, name of the one plane fit wind speed file of EDDYPFit, default: \'pfitdata1.csv\'\n pf2file str, name of the sectorial fit wind speed file of EDDYPFit, default: \'pfitdata2.csv\'\n histstep int, histogram steps for plotting (default=50)\n\n\n Output\n ------\n X_pfit.pdf plot with planar fit\n X_uvw.csv file with raw wind speeds\n X_wd.pdf plot with wind rose\n X_wdis.pdf plot with wind speed distributions\n\n\n License\n -------\n This file is part of the JAMS Python package, distributed under the MIT\n License. 
The JAMS Python package originates from the former UFZ Python library,\n Department of Computational Hydrosystems, Helmholtz Centre for Environmental\n Research - UFZ, Leipzig, Germany.\n\n Copyright (c) 2014 Arndt Piayda\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the "Software"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n\n History\n -------\n Written, AP, Aug 2014\n ' uvw = np.array(fread(('%s' % rawfile), skip=1, cskip=13, nc=3)) wdhor = np.array(fread(('%s' % rawfile), skip=1, cskip=20, nc=1)) header = np.array(sread(('%s' % rawfile), cskip=13, nc=3), dtype='|S5') alpha = ((120.0 * np.pi) / 180.0) uvw_trans = np.copy(uvw) uvw_trans[(:, 0)] = np.where(((uvw[(:, 0)] != (- 9999)) & (uvw[(:, 1)] != (- 9999))), ((uvw[(:, 0)] * np.cos(alpha)) + (uvw[(:, 1)] * np.sin(alpha))), (- 9999)) uvw_trans[(:, 1)] = np.where(((uvw[(:, 0)] != (- 9999)) & (uvw[(:, 1)] != (- 9999))), (((- uvw[(:, 0)]) * np.sin(alpha)) + (uvw[(:, 1)] * np.cos(alpha))), (- 9999)) file1 = open(('%s/%s_uvw.csv' % (indirpf, outfile[:(- 4)])), 'w') output = csv.writer(file1) output.writerow(header[0]) for i in xrange(np.shape(uvw_trans)[0]): output.writerow(uvw_trans[i]) file1.close() print("Do EddyPFit with the 'uvw.csv' file now!") ui1 = raw_input('Ready or quit (y/n)?: ').lower() if (ui1 != 'y'): sys.exit() header0 = np.array(sread(('%s/%s' % (indirpf, pf0file))), dtype='|S1') uvw0 = np.array(fread(('%s/%s' % (indirpf, pf0file)), skip=1), dtype=np.float) uvw0_trans = np.copy(uvw0) uvw0_trans[(:, 0)] = ((uvw0[(:, 0)] * np.cos(alpha)) - (uvw0[(:, 1)] * np.sin(alpha))) uvw0_trans[(:, 1)] = ((uvw0[(:, 0)] * np.sin(alpha)) + (uvw0[(:, 1)] * np.cos(alpha))) header1 = np.array(sread(('%s/%s' % (indirpf, pf1file))), dtype='|S1') uvw1 = np.array(fread(('%s/%s' % (indirpf, pf1file)), skip=1), dtype=np.float) uvw1_trans = np.copy(uvw1) uvw1_trans[(:, 0)] = ((uvw1[(:, 0)] * np.cos(alpha)) - (uvw1[(:, 1)] * np.sin(alpha))) uvw1_trans[(:, 1)] = ((uvw1[(:, 0)] * np.sin(alpha)) + (uvw1[(:, 1)] * np.cos(alpha))) header2 = np.array(sread(('%s/%s' % (indirpf, pf2file))), 
dtype='|S1') uvw2 = np.array(fread(('%s/%s' % (indirpf, pf2file)), skip=1), dtype=np.float) uvw2_trans = np.copy(uvw2) uvw2_trans[(:, 0)] = ((uvw2[(:, 0)] * np.cos(alpha)) - (uvw2[(:, 1)] * np.sin(alpha))) uvw2_trans[(:, 1)] = ((uvw2[(:, 0)] * np.sin(alpha)) + (uvw2[(:, 1)] * np.cos(alpha))) if plot: import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.mlab import griddata import matplotlib.cm as cm import matplotlib.mlab as mlab import matplotlib.backends.backend_pdf as pdf x0 = np.linspace(np.min(np.minimum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), np.max(np.maximum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), 500) y0 = np.linspace(np.min(np.minimum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), np.max(np.maximum(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)])), 500) x1 = np.linspace(np.min(np.minimum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), np.max(np.maximum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), 500) y1 = np.linspace(np.min(np.minimum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), np.max(np.maximum(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)])), 500) x2 = np.linspace(np.min(np.minimum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), np.max(np.maximum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), 500) y2 = np.linspace(np.min(np.minimum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), np.max(np.maximum(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)])), 500) z0 = griddata(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)], uvw0_trans[(:, 2)], x0, y0, interp='linear') z1 = griddata(uvw1_trans[(:, 0)], uvw1_trans[(:, 1)], uvw1_trans[(:, 2)], x1, y1, interp='linear') z2 = griddata(uvw2_trans[(:, 0)], uvw2_trans[(:, 1)], uvw2_trans[(:, 2)], x2, y2, interp='linear') fig1 = plt.figure(1, figsize=(6, 13)) sub1 = fig1.add_subplot(311, aspect=1) fillings = sub1.contourf(x0, y0, z0, 20, cmap=plt.cm.jet) scat = sub1.scatter(uvw0_trans[(:, 0)], uvw0_trans[(:, 1)], marker='o', c='b', s=0.2, zorder=10) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub1.get_xlim() 
sub1.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub1.get_ylim() sub1.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub1.set_title('Original wind components\nwith point data') cbar.set_label('w [m/s]') plt.ylabel('v [m/s]') sub2 = fig1.add_subplot(312, aspect=1) fillings = sub2.contourf(x1, y1, z1, 20, cmap=plt.cm.jet) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub2.get_xlim() sub2.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub2.get_ylim() sub2.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub2.set_title('One plane') cbar.set_label('w [m/s]') plt.ylabel('v [m/s]') sub3 = fig1.add_subplot(313, aspect=1) fillings = sub3.contourf(x2, y2, z2, 20, cmap=plt.cm.jet) cbar = fig1.colorbar(fillings, orientation='vertical') xlimits = sub3.get_xlim() sub3.plot(np.array([xlimits[0], xlimits[1]]), np.array([0, 0]), c='k') ylimits = sub3.get_ylim() sub3.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='k') sub3.set_title('Sectorial') cbar.set_label('w [m/s]') plt.xlabel('u [m/s]') plt.ylabel('v [m/s]') mi = np.min(np.array([np.min(uvw0_trans[(:, 2)]), np.min(uvw1_trans[(:, 2)]), np.min(uvw2_trans[(:, 2)])])) ma = np.max(np.array([np.max(uvw0_trans[(:, 2)]), np.max(uvw1_trans[(:, 2)]), np.max(uvw2_trans[(:, 2)])])) steps = np.abs(((ma - mi) / histsteps)) bins = np.arange(mi, (ma + steps), steps) fig2 = plt.figure(2, figsize=(6, 13)) fig2.subplots_adjust(hspace=0.3) sub4 = fig2.add_subplot(311) (n0, bins0, patches0) = sub4.hist(uvw0_trans[(:, 2)], bins, color='b', histtype='bar') ylimits = sub4.get_ylim() sub4.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.ylabel('count') plt.title(('Original w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw0_trans[(:, 2)]), np.var(uvw0_trans[(:, 2)]), skew(uvw0_trans[(:, 2)])))) sub5 = fig2.add_subplot(312) (n1, bins1, patches1) = sub5.hist(uvw1_trans[(:, 2)], 
bins, color='g', histtype='bar') ylimits = sub5.get_ylim() sub5.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.ylabel('count') plt.title(('One plane w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw1_trans[(:, 2)]), np.var(uvw1_trans[(:, 2)]), skew(uvw1_trans[(:, 2)])))) sub6 = fig2.add_subplot(313) (n2, bins2, patches2) = sub6.hist(uvw2_trans[(:, 2)], bins, color='r', histtype='bar') ylimits = sub6.get_ylim() sub6.plot(np.array([0, 0]), np.array([ylimits[0], ylimits[1]]), c='y', lw=3) plt.xlabel('Classes [m/s]') plt.ylabel('count') plt.title(('Sectorial w-component:\navg(w)= %.2f, var(w)= %.4f, skew(w)= %.4f' % (np.mean(uvw2_trans[(:, 2)]), np.var(uvw2_trans[(:, 2)]), skew(uvw2_trans[(:, 2)])))) fig3 = plt.figure(3, figsize=(6, 6)) pol = fig3.add_subplot(111, polar=True) (hist, bin_edges) = np.histogram(wdhor, bins=36, range=(0, 360)) x = (90 - np.arange(5, 365, 10)) x = [((i * pi) / 180.0) for i in x] pol.bar(x, hist, width=((10 * pi) / 180)) pol.set_xticklabels(['$\\sf{90\\degree}$', '$\\sf{45\\degree}$', '$\\sf{0\\degree}$', '$\\sf{315\\degree}$', '$\\sf{270\\degree}$', '$\\sf{225\\degree}$', '$\\sf{180\\degree}$', '$\\sf{135\\degree}$'], fontsize=15) plt.title('Horizontal wind direction frequency') plt.show() print('Satisfied with the fit?\ny will save the figures, n will exit without saving!') ui2 = raw_input('(y/n)?: ').lower() if (ui2 != 'y'): sys.exit() if plot: pp1 = pdf.PdfPages(('%s/%s_pfit.pdf' % (indirpf, outfile[:(- 4)]))) pp2 = pdf.PdfPages(('%s/%s_wdis.pdf' % (indirpf, outfile[:(- 4)]))) pp3 = pdf.PdfPages(('%s/%s_wd.pdf' % (indirpf, outfile[:(- 4)]))) fig1.savefig(pp1, format='pdf') fig2.savefig(pp2, format='pdf') fig3.savefig(pp3, format='pdf') pp1.close() pp2.close() pp3.close() print('Rename EddyPFit files?') ui3 = raw_input('(y/n)?: ').lower() if (ui3 != 'y'): sys.exit() os.rename(('%s/%s' % (indirpf, pf0file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf0file[:(- 4)]))) os.rename(('%s/%s' 
% (indirpf, pf1file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf1file[:(- 4)]))) os.rename(('%s/%s' % (indirpf, pf2file)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pf2file[:(- 4)]))) os.rename(('%s/%s' % (indirpf, pfmat)), ('%s/%s_%s.csv' % (indirpf, outfile[:(- 4)], pfmat[:(- 4)])))<|docstring|>Extracts raw wind speeds from the raw flux file of EddyFlux as input for EDDYPFit. When EDDYFit is finished, the script loads the results and do plots. If user is satisfied, results are saved. Definition ---------- planarfit(indirpf, rawfile, outfile, pfmat='pfitmatrix.csv', pf0file='pfitdata0.csv', pf1file='pfitdata1.csv', pf2file='pfitdata2.csv', histsteps=50): Input ----- indirpf str, path of the folder where results will be saved rawfile str, path of the file with raw wind speeds from EddyFlux outfile str, name of the output file Optional Input -------------- pfmat str, name of the pfitmatix file, default: 'pfitmatrix.csv' pf0file str, name of the original wind speed file of EDDYPFit, default: 'pfitdata0.csv' pf1file str, name of the one plane fit wind speed file of EDDYPFit, default: 'pfitdata1.csv' pf2file str, name of the sectorial fit wind speed file of EDDYPFit, default: 'pfitdata2.csv' histstep int, histogram steps for plotting (default=50) Output ------ X_pfit.pdf plot with planar fit X_uvw.csv file with raw wind speeds X_wd.pdf plot with wind rose X_wdis.pdf plot with wind speed distributions License ------- This file is part of the JAMS Python package, distributed under the MIT License. The JAMS Python package originates from the former UFZ Python library, Department of Computational Hydrosystems, Helmholtz Centre for Environmental Research - UFZ, Leipzig, Germany. 
Copyright (c) 2014 Arndt Piayda Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. History ------- Written, AP, Aug 2014<|endoftext|>
31ddd95052100c488bc1a007611988c0050323966b35b75fd94fd8ed8b973241
async def __handle_analytic_task(self, task: object) -> dict: '\n returns payload or None\n ' analytic_unit_id: AnalyticUnitId = task['analyticUnitId'] log.debug('Analytics get task with type: {} for unit: {}'.format(task['type'], analytic_unit_id)) if (task['type'] == 'CANCEL'): if (analytic_unit_id in self.analytic_workers): self.analytic_workers[analytic_unit_id].cancel() return payload = task['payload'] worker = self.__ensure_worker(analytic_unit_id, payload['detector'], payload['analyticUnitType']) data = payload.get('data') if (task['type'] == 'PUSH'): res = (await worker.consume_data(data, payload['cache'])) if res: res.update({'analyticUnitId': analytic_unit_id}) return res elif (task['type'] == 'LEARN'): if ('segments' in payload): segments = payload['segments'] segments = [Segment.from_json(segment) for segment in segments] return (await worker.do_train(segments, data, payload['cache'])) elif ('threshold' in payload): return (await worker.do_train(payload['threshold'], data, payload['cache'])) elif ('anomaly' in payload): return (await worker.do_train(payload['anomaly'], data, payload['cache'])) else: raise ValueError('No segments or threshold in LEARN payload') elif (task['type'] == 'DETECT'): return (await worker.do_detect(data, payload['cache'])) elif (task['type'] == 'PROCESS'): return (await worker.process_data(data, payload['cache'])) raise ValueError(('Unknown task type "%s"' % task['type']))
returns payload or None
analytics/analytics/analytic_unit_manager.py
__handle_analytic_task
jonyrock-back/hastic-server
0
python
async def __handle_analytic_task(self, task: object) -> dict: '\n \n ' analytic_unit_id: AnalyticUnitId = task['analyticUnitId'] log.debug('Analytics get task with type: {} for unit: {}'.format(task['type'], analytic_unit_id)) if (task['type'] == 'CANCEL'): if (analytic_unit_id in self.analytic_workers): self.analytic_workers[analytic_unit_id].cancel() return payload = task['payload'] worker = self.__ensure_worker(analytic_unit_id, payload['detector'], payload['analyticUnitType']) data = payload.get('data') if (task['type'] == 'PUSH'): res = (await worker.consume_data(data, payload['cache'])) if res: res.update({'analyticUnitId': analytic_unit_id}) return res elif (task['type'] == 'LEARN'): if ('segments' in payload): segments = payload['segments'] segments = [Segment.from_json(segment) for segment in segments] return (await worker.do_train(segments, data, payload['cache'])) elif ('threshold' in payload): return (await worker.do_train(payload['threshold'], data, payload['cache'])) elif ('anomaly' in payload): return (await worker.do_train(payload['anomaly'], data, payload['cache'])) else: raise ValueError('No segments or threshold in LEARN payload') elif (task['type'] == 'DETECT'): return (await worker.do_detect(data, payload['cache'])) elif (task['type'] == 'PROCESS'): return (await worker.process_data(data, payload['cache'])) raise ValueError(('Unknown task type "%s"' % task['type']))
async def __handle_analytic_task(self, task: object) -> dict: '\n \n ' analytic_unit_id: AnalyticUnitId = task['analyticUnitId'] log.debug('Analytics get task with type: {} for unit: {}'.format(task['type'], analytic_unit_id)) if (task['type'] == 'CANCEL'): if (analytic_unit_id in self.analytic_workers): self.analytic_workers[analytic_unit_id].cancel() return payload = task['payload'] worker = self.__ensure_worker(analytic_unit_id, payload['detector'], payload['analyticUnitType']) data = payload.get('data') if (task['type'] == 'PUSH'): res = (await worker.consume_data(data, payload['cache'])) if res: res.update({'analyticUnitId': analytic_unit_id}) return res elif (task['type'] == 'LEARN'): if ('segments' in payload): segments = payload['segments'] segments = [Segment.from_json(segment) for segment in segments] return (await worker.do_train(segments, data, payload['cache'])) elif ('threshold' in payload): return (await worker.do_train(payload['threshold'], data, payload['cache'])) elif ('anomaly' in payload): return (await worker.do_train(payload['anomaly'], data, payload['cache'])) else: raise ValueError('No segments or threshold in LEARN payload') elif (task['type'] == 'DETECT'): return (await worker.do_detect(data, payload['cache'])) elif (task['type'] == 'PROCESS'): return (await worker.process_data(data, payload['cache'])) raise ValueError(('Unknown task type "%s"' % task['type']))<|docstring|>returns payload or None<|endoftext|>
4bca361346b9aec50e1811984ab29132f7019f74b012e6849dda0593e2d4c325
@cy_log def map_visual_property(visual_prop, table_column, mapping_type, table_column_values=[], visual_prop_values=[], network=None, base_url=DEFAULT_BASE_URL): 'Create a mapping between an attribute and a visual property.\n\n Generates the appropriate data structure for the "mapping" parameter in ``update_style_mapping()``.\n\n The paired list of values must be of the same length or mapping will fail. For gradient mapping,\n you may include two additional ``visual_prop_values`` in the first and last positions to map respectively\n to values less than and greater than those specified in ``table_column_values``. Mapping will also fail if the\n data type of ``table_column_values`` does not match that of the existing ``table_column``. Note that all imported\n numeric data are stored as Integers or Doubles in Cytosacpe tables; and character or mixed data are\n stored as Strings.\n\n Args:\n visual_prop (str): name of visual property to map\n table_column (str): name of table column to map\n mapping_type (str): continuous, discrete or passthrough (c,d,p)\n table_column_values (list): list of values paired with ``visual_prop_values``; skip for passthrough mapping\n visual_prop_values (list): list of values paired with ``table_column_values``; skip for passthrough mapping\n network (SUID or str or None): Name or SUID of a network. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: {\'mappingType\': type of mapping, \'mappingColumn\': column to map, \'mappingColumnType\': column data type, \'visualProperty\': name of property, cargo}\n\n Raises:\n CyError: if network name or SUID doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> map_visual_property(\'node fill color\', \'gal1RGexp\', \'c\', [-2.426, 0.0, 2.058], [\'#0066CC\', \'#FFFFFF\',\'#FFFF00\'])\n {\'mappingType\': \'continuous\', \'mappingColumn\': \'gal1RGexp\', \'mappingColumnType\': \'Double\', \'visualProperty\': \'NODE_FILL_COLOR\', \'points\': [{\'value\': -2.426, \'lesser\': \'#0066CC\', \'equal\': \'#0066CC\', \'greater\': \'#0066CC\'}, {\'value\': 0.0, \'lesser\': \'#FFFFFF\', \'equal\': \'#FFFFFF\', \'greater\': \'#FFFFFF\'}, {\'value\': 2.058, \'lesser\': \'#FFFF00\', \'equal\': \'#FFFF00\', \'greater\': \'#FFFF00\'}]}\n >>> map_visual_property(\'node shape\', \'degree.layout\', \'d\', [1, 2], [\'ellipse\', \'rectangle\'])\n {\'mappingType\': \'discrete\', \'mappingColumn\': \'degree.layout\', \'mappingColumnType\': \'Integer\', \'visualProperty\': \'NODE_SHAPE\', \'map\': [{\'key\': 1, \'value\': \'ellipse\'}, {\'key\': 2, \'value\': \'rectangle\'}]}\n >>> map_visual_property(\'node label\', \'COMMON\', \'p\')\n {\'mappingType\': \'passthrough\', \'mappingColumn\': \'COMMON\', \'mappingColumnType\': \'String\', \'visualProperty\': \'NODE_LABEL\'}\n\n Note:\n For the return value, ``mapping type`` can be \'continuous\', \'discrete\' or \'passthrough\'. For the\n ``mappingColumn``, the name of the column. For the ``mappingColumnType``, the Cytoscape data type (Double,\n Integer, String, Boolean). For the ``visualProperty``, the canonical name of the visual property. The ``cargo``\n depends on the ``mapping type``. 
For \'continuous\', it\'s a list of way points as \'points\': [waypoint, waypoint, ...]\n where a waypoint is {\'value\': a Double, \'lesser\': a color, \'equal\': a color, \'greater\': a color}. For \'discrete\',\n it\'s a list of mappings as \'map\': [key-value, key-value, ...] where a key-value is {\'key\': column data value,\n \'value\': value appropriate for ``visualProperty``}.\n\n See Also:\n :meth:`update_style_mapping`, :meth:`get_visual_property_names`\n ' MAPPING_TYPES = {'c': 'continuous', 'd': 'discrete', 'p': 'passthrough'} PROPERTY_NAMES = {'EDGE_COLOR': 'EDGE_UNSELECTED_PAINT', 'EDGE_THICKNESS': 'EDGE_WIDTH', 'NODE_BORDER_COLOR': 'NODE_BORDER_PAINT', 'NODE_BORDER_LINE_TYPE': 'NODE_BORDER_STROKE'} suid = networks.get_network_suid(network, base_url=base_url) mapping_type_name = (MAPPING_TYPES[mapping_type] if (mapping_type in MAPPING_TYPES) else mapping_type) visual_prop_name = re.sub('\\s+', '_', visual_prop).upper() if (visual_prop_name in PROPERTY_NAMES): visual_prop_name = PROPERTY_NAMES[visual_prop_name] if (visual_prop_name not in styles.get_visual_property_names(base_url=base_url)): raise CyError(f'Could not find visual property "{visual_prop_name}". 
For valid ones, check get_visual_property_names().') tp = visual_prop_name.split('_')[0].lower() table = ('default' + tp) res = commands.cyrest_get((((('networks/' + str(suid)) + '/tables/') + table) + '/columns'), base_url=base_url) table_column_type = None for col in res: if (col['name'] == table_column): table_column_type = col['type'] break if (table_column_type is None): raise CyError(f'Could not find "{table_column}" column in "{table}" table.') visual_prop_map = {'mappingType': mapping_type_name, 'mappingColumn': table_column, 'mappingColumnType': table_column_type, 'visualProperty': visual_prop_name} if (mapping_type_name == 'discrete'): visual_prop_map['map'] = [{'key': col_val, 'value': prop_val} for (col_val, prop_val) in zip(table_column_values, visual_prop_values)] elif (mapping_type_name == 'continuous'): prop_val_count = len(visual_prop_values) col_val_count = len(table_column_values) if ((prop_val_count - col_val_count) == 2): matched_visual_prop_values = visual_prop_values[1:] points = [{'value': col_val, 'lesser': prop_val, 'equal': prop_val, 'greater': prop_val} for (col_val, prop_val) in zip(table_column_values, matched_visual_prop_values)] points[0]['lesser'] = visual_prop_values[0] points[(col_val_count - 1)]['greater'] = visual_prop_values[(- 1)] elif ((prop_val_count - col_val_count) == 0): points = [{'value': col_val, 'lesser': prop_val, 'equal': prop_val, 'greater': prop_val} for (col_val, prop_val) in zip(table_column_values, visual_prop_values)] else: raise CyError(f"""table_column_values "{table_column_values}" and visual_prop_values "{visual_prop_values}" don't match up.""") visual_prop_map['points'] = points return visual_prop_map
Create a mapping between an attribute and a visual property. Generates the appropriate data structure for the "mapping" parameter in ``update_style_mapping()``. The paired list of values must be of the same length or mapping will fail. For gradient mapping, you may include two additional ``visual_prop_values`` in the first and last positions to map respectively to values less than and greater than those specified in ``table_column_values``. Mapping will also fail if the data type of ``table_column_values`` does not match that of the existing ``table_column``. Note that all imported numeric data are stored as Integers or Doubles in Cytosacpe tables; and character or mixed data are stored as Strings. Args: visual_prop (str): name of visual property to map table_column (str): name of table column to map mapping_type (str): continuous, discrete or passthrough (c,d,p) table_column_values (list): list of values paired with ``visual_prop_values``; skip for passthrough mapping visual_prop_values (list): list of values paired with ``table_column_values``; skip for passthrough mapping network (SUID or str or None): Name or SUID of a network. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. 
Returns: dict: {'mappingType': type of mapping, 'mappingColumn': column to map, 'mappingColumnType': column data type, 'visualProperty': name of property, cargo} Raises: CyError: if network name or SUID doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> map_visual_property('node fill color', 'gal1RGexp', 'c', [-2.426, 0.0, 2.058], ['#0066CC', '#FFFFFF','#FFFF00']) {'mappingType': 'continuous', 'mappingColumn': 'gal1RGexp', 'mappingColumnType': 'Double', 'visualProperty': 'NODE_FILL_COLOR', 'points': [{'value': -2.426, 'lesser': '#0066CC', 'equal': '#0066CC', 'greater': '#0066CC'}, {'value': 0.0, 'lesser': '#FFFFFF', 'equal': '#FFFFFF', 'greater': '#FFFFFF'}, {'value': 2.058, 'lesser': '#FFFF00', 'equal': '#FFFF00', 'greater': '#FFFF00'}]} >>> map_visual_property('node shape', 'degree.layout', 'd', [1, 2], ['ellipse', 'rectangle']) {'mappingType': 'discrete', 'mappingColumn': 'degree.layout', 'mappingColumnType': 'Integer', 'visualProperty': 'NODE_SHAPE', 'map': [{'key': 1, 'value': 'ellipse'}, {'key': 2, 'value': 'rectangle'}]} >>> map_visual_property('node label', 'COMMON', 'p') {'mappingType': 'passthrough', 'mappingColumn': 'COMMON', 'mappingColumnType': 'String', 'visualProperty': 'NODE_LABEL'} Note: For the return value, ``mapping type`` can be 'continuous', 'discrete' or 'passthrough'. For the ``mappingColumn``, the name of the column. For the ``mappingColumnType``, the Cytoscape data type (Double, Integer, String, Boolean). For the ``visualProperty``, the canonical name of the visual property. The ``cargo`` depends on the ``mapping type``. For 'continuous', it's a list of way points as 'points': [waypoint, waypoint, ...] where a waypoint is {'value': a Double, 'lesser': a color, 'equal': a color, 'greater': a color}. For 'discrete', it's a list of mappings as 'map': [key-value, key-value, ...] 
where a key-value is {'key': column data value, 'value': value appropriate for ``visualProperty``}. See Also: :meth:`update_style_mapping`, :meth:`get_visual_property_names`
py4cytoscape/style_mappings.py
map_visual_property
tyasird/py4cytoscape
0
python
@cy_log def map_visual_property(visual_prop, table_column, mapping_type, table_column_values=[], visual_prop_values=[], network=None, base_url=DEFAULT_BASE_URL): 'Create a mapping between an attribute and a visual property.\n\n Generates the appropriate data structure for the "mapping" parameter in ``update_style_mapping()``.\n\n The paired list of values must be of the same length or mapping will fail. For gradient mapping,\n you may include two additional ``visual_prop_values`` in the first and last positions to map respectively\n to values less than and greater than those specified in ``table_column_values``. Mapping will also fail if the\n data type of ``table_column_values`` does not match that of the existing ``table_column``. Note that all imported\n numeric data are stored as Integers or Doubles in Cytosacpe tables; and character or mixed data are\n stored as Strings.\n\n Args:\n visual_prop (str): name of visual property to map\n table_column (str): name of table column to map\n mapping_type (str): continuous, discrete or passthrough (c,d,p)\n table_column_values (list): list of values paired with ``visual_prop_values``; skip for passthrough mapping\n visual_prop_values (list): list of values paired with ``table_column_values``; skip for passthrough mapping\n network (SUID or str or None): Name or SUID of a network. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: {\'mappingType\': type of mapping, \'mappingColumn\': column to map, \'mappingColumnType\': column data type, \'visualProperty\': name of property, cargo}\n\n Raises:\n CyError: if network name or SUID doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> map_visual_property(\'node fill color\', \'gal1RGexp\', \'c\', [-2.426, 0.0, 2.058], [\'#0066CC\', \'#FFFFFF\',\'#FFFF00\'])\n {\'mappingType\': \'continuous\', \'mappingColumn\': \'gal1RGexp\', \'mappingColumnType\': \'Double\', \'visualProperty\': \'NODE_FILL_COLOR\', \'points\': [{\'value\': -2.426, \'lesser\': \'#0066CC\', \'equal\': \'#0066CC\', \'greater\': \'#0066CC\'}, {\'value\': 0.0, \'lesser\': \'#FFFFFF\', \'equal\': \'#FFFFFF\', \'greater\': \'#FFFFFF\'}, {\'value\': 2.058, \'lesser\': \'#FFFF00\', \'equal\': \'#FFFF00\', \'greater\': \'#FFFF00\'}]}\n >>> map_visual_property(\'node shape\', \'degree.layout\', \'d\', [1, 2], [\'ellipse\', \'rectangle\'])\n {\'mappingType\': \'discrete\', \'mappingColumn\': \'degree.layout\', \'mappingColumnType\': \'Integer\', \'visualProperty\': \'NODE_SHAPE\', \'map\': [{\'key\': 1, \'value\': \'ellipse\'}, {\'key\': 2, \'value\': \'rectangle\'}]}\n >>> map_visual_property(\'node label\', \'COMMON\', \'p\')\n {\'mappingType\': \'passthrough\', \'mappingColumn\': \'COMMON\', \'mappingColumnType\': \'String\', \'visualProperty\': \'NODE_LABEL\'}\n\n Note:\n For the return value, ``mapping type`` can be \'continuous\', \'discrete\' or \'passthrough\'. For the\n ``mappingColumn``, the name of the column. For the ``mappingColumnType``, the Cytoscape data type (Double,\n Integer, String, Boolean). For the ``visualProperty``, the canonical name of the visual property. The ``cargo``\n depends on the ``mapping type``. 
For \'continuous\', it\'s a list of way points as \'points\': [waypoint, waypoint, ...]\n where a waypoint is {\'value\': a Double, \'lesser\': a color, \'equal\': a color, \'greater\': a color}. For \'discrete\',\n it\'s a list of mappings as \'map\': [key-value, key-value, ...] where a key-value is {\'key\': column data value,\n \'value\': value appropriate for ``visualProperty``}.\n\n See Also:\n :meth:`update_style_mapping`, :meth:`get_visual_property_names`\n ' MAPPING_TYPES = {'c': 'continuous', 'd': 'discrete', 'p': 'passthrough'} PROPERTY_NAMES = {'EDGE_COLOR': 'EDGE_UNSELECTED_PAINT', 'EDGE_THICKNESS': 'EDGE_WIDTH', 'NODE_BORDER_COLOR': 'NODE_BORDER_PAINT', 'NODE_BORDER_LINE_TYPE': 'NODE_BORDER_STROKE'} suid = networks.get_network_suid(network, base_url=base_url) mapping_type_name = (MAPPING_TYPES[mapping_type] if (mapping_type in MAPPING_TYPES) else mapping_type) visual_prop_name = re.sub('\\s+', '_', visual_prop).upper() if (visual_prop_name in PROPERTY_NAMES): visual_prop_name = PROPERTY_NAMES[visual_prop_name] if (visual_prop_name not in styles.get_visual_property_names(base_url=base_url)): raise CyError(f'Could not find visual property "{visual_prop_name}". 
For valid ones, check get_visual_property_names().') tp = visual_prop_name.split('_')[0].lower() table = ('default' + tp) res = commands.cyrest_get((((('networks/' + str(suid)) + '/tables/') + table) + '/columns'), base_url=base_url) table_column_type = None for col in res: if (col['name'] == table_column): table_column_type = col['type'] break if (table_column_type is None): raise CyError(f'Could not find "{table_column}" column in "{table}" table.') visual_prop_map = {'mappingType': mapping_type_name, 'mappingColumn': table_column, 'mappingColumnType': table_column_type, 'visualProperty': visual_prop_name} if (mapping_type_name == 'discrete'): visual_prop_map['map'] = [{'key': col_val, 'value': prop_val} for (col_val, prop_val) in zip(table_column_values, visual_prop_values)] elif (mapping_type_name == 'continuous'): prop_val_count = len(visual_prop_values) col_val_count = len(table_column_values) if ((prop_val_count - col_val_count) == 2): matched_visual_prop_values = visual_prop_values[1:] points = [{'value': col_val, 'lesser': prop_val, 'equal': prop_val, 'greater': prop_val} for (col_val, prop_val) in zip(table_column_values, matched_visual_prop_values)] points[0]['lesser'] = visual_prop_values[0] points[(col_val_count - 1)]['greater'] = visual_prop_values[(- 1)] elif ((prop_val_count - col_val_count) == 0): points = [{'value': col_val, 'lesser': prop_val, 'equal': prop_val, 'greater': prop_val} for (col_val, prop_val) in zip(table_column_values, visual_prop_values)] else: raise CyError(f"table_column_values "{table_column_values}" and visual_prop_values "{visual_prop_values}" don't match up.") visual_prop_map['points'] = points return visual_prop_map
@cy_log def map_visual_property(visual_prop, table_column, mapping_type, table_column_values=[], visual_prop_values=[], network=None, base_url=DEFAULT_BASE_URL): 'Create a mapping between an attribute and a visual property.\n\n Generates the appropriate data structure for the "mapping" parameter in ``update_style_mapping()``.\n\n The paired list of values must be of the same length or mapping will fail. For gradient mapping,\n you may include two additional ``visual_prop_values`` in the first and last positions to map respectively\n to values less than and greater than those specified in ``table_column_values``. Mapping will also fail if the\n data type of ``table_column_values`` does not match that of the existing ``table_column``. Note that all imported\n numeric data are stored as Integers or Doubles in Cytosacpe tables; and character or mixed data are\n stored as Strings.\n\n Args:\n visual_prop (str): name of visual property to map\n table_column (str): name of table column to map\n mapping_type (str): continuous, discrete or passthrough (c,d,p)\n table_column_values (list): list of values paired with ``visual_prop_values``; skip for passthrough mapping\n visual_prop_values (list): list of values paired with ``table_column_values``; skip for passthrough mapping\n network (SUID or str or None): Name or SUID of a network. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: {\'mappingType\': type of mapping, \'mappingColumn\': column to map, \'mappingColumnType\': column data type, \'visualProperty\': name of property, cargo}\n\n Raises:\n CyError: if network name or SUID doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> map_visual_property(\'node fill color\', \'gal1RGexp\', \'c\', [-2.426, 0.0, 2.058], [\'#0066CC\', \'#FFFFFF\',\'#FFFF00\'])\n {\'mappingType\': \'continuous\', \'mappingColumn\': \'gal1RGexp\', \'mappingColumnType\': \'Double\', \'visualProperty\': \'NODE_FILL_COLOR\', \'points\': [{\'value\': -2.426, \'lesser\': \'#0066CC\', \'equal\': \'#0066CC\', \'greater\': \'#0066CC\'}, {\'value\': 0.0, \'lesser\': \'#FFFFFF\', \'equal\': \'#FFFFFF\', \'greater\': \'#FFFFFF\'}, {\'value\': 2.058, \'lesser\': \'#FFFF00\', \'equal\': \'#FFFF00\', \'greater\': \'#FFFF00\'}]}\n >>> map_visual_property(\'node shape\', \'degree.layout\', \'d\', [1, 2], [\'ellipse\', \'rectangle\'])\n {\'mappingType\': \'discrete\', \'mappingColumn\': \'degree.layout\', \'mappingColumnType\': \'Integer\', \'visualProperty\': \'NODE_SHAPE\', \'map\': [{\'key\': 1, \'value\': \'ellipse\'}, {\'key\': 2, \'value\': \'rectangle\'}]}\n >>> map_visual_property(\'node label\', \'COMMON\', \'p\')\n {\'mappingType\': \'passthrough\', \'mappingColumn\': \'COMMON\', \'mappingColumnType\': \'String\', \'visualProperty\': \'NODE_LABEL\'}\n\n Note:\n For the return value, ``mapping type`` can be \'continuous\', \'discrete\' or \'passthrough\'. For the\n ``mappingColumn``, the name of the column. For the ``mappingColumnType``, the Cytoscape data type (Double,\n Integer, String, Boolean). For the ``visualProperty``, the canonical name of the visual property. The ``cargo``\n depends on the ``mapping type``. 
For \'continuous\', it\'s a list of way points as \'points\': [waypoint, waypoint, ...]\n where a waypoint is {\'value\': a Double, \'lesser\': a color, \'equal\': a color, \'greater\': a color}. For \'discrete\',\n it\'s a list of mappings as \'map\': [key-value, key-value, ...] where a key-value is {\'key\': column data value,\n \'value\': value appropriate for ``visualProperty``}.\n\n See Also:\n :meth:`update_style_mapping`, :meth:`get_visual_property_names`\n ' MAPPING_TYPES = {'c': 'continuous', 'd': 'discrete', 'p': 'passthrough'} PROPERTY_NAMES = {'EDGE_COLOR': 'EDGE_UNSELECTED_PAINT', 'EDGE_THICKNESS': 'EDGE_WIDTH', 'NODE_BORDER_COLOR': 'NODE_BORDER_PAINT', 'NODE_BORDER_LINE_TYPE': 'NODE_BORDER_STROKE'} suid = networks.get_network_suid(network, base_url=base_url) mapping_type_name = (MAPPING_TYPES[mapping_type] if (mapping_type in MAPPING_TYPES) else mapping_type) visual_prop_name = re.sub('\\s+', '_', visual_prop).upper() if (visual_prop_name in PROPERTY_NAMES): visual_prop_name = PROPERTY_NAMES[visual_prop_name] if (visual_prop_name not in styles.get_visual_property_names(base_url=base_url)): raise CyError(f'Could not find visual property "{visual_prop_name}". 
For valid ones, check get_visual_property_names().') tp = visual_prop_name.split('_')[0].lower() table = ('default' + tp) res = commands.cyrest_get((((('networks/' + str(suid)) + '/tables/') + table) + '/columns'), base_url=base_url) table_column_type = None for col in res: if (col['name'] == table_column): table_column_type = col['type'] break if (table_column_type is None): raise CyError(f'Could not find "{table_column}" column in "{table}" table.') visual_prop_map = {'mappingType': mapping_type_name, 'mappingColumn': table_column, 'mappingColumnType': table_column_type, 'visualProperty': visual_prop_name} if (mapping_type_name == 'discrete'): visual_prop_map['map'] = [{'key': col_val, 'value': prop_val} for (col_val, prop_val) in zip(table_column_values, visual_prop_values)] elif (mapping_type_name == 'continuous'): prop_val_count = len(visual_prop_values) col_val_count = len(table_column_values) if ((prop_val_count - col_val_count) == 2): matched_visual_prop_values = visual_prop_values[1:] points = [{'value': col_val, 'lesser': prop_val, 'equal': prop_val, 'greater': prop_val} for (col_val, prop_val) in zip(table_column_values, matched_visual_prop_values)] points[0]['lesser'] = visual_prop_values[0] points[(col_val_count - 1)]['greater'] = visual_prop_values[(- 1)] elif ((prop_val_count - col_val_count) == 0): points = [{'value': col_val, 'lesser': prop_val, 'equal': prop_val, 'greater': prop_val} for (col_val, prop_val) in zip(table_column_values, visual_prop_values)] else: raise CyError(f"table_column_values "{table_column_values}" and visual_prop_values "{visual_prop_values}" don't match up.") visual_prop_map['points'] = points return visual_prop_map<|docstring|>Create a mapping between an attribute and a visual property. Generates the appropriate data structure for the "mapping" parameter in ``update_style_mapping()``. The paired list of values must be of the same length or mapping will fail. 
For gradient mapping, you may include two additional ``visual_prop_values`` in the first and last positions to map respectively to values less than and greater than those specified in ``table_column_values``. Mapping will also fail if the data type of ``table_column_values`` does not match that of the existing ``table_column``. Note that all imported numeric data are stored as Integers or Doubles in Cytosacpe tables; and character or mixed data are stored as Strings. Args: visual_prop (str): name of visual property to map table_column (str): name of table column to map mapping_type (str): continuous, discrete or passthrough (c,d,p) table_column_values (list): list of values paired with ``visual_prop_values``; skip for passthrough mapping visual_prop_values (list): list of values paired with ``table_column_values``; skip for passthrough mapping network (SUID or str or None): Name or SUID of a network. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. 
Returns: dict: {'mappingType': type of mapping, 'mappingColumn': column to map, 'mappingColumnType': column data type, 'visualProperty': name of property, cargo} Raises: CyError: if network name or SUID doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> map_visual_property('node fill color', 'gal1RGexp', 'c', [-2.426, 0.0, 2.058], ['#0066CC', '#FFFFFF','#FFFF00']) {'mappingType': 'continuous', 'mappingColumn': 'gal1RGexp', 'mappingColumnType': 'Double', 'visualProperty': 'NODE_FILL_COLOR', 'points': [{'value': -2.426, 'lesser': '#0066CC', 'equal': '#0066CC', 'greater': '#0066CC'}, {'value': 0.0, 'lesser': '#FFFFFF', 'equal': '#FFFFFF', 'greater': '#FFFFFF'}, {'value': 2.058, 'lesser': '#FFFF00', 'equal': '#FFFF00', 'greater': '#FFFF00'}]} >>> map_visual_property('node shape', 'degree.layout', 'd', [1, 2], ['ellipse', 'rectangle']) {'mappingType': 'discrete', 'mappingColumn': 'degree.layout', 'mappingColumnType': 'Integer', 'visualProperty': 'NODE_SHAPE', 'map': [{'key': 1, 'value': 'ellipse'}, {'key': 2, 'value': 'rectangle'}]} >>> map_visual_property('node label', 'COMMON', 'p') {'mappingType': 'passthrough', 'mappingColumn': 'COMMON', 'mappingColumnType': 'String', 'visualProperty': 'NODE_LABEL'} Note: For the return value, ``mapping type`` can be 'continuous', 'discrete' or 'passthrough'. For the ``mappingColumn``, the name of the column. For the ``mappingColumnType``, the Cytoscape data type (Double, Integer, String, Boolean). For the ``visualProperty``, the canonical name of the visual property. The ``cargo`` depends on the ``mapping type``. For 'continuous', it's a list of way points as 'points': [waypoint, waypoint, ...] where a waypoint is {'value': a Double, 'lesser': a color, 'equal': a color, 'greater': a color}. For 'discrete', it's a list of mappings as 'map': [key-value, key-value, ...] 
where a key-value is {'key': column data value, 'value': value appropriate for ``visualProperty``}. See Also: :meth:`update_style_mapping`, :meth:`get_visual_property_names`<|endoftext|>
c3eeb16df6606ca9412449957d30cba8dfcc9fb138e9fd3beb4b42358ed7ce64
@cy_log def update_style_mapping(style_name, mapping, base_url=DEFAULT_BASE_URL): "Update a visual property mapping in a style.\n\n Updates the visual property mapping, overriding any prior mapping. Creates a visual property mapping if it doesn't\n already exist in the style. Requires visual property mappings to be previously created, see ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n mapping (dict): a single visual property mapping, see ``map_visual_property()``\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: ''\n\n Raises:\n CyError: if style doesn't exist\n TypeError: if mapping isn't a visual property mapping\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> update_style_mapping('galFiltered Style', map_visual_property('node label', 'name', 'p'))\n ''\n\n See Also:\n :meth:`map_visual_property`\n " visual_prop_name = mapping['visualProperty'] res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) vp_list = [prop['visualProperty'] for prop in res] exists = (visual_prop_name in vp_list) if exists: res = commands.cyrest_put(f'styles/{style_name}/mappings/{visual_prop_name}', body=[mapping], base_url=base_url, require_json=False) else: res = commands.cyrest_post(f'styles/{style_name}/mappings', body=[mapping], base_url=base_url, require_json=False) time.sleep(MODEL_PROPAGATION_SECS) return res
Update a visual property mapping in a style. Updates the visual property mapping, overriding any prior mapping. Creates a visual property mapping if it doesn't already exist in the style. Requires visual property mappings to be previously created, see ``map_visual_property()``. Args: style_name (str): name for style mapping (dict): a single visual property mapping, see ``map_visual_property()`` base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if style doesn't exist TypeError: if mapping isn't a visual property mapping requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> update_style_mapping('galFiltered Style', map_visual_property('node label', 'name', 'p')) '' See Also: :meth:`map_visual_property`
py4cytoscape/style_mappings.py
update_style_mapping
tyasird/py4cytoscape
0
python
@cy_log def update_style_mapping(style_name, mapping, base_url=DEFAULT_BASE_URL): "Update a visual property mapping in a style.\n\n Updates the visual property mapping, overriding any prior mapping. Creates a visual property mapping if it doesn't\n already exist in the style. Requires visual property mappings to be previously created, see ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n mapping (dict): a single visual property mapping, see ``map_visual_property()``\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \n\n Raises:\n CyError: if style doesn't exist\n TypeError: if mapping isn't a visual property mapping\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> update_style_mapping('galFiltered Style', map_visual_property('node label', 'name', 'p'))\n \n\n See Also:\n :meth:`map_visual_property`\n " visual_prop_name = mapping['visualProperty'] res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) vp_list = [prop['visualProperty'] for prop in res] exists = (visual_prop_name in vp_list) if exists: res = commands.cyrest_put(f'styles/{style_name}/mappings/{visual_prop_name}', body=[mapping], base_url=base_url, require_json=False) else: res = commands.cyrest_post(f'styles/{style_name}/mappings', body=[mapping], base_url=base_url, require_json=False) time.sleep(MODEL_PROPAGATION_SECS) return res
@cy_log def update_style_mapping(style_name, mapping, base_url=DEFAULT_BASE_URL): "Update a visual property mapping in a style.\n\n Updates the visual property mapping, overriding any prior mapping. Creates a visual property mapping if it doesn't\n already exist in the style. Requires visual property mappings to be previously created, see ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n mapping (dict): a single visual property mapping, see ``map_visual_property()``\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \n\n Raises:\n CyError: if style doesn't exist\n TypeError: if mapping isn't a visual property mapping\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> update_style_mapping('galFiltered Style', map_visual_property('node label', 'name', 'p'))\n \n\n See Also:\n :meth:`map_visual_property`\n " visual_prop_name = mapping['visualProperty'] res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) vp_list = [prop['visualProperty'] for prop in res] exists = (visual_prop_name in vp_list) if exists: res = commands.cyrest_put(f'styles/{style_name}/mappings/{visual_prop_name}', body=[mapping], base_url=base_url, require_json=False) else: res = commands.cyrest_post(f'styles/{style_name}/mappings', body=[mapping], base_url=base_url, require_json=False) time.sleep(MODEL_PROPAGATION_SECS) return res<|docstring|>Update a visual property mapping in a style. Updates the visual property mapping, overriding any prior mapping. Creates a visual property mapping if it doesn't already exist in the style. Requires visual property mappings to be previously created, see ``map_visual_property()``. 
Args: style_name (str): name for style mapping (dict): a single visual property mapping, see ``map_visual_property()`` base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if style doesn't exist TypeError: if mapping isn't a visual property mapping requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> update_style_mapping('galFiltered Style', map_visual_property('node label', 'name', 'p')) '' See Also: :meth:`map_visual_property`<|endoftext|>
f7bfcd1e7f02b345f2a27988ec0e5db628df5aafd8e9ff007c8434352c2bb436
@cy_log def delete_style_mapping(style_name, visual_prop, base_url=DEFAULT_BASE_URL): "Delete a specified visual style mapping from specified style.\n\n Args:\n style_name (str): name for style\n visual_prop (str): name of visual property to delete\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str or None: '' or None (if property doesn't exist)\n\n Raises:\n CyError: if style doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> delete_style_mapping('galFiltered Style', 'node label')\n ''\n " res = commands.cyrest_get((('styles/' + style_name) + '/mappings'), base_url=base_url) vp_list = [prop['visualProperty'] for prop in res] exists = (visual_prop in vp_list) if exists: res = commands.cyrest_delete(f'styles/{style_name}/mappings/{visual_prop}', base_url=base_url, require_json=False) else: res = None return res
Delete a specified visual style mapping from specified style. Args: style_name (str): name for style visual_prop (str): name of visual property to delete base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str or None: '' or None (if property doesn't exist) Raises: CyError: if style doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> delete_style_mapping('galFiltered Style', 'node label') ''
py4cytoscape/style_mappings.py
delete_style_mapping
tyasird/py4cytoscape
0
python
@cy_log def delete_style_mapping(style_name, visual_prop, base_url=DEFAULT_BASE_URL): "Delete a specified visual style mapping from specified style.\n\n Args:\n style_name (str): name for style\n visual_prop (str): name of visual property to delete\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str or None: or None (if property doesn't exist)\n\n Raises:\n CyError: if style doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> delete_style_mapping('galFiltered Style', 'node label')\n \n " res = commands.cyrest_get((('styles/' + style_name) + '/mappings'), base_url=base_url) vp_list = [prop['visualProperty'] for prop in res] exists = (visual_prop in vp_list) if exists: res = commands.cyrest_delete(f'styles/{style_name}/mappings/{visual_prop}', base_url=base_url, require_json=False) else: res = None return res
@cy_log def delete_style_mapping(style_name, visual_prop, base_url=DEFAULT_BASE_URL): "Delete a specified visual style mapping from specified style.\n\n Args:\n style_name (str): name for style\n visual_prop (str): name of visual property to delete\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str or None: or None (if property doesn't exist)\n\n Raises:\n CyError: if style doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> delete_style_mapping('galFiltered Style', 'node label')\n \n " res = commands.cyrest_get((('styles/' + style_name) + '/mappings'), base_url=base_url) vp_list = [prop['visualProperty'] for prop in res] exists = (visual_prop in vp_list) if exists: res = commands.cyrest_delete(f'styles/{style_name}/mappings/{visual_prop}', base_url=base_url, require_json=False) else: res = None return res<|docstring|>Delete a specified visual style mapping from specified style. Args: style_name (str): name for style visual_prop (str): name of visual property to delete base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str or None: '' or None (if property doesn't exist) Raises: CyError: if style doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> delete_style_mapping('galFiltered Style', 'node label') ''<|endoftext|>
28b5156982194c15dde335a11fd9f2494a0eb645fedeb0f5598f2695e718c89b
@cy_log def get_style_mapping(style_name, visual_prop, base_url=DEFAULT_BASE_URL): 'Fetch a visual property mapping in a style.\n\n The property mapping is the same as a dict created by ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n visual_prop (str): the name of the visual property\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: see ``map_visual_property()``\n\n Raises:\n CyError: if style or property name doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_style_mapping(\'galFiltered Style\', \'node label\')\n {"mappingType": "passthrough", "mappingColumn": "COMMON", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"}\n\n See Also:\n :meth:`map_visual_property`\n ' res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) for prop in res: if (prop['visualProperty'] == visual_prop): return prop raise CyError(f'Property "{visual_prop}" does not exist in style "{style_name}"')
Fetch a visual property mapping in a style. The property mapping is the same as a dict created by ``map_visual_property()``. Args: style_name (str): name for style visual_prop (str): the name of the visual property base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: dict: see ``map_visual_property()`` Raises: CyError: if style or property name doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> get_style_mapping('galFiltered Style', 'node label') {"mappingType": "passthrough", "mappingColumn": "COMMON", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"} See Also: :meth:`map_visual_property`
py4cytoscape/style_mappings.py
get_style_mapping
tyasird/py4cytoscape
0
python
@cy_log def get_style_mapping(style_name, visual_prop, base_url=DEFAULT_BASE_URL): 'Fetch a visual property mapping in a style.\n\n The property mapping is the same as a dict created by ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n visual_prop (str): the name of the visual property\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: see ``map_visual_property()``\n\n Raises:\n CyError: if style or property name doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_style_mapping(\'galFiltered Style\', \'node label\')\n {"mappingType": "passthrough", "mappingColumn": "COMMON", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"}\n\n See Also:\n :meth:`map_visual_property`\n ' res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) for prop in res: if (prop['visualProperty'] == visual_prop): return prop raise CyError(f'Property "{visual_prop}" does not exist in style "{style_name}"')
@cy_log def get_style_mapping(style_name, visual_prop, base_url=DEFAULT_BASE_URL): 'Fetch a visual property mapping in a style.\n\n The property mapping is the same as a dict created by ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n visual_prop (str): the name of the visual property\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: see ``map_visual_property()``\n\n Raises:\n CyError: if style or property name doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_style_mapping(\'galFiltered Style\', \'node label\')\n {"mappingType": "passthrough", "mappingColumn": "COMMON", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"}\n\n See Also:\n :meth:`map_visual_property`\n ' res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) for prop in res: if (prop['visualProperty'] == visual_prop): return prop raise CyError(f'Property "{visual_prop}" does not exist in style "{style_name}"')<|docstring|>Fetch a visual property mapping in a style. The property mapping is the same as a dict created by ``map_visual_property()``. Args: style_name (str): name for style visual_prop (str): the name of the visual property base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. 
Returns: dict: see ``map_visual_property()`` Raises: CyError: if style or property name doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> get_style_mapping('galFiltered Style', 'node label') {"mappingType": "passthrough", "mappingColumn": "COMMON", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"} See Also: :meth:`map_visual_property`<|endoftext|>
07dea129c3773cb8a737bde6e27f6a8b2fbeb994591738c7128fcacb22b8663b
@cy_log def get_style_all_mappings(style_name, base_url=DEFAULT_BASE_URL): 'Fetch all visual property mapping in a style.\n\n The property mappings are the same as a dict created by ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n list: list of dicts of the type created by ``map_visual_property()``\n\n Raises:\n CyError: if style or property name doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_style_all_mappings(\'galFiltered Style\')\n [{"mappingType": "passthrough", "mappingColumn": "name", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"},\n {"mappingType": "passthrough", "mappingColumn": "interaction", "mappingColumnType": "String", "visualProperty": "EDGE_LABEL"}]\n\n See Also:\n :meth:`map_visual_property`\n ' res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) return res
Fetch all visual property mapping in a style. The property mappings are the same as a dict created by ``map_visual_property()``. Args: style_name (str): name for style base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: list: list of dicts of the type created by ``map_visual_property()`` Raises: CyError: if style or property name doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> get_style_all_mappings('galFiltered Style') [{"mappingType": "passthrough", "mappingColumn": "name", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"}, {"mappingType": "passthrough", "mappingColumn": "interaction", "mappingColumnType": "String", "visualProperty": "EDGE_LABEL"}] See Also: :meth:`map_visual_property`
py4cytoscape/style_mappings.py
get_style_all_mappings
tyasird/py4cytoscape
0
python
@cy_log def get_style_all_mappings(style_name, base_url=DEFAULT_BASE_URL): 'Fetch all visual property mapping in a style.\n\n The property mappings are the same as a dict created by ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n list: list of dicts of the type created by ``map_visual_property()``\n\n Raises:\n CyError: if style or property name doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_style_all_mappings(\'galFiltered Style\')\n [{"mappingType": "passthrough", "mappingColumn": "name", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"},\n {"mappingType": "passthrough", "mappingColumn": "interaction", "mappingColumnType": "String", "visualProperty": "EDGE_LABEL"}]\n\n See Also:\n :meth:`map_visual_property`\n ' res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) return res
@cy_log def get_style_all_mappings(style_name, base_url=DEFAULT_BASE_URL): 'Fetch all visual property mapping in a style.\n\n The property mappings are the same as a dict created by ``map_visual_property()``.\n\n Args:\n style_name (str): name for style\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n list: list of dicts of the type created by ``map_visual_property()``\n\n Raises:\n CyError: if style or property name doesn\'t exist\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_style_all_mappings(\'galFiltered Style\')\n [{"mappingType": "passthrough", "mappingColumn": "name", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"},\n {"mappingType": "passthrough", "mappingColumn": "interaction", "mappingColumnType": "String", "visualProperty": "EDGE_LABEL"}]\n\n See Also:\n :meth:`map_visual_property`\n ' res = commands.cyrest_get(f'styles/{style_name}/mappings', base_url=base_url) return res<|docstring|>Fetch all visual property mapping in a style. The property mappings are the same as a dict created by ``map_visual_property()``. Args: style_name (str): name for style base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. 
Returns: list: list of dicts of the type created by ``map_visual_property()`` Raises: CyError: if style or property name doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> get_style_all_mappings('galFiltered Style') [{"mappingType": "passthrough", "mappingColumn": "name", "mappingColumnType": "String", "visualProperty": "NODE_LABEL"}, {"mappingType": "passthrough", "mappingColumn": "interaction", "mappingColumnType": "String", "visualProperty": "EDGE_LABEL"}] See Also: :meth:`map_visual_property`<|endoftext|>
5abec5f3cd231cd57a5f362d2099c8396cbd32782f029c6ef8f4022a84d6d5f1
@cy_log def set_node_border_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c', default_color=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to colors to set the node border color.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n colors (list): list of hex colors\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_color (str): Hex color to set as default\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if invalid color, table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_color_mapping(\'AverageShortestPathLength\', [1.0, 16.36], [\'#FBE723\', \'#440256\'], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_color_mapping(\'Degree\', [\'1\', \'2\'], [\'#FFFF00\', \'#00FF00\'], \'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_color_mapping(\'ColorCol\', mapping_type=\'p\', default_color=\'#654321\', style_name=\'galFiltered Style\')\n \'\'\n ' verify_hex_colors(colors) if (default_color is not None): style_defaults.set_node_border_color_default(default_color, style_name, base_url=base_url) return _update_visual_property('NODE_BORDER_PAINT', table_column, table_column_values=table_column_values, 
range_map=colors, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
Map table column values to colors to set the node border color. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping colors (list): list of hex colors mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_color (str): Hex color to set as default style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if invalid color, table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_border_color_mapping('AverageShortestPathLength', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style') '' >>> set_node_border_color_mapping('Degree', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style') '' >>> set_node_border_color_mapping('ColorCol', mapping_type='p', default_color='#654321', style_name='galFiltered Style') ''
py4cytoscape/style_mappings.py
set_node_border_color_mapping
tyasird/py4cytoscape
0
python
@cy_log def set_node_border_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c', default_color=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to colors to set the node border color.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n colors (list): list of hex colors\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_color (str): Hex color to set as default\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if invalid color, table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_color_mapping(\'AverageShortestPathLength\', [1.0, 16.36], [\'#FBE723\', \'#440256\'], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_color_mapping(\'Degree\', [\'1\', \'2\'], [\'#FFFF00\', \'#00FF00\'], \'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_color_mapping(\'ColorCol\', mapping_type=\'p\', default_color=\'#654321\', style_name=\'galFiltered Style\')\n \'\'\n ' verify_hex_colors(colors) if (default_color is not None): style_defaults.set_node_border_color_default(default_color, style_name, base_url=base_url) return _update_visual_property('NODE_BORDER_PAINT', table_column, table_column_values=table_column_values, 
range_map=colors, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
@cy_log def set_node_border_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c', default_color=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to colors to set the node border color.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n colors (list): list of hex colors\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_color (str): Hex color to set as default\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if invalid color, table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_color_mapping(\'AverageShortestPathLength\', [1.0, 16.36], [\'#FBE723\', \'#440256\'], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_color_mapping(\'Degree\', [\'1\', \'2\'], [\'#FFFF00\', \'#00FF00\'], \'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_color_mapping(\'ColorCol\', mapping_type=\'p\', default_color=\'#654321\', style_name=\'galFiltered Style\')\n \'\'\n ' verify_hex_colors(colors) if (default_color is not None): style_defaults.set_node_border_color_default(default_color, style_name, base_url=base_url) return _update_visual_property('NODE_BORDER_PAINT', table_column, table_column_values=table_column_values, 
range_map=colors, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)<|docstring|>Map table column values to colors to set the node border color. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping colors (list): list of hex colors mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_color (str): Hex color to set as default style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if invalid color, table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_border_color_mapping('AverageShortestPathLength', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style') '' >>> set_node_border_color_mapping('Degree', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style') '' >>> set_node_border_color_mapping('ColorCol', mapping_type='p', default_color='#654321', style_name='galFiltered Style') ''<|endoftext|>
c4f2556ebac92de2512c617a8a224b515fbb93bd6d07306265c925cbe715455b
@cy_log def set_node_border_opacity_mapping(table_column, table_column_values=None, opacities=None, mapping_type='c', default_opacity=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Set opacity for node border only.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n opacities (list): int values between 0 and 255; 0 is invisible\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_opacity (int): Opacity value to set as default for all unmapped values\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type, or if invalid opacity\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_opacity_mapping(\'AverageShortestPathLength\', table_column_values=[1.0, 16.36], opacities=[50, 100], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_opacity_mapping(\'Degree\', table_column_values=[\'1\', \'2\'], opacities=[50, 100], mapping_type=\'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_opacity_mapping(\'PassthruCol\', mapping_type=\'p\', default_opacity=225, style_name=\'galFiltered Style\')\n \'\'\n ' verify_opacities(opacities) if (default_opacity is not None): verify_opacities(default_opacity) style_defaults.set_visual_property_default({'visualProperty': 
'NODE_BORDER_TRANSPARENCY', 'value': str(default_opacity)}, style_name=style_name, base_url=base_url) return _update_visual_property('NODE_BORDER_TRANSPARENCY', table_column, table_column_values=table_column_values, range_map=opacities, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
Set opacity for node border only. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping opacities (list): int values between 0 and 255; 0 is invisible mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_opacity (int): Opacity value to set as default for all unmapped values style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type, or if invalid opacity requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_border_opacity_mapping('AverageShortestPathLength', table_column_values=[1.0, 16.36], opacities=[50, 100], style_name='galFiltered Style') '' >>> set_node_border_opacity_mapping('Degree', table_column_values=['1', '2'], opacities=[50, 100], mapping_type='d', style_name='galFiltered Style') '' >>> set_node_border_opacity_mapping('PassthruCol', mapping_type='p', default_opacity=225, style_name='galFiltered Style') ''
py4cytoscape/style_mappings.py
set_node_border_opacity_mapping
tyasird/py4cytoscape
0
python
@cy_log def set_node_border_opacity_mapping(table_column, table_column_values=None, opacities=None, mapping_type='c', default_opacity=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Set opacity for node border only.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n opacities (list): int values between 0 and 255; 0 is invisible\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_opacity (int): Opacity value to set as default for all unmapped values\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type, or if invalid opacity\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_opacity_mapping(\'AverageShortestPathLength\', table_column_values=[1.0, 16.36], opacities=[50, 100], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_opacity_mapping(\'Degree\', table_column_values=[\'1\', \'2\'], opacities=[50, 100], mapping_type=\'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_opacity_mapping(\'PassthruCol\', mapping_type=\'p\', default_opacity=225, style_name=\'galFiltered Style\')\n \'\'\n ' verify_opacities(opacities) if (default_opacity is not None): verify_opacities(default_opacity) style_defaults.set_visual_property_default({'visualProperty': 
'NODE_BORDER_TRANSPARENCY', 'value': str(default_opacity)}, style_name=style_name, base_url=base_url) return _update_visual_property('NODE_BORDER_TRANSPARENCY', table_column, table_column_values=table_column_values, range_map=opacities, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
@cy_log def set_node_border_opacity_mapping(table_column, table_column_values=None, opacities=None, mapping_type='c', default_opacity=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Set opacity for node border only.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n opacities (list): int values between 0 and 255; 0 is invisible\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_opacity (int): Opacity value to set as default for all unmapped values\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type, or if invalid opacity\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_opacity_mapping(\'AverageShortestPathLength\', table_column_values=[1.0, 16.36], opacities=[50, 100], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_opacity_mapping(\'Degree\', table_column_values=[\'1\', \'2\'], opacities=[50, 100], mapping_type=\'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_opacity_mapping(\'PassthruCol\', mapping_type=\'p\', default_opacity=225, style_name=\'galFiltered Style\')\n \'\'\n ' verify_opacities(opacities) if (default_opacity is not None): verify_opacities(default_opacity) style_defaults.set_visual_property_default({'visualProperty': 
'NODE_BORDER_TRANSPARENCY', 'value': str(default_opacity)}, style_name=style_name, base_url=base_url) return _update_visual_property('NODE_BORDER_TRANSPARENCY', table_column, table_column_values=table_column_values, range_map=opacities, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)<|docstring|>Set opacity for node border only. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping opacities (list): int values between 0 and 255; 0 is invisible mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_opacity (int): Opacity value to set as default for all unmapped values style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type, or if invalid opacity requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_border_opacity_mapping('AverageShortestPathLength', table_column_values=[1.0, 16.36], opacities=[50, 100], style_name='galFiltered Style') '' >>> set_node_border_opacity_mapping('Degree', table_column_values=['1', '2'], opacities=[50, 100], mapping_type='d', style_name='galFiltered Style') '' >>> set_node_border_opacity_mapping('PassthruCol', mapping_type='p', default_opacity=225, style_name='galFiltered Style') ''<|endoftext|>
b6208799042286bf694c3e803f52368441c63c6b49c80152144943c20fe12843
@cy_log def set_node_border_width_mapping(table_column, table_column_values=None, widths=None, mapping_type='c', default_width=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to widths to set the node border width.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n widths (list): List of width values to map to ``table_column_values``\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_width (int): Width value to set as default for all unmapped values\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type, or if invalid width\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_width_mapping(\'AverageShortestPathLength\', table_column_values=[1.0, 16.36], widths=[5, 10], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_width_mapping(\'Degree\', table_column_values=[\'1\', \'2\'], widths=[5, 10], mapping_type=\'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_width_mapping(\'PassthruCol\', mapping_type=\'p\', default_width=3, style_name=\'galFiltered Style\')\n \'\'\n ' verify_dimensions('width', widths) if (default_width is not None): style_defaults.set_node_border_width_default(default_width, style_name, base_url=base_url) return 
_update_visual_property('NODE_BORDER_WIDTH', table_column, table_column_values=table_column_values, range_map=widths, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
Map table column values to widths to set the node border width. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping widths (list): List of width values to map to ``table_column_values`` mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_width (int): Width value to set as default for all unmapped values style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type, or if invalid width requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_border_width_mapping('AverageShortestPathLength', table_column_values=[1.0, 16.36], widths=[5, 10], style_name='galFiltered Style') '' >>> set_node_border_width_mapping('Degree', table_column_values=['1', '2'], widths=[5, 10], mapping_type='d', style_name='galFiltered Style') '' >>> set_node_border_width_mapping('PassthruCol', mapping_type='p', default_width=3, style_name='galFiltered Style') ''
py4cytoscape/style_mappings.py
set_node_border_width_mapping
tyasird/py4cytoscape
0
python
@cy_log def set_node_border_width_mapping(table_column, table_column_values=None, widths=None, mapping_type='c', default_width=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to widths to set the node border width.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n widths (list): List of width values to map to ``table_column_values``\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_width (int): Width value to set as default for all unmapped values\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type, or if invalid width\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_width_mapping(\'AverageShortestPathLength\', table_column_values=[1.0, 16.36], widths=[5, 10], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_width_mapping(\'Degree\', table_column_values=[\'1\', \'2\'], widths=[5, 10], mapping_type=\'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_width_mapping(\'PassthruCol\', mapping_type=\'p\', default_width=3, style_name=\'galFiltered Style\')\n \'\'\n ' verify_dimensions('width', widths) if (default_width is not None): style_defaults.set_node_border_width_default(default_width, style_name, base_url=base_url) return 
_update_visual_property('NODE_BORDER_WIDTH', table_column, table_column_values=table_column_values, range_map=widths, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
@cy_log def set_node_border_width_mapping(table_column, table_column_values=None, widths=None, mapping_type='c', default_width=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to widths to set the node border width.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n widths (list): List of width values to map to ``table_column_values``\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_width (int): Width value to set as default for all unmapped values\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type, or if invalid width\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_border_width_mapping(\'AverageShortestPathLength\', table_column_values=[1.0, 16.36], widths=[5, 10], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_width_mapping(\'Degree\', table_column_values=[\'1\', \'2\'], widths=[5, 10], mapping_type=\'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_border_width_mapping(\'PassthruCol\', mapping_type=\'p\', default_width=3, style_name=\'galFiltered Style\')\n \'\'\n ' verify_dimensions('width', widths) if (default_width is not None): style_defaults.set_node_border_width_default(default_width, style_name, base_url=base_url) return 
_update_visual_property('NODE_BORDER_WIDTH', table_column, table_column_values=table_column_values, range_map=widths, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)<|docstring|>Map table column values to widths to set the node border width. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping widths (list): List of width values to map to ``table_column_values`` mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_width (int): Width value to set as default for all unmapped values style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type, or if invalid width requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_border_width_mapping('AverageShortestPathLength', table_column_values=[1.0, 16.36], widths=[5, 10], style_name='galFiltered Style') '' >>> set_node_border_width_mapping('Degree', table_column_values=['1', '2'], widths=[5, 10], mapping_type='d', style_name='galFiltered Style') '' >>> set_node_border_width_mapping('PassthruCol', mapping_type='p', default_width=3, style_name='galFiltered Style') ''<|endoftext|>
4d7e802e4b3e697bfbfb919800a0c9b6f6f6dbb5c867980f73cffa8864d0a0c1
@cy_log
def set_node_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c',
                           default_color=None, style_name='default', network=None,
                           base_url=DEFAULT_BASE_URL):
    """Map table column values to colors to set the node fill color.

    Args:
        table_column (str): Name of Cytoscape table column to map values from
        table_column_values (list): List of values from Cytoscape table to be used in mapping
        colors (list): list of hex colors to map to ``table_column_values``
        mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous
        default_color (str): Hex color to set as default
        style_name (str): name for style
        network (SUID or str or None): Name or SUID of a network or view. Default is the
            "current" network active in Cytoscape.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://localhost:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        str: ''

    Raises:
        CyError: if invalid color, table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> set_node_color_mapping('AverageShortestPathLength', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style')
        ''
        >>> set_node_color_mapping('Degree', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style')
        ''
        >>> set_node_color_mapping('ColorCol', mapping_type='p', default_color='#654321', style_name='galFiltered Style')
        ''
    """
    # Validate the hex color list before touching the style (raises CyError on bad colors).
    verify_hex_colors(colors)

    # Install the requested default fill color first so unmapped nodes use it.
    if default_color is not None:
        style_defaults.set_node_color_default(default_color, style_name, base_url=base_url)

    result = _update_visual_property('NODE_FILL_COLOR', table_column,
                                     table_column_values=table_column_values,
                                     range_map=colors,
                                     mapping_type=mapping_type,
                                     style_name=style_name,
                                     network=network,
                                     base_url=base_url)
    return result
Map table column values to colors to set the node fill color. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping colors (list): list of hex colors to map to ``table_column_values`` mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_color (str): Hex color to set as default style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if invalid color, table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_color_mapping('AverageShortestPathLength', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style') '' >>> set_node_color_mapping('Degree', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style') '' >>> set_node_color_mapping('ColorCol', mapping_type='p', default_color='#654321', style_name='galFiltered Style') ''
py4cytoscape/style_mappings.py
set_node_color_mapping
tyasird/py4cytoscape
0
python
@cy_log def set_node_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c', default_color=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to colors to set the node fill color.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n colors (list): list of hex colors to map to ``table_column_values``\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_color (str): Hex color to set as default\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if invalid color, table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_color_mapping(\'AverageShortestPathLength\', [1.0, 16.36], [\'#FBE723\', \'#440256\'], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_color_mapping(\'Degree\', [\'1\', \'2\'], [\'#FFFF00\', \'#00FF00\'], \'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_color_mapping(\'ColorCol\', mapping_type=\'p\', default_color=\'#654321\', style_name=\'galFiltered Style\')\n \'\'\n ' verify_hex_colors(colors) if (default_color is not None): style_defaults.set_node_color_default(default_color, style_name, base_url=base_url) return _update_visual_property('NODE_FILL_COLOR', table_column, table_column_values=table_column_values, 
range_map=colors, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)
@cy_log def set_node_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c', default_color=None, style_name='default', network=None, base_url=DEFAULT_BASE_URL): 'Map table column values to colors to set the node fill color.\n\n Args:\n table_column (str): Name of Cytoscape table column to map values from\n table_column_values (list): List of values from Cytoscape table to be used in mapping\n colors (list): list of hex colors to map to ``table_column_values``\n mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous\n default_color (str): Hex color to set as default\n style_name (str): name for style\n network (SUID or str or None): Name or SUID of a network or view. Default is the\n "current" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://localhost:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n str: \'\'\n\n Raises:\n CyError: if invalid color, table column doesn\'t exist, table column values doesn\'t match values list, or invalid style name, network or mapping type\n requests.exceptions.RequestException: if can\'t connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> set_node_color_mapping(\'AverageShortestPathLength\', [1.0, 16.36], [\'#FBE723\', \'#440256\'], style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_color_mapping(\'Degree\', [\'1\', \'2\'], [\'#FFFF00\', \'#00FF00\'], \'d\', style_name=\'galFiltered Style\')\n \'\'\n >>> set_node_color_mapping(\'ColorCol\', mapping_type=\'p\', default_color=\'#654321\', style_name=\'galFiltered Style\')\n \'\'\n ' verify_hex_colors(colors) if (default_color is not None): style_defaults.set_node_color_default(default_color, style_name, base_url=base_url) return _update_visual_property('NODE_FILL_COLOR', table_column, table_column_values=table_column_values, 
range_map=colors, mapping_type=mapping_type, style_name=style_name, network=network, base_url=base_url)<|docstring|>Map table column values to colors to set the node fill color. Args: table_column (str): Name of Cytoscape table column to map values from table_column_values (list): List of values from Cytoscape table to be used in mapping colors (list): list of hex colors to map to ``table_column_values`` mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous default_color (str): Hex color to set as default style_name (str): name for style network (SUID or str or None): Name or SUID of a network or view. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: str: '' Raises: CyError: if invalid color, table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> set_node_color_mapping('AverageShortestPathLength', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style') '' >>> set_node_color_mapping('Degree', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style') '' >>> set_node_color_mapping('ColorCol', mapping_type='p', default_color='#654321', style_name='galFiltered Style') ''<|endoftext|>