| INSTRUCTION | RESPONSE |
|---|---|
Return list of unicodes for <scanning-codepoints> | def get_unicodes(codepoint):
""" Return list of unicodes for <scanning-codepoints> """
    result = re.sub(r'\s', '', codepoint.text)
return Extension.convert_to_list_of_unicodes(result) |
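The helper `Extension.convert_to_list_of_unicodes` is defined elsewhere; here is a minimal sketch of the whitespace-stripping step, with a hypothetical payload and a guessed comma-split standing in for the real helper:

```python
import re

# Hypothetical <scanning-codepoints> payload; real input comes from an XML node.
text = " 0041..005A,\n 0061 "
compact = re.sub(r'\s', '', text)  # strip ALL whitespace -> "0041..005A,0061"
print(compact.split(','))         # -> ['0041..005A', '0061']
```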
* Get request token if OAuth1 * Get user authorization * Get access token | def handler(self,):
        """* Get request token if OAuth1
* Get user authorization
* Get access token
"""
if self.oauth_version == 'oauth1':
request_token, request_token_secret = self.oauth.get_request_token(params={'oauth_callback': self.callback_uri})
... |
Generates header for oauth2 | def generate_oauth2_headers(self):
"""Generates header for oauth2
"""
        encoded_credentials = base64.b64encode('{0}:{1}'.format(self.consumer_key, self.consumer_secret).encode('utf-8'))
        headers = {
            'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')),
... |
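The header built above is standard HTTP Basic auth (RFC 7617); a self-contained sketch with placeholder credentials:

```python
import base64

# Placeholder credentials for illustration only.
consumer_key, consumer_secret = 'my-key', 'my-secret'
encoded = base64.b64encode('{0}:{1}'.format(consumer_key, consumer_secret).encode('utf-8'))
headers = {'Authorization': 'Basic {0}'.format(encoded.decode('utf-8'))}
print(headers['Authorization'])  # -> Basic bXkt...
```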
Parse oauth2 access | def oauth2_access_parser(self, raw_access):
"""Parse oauth2 access
"""
parsed_access = json.loads(raw_access.content.decode('utf-8'))
self.access_token = parsed_access['access_token']
self.token_type = parsed_access['token_type']
self.refresh_token = parsed_access['refres... |
Refresh access token | def refresh_access_token(self,):
"""Refresh access token
"""
logger.debug("REFRESHING TOKEN")
self.token_time = time.time()
credentials = {
'token_time': self.token_time
}
if self.oauth_version == 'oauth1':
self.access_token, self.access_t... |
Check the validity of the token: 3600s | def token_is_valid(self,):
"""Check the validity of the token :3600s
"""
elapsed_time = time.time() - self.token_time
logger.debug("ELAPSED TIME : {0}".format(elapsed_time))
if elapsed_time > 3540: # 1 minute before it expires
logger.debug("TOKEN HAS EXPIRED")
... |
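A minimal sketch of the expiry check above, assuming the 3600 s lifetime from the docstring and the 60 s safety margin from the comment:

```python
import time

TOKEN_LIFETIME = 3600  # seconds, per the docstring above
SAFETY_MARGIN = 60     # refresh one minute before actual expiry

def token_is_valid(token_time, now=None):
    # True while the token is comfortably inside its lifetime.
    elapsed = (now if now is not None else time.time()) - token_time
    return elapsed <= TOKEN_LIFETIME - SAFETY_MARGIN

issued = time.time()
print(token_is_valid(issued))         # True: freshly issued
print(token_is_valid(issued - 3550))  # False: within the safety margin
```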
Calls right function according to file extension | def get_data(filename):
"""Calls right function according to file extension
"""
name, ext = get_file_extension(filename)
func = json_get_data if ext == '.json' else yaml_get_data
return func(filename) |
Call right func to save data according to file extension | def write_data(data, filename):
"""Call right func to save data according to file extension
"""
name, ext = get_file_extension(filename)
func = json_write_data if ext == '.json' else yaml_write_data
return func(data, filename) |
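Both dispatchers rely on a `get_file_extension` helper that is not shown; a plausible stand-in (an assumption) behaves like `os.path.splitext`:

```python
import os

def get_file_extension(filename):
    # Plausible stand-in for the helper used above; returns (name, ext).
    return os.path.splitext(filename)

print(get_file_extension('config.json'))      # ('config', '.json')
print(get_file_extension('data/corpus.yml'))  # ('data/corpus', '.yml')
```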
Write json data into a file | def json_write_data(json_data, filename):
"""Write json data into a file
"""
with open(filename, 'w') as fp:
json.dump(json_data, fp, indent=4, sort_keys=True, ensure_ascii=False)
        return True |
Get data from json file | def json_get_data(filename):
"""Get data from json file
"""
with open(filename) as fp:
json_data = json.load(fp)
        return json_data |
Get data from .yml file | def yaml_get_data(filename):
"""Get data from .yml file
"""
with open(filename, 'rb') as fd:
        yaml_data = yaml.safe_load(fd)
        return yaml_data |
Write data into a .yml file | def yaml_write_data(yaml_data, filename):
"""Write data into a .yml file
"""
with open(filename, 'w') as fd:
yaml.dump(yaml_data, fd, default_flow_style=False)
        return True |
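A quick self-contained round trip through the JSON pair above, written to a temporary directory:

```python
import json
import os
import tempfile

data = {'name': 'example', 'values': [1, 2, 3]}
path = os.path.join(tempfile.mkdtemp(), 'sample.json')
with open(path, 'w') as fp:
    json.dump(data, fp, indent=4, sort_keys=True, ensure_ascii=False)
with open(path) as fp:
    assert json.load(fp) == data
```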
If scale_by_median, find :attr:`median_`; otherwise, do nothing. | def fit(self, X, y=None):
'''
If scale_by_median, find :attr:`median_`; otherwise, do nothing.
Parameters
----------
X : array
The raw pairwise distances.
'''
X = check_array(X)
if self.scale_by_median:
self.median_ = np.median(X[... |
Turns distances into RBF values. | def transform(self, X):
'''
Turns distances into RBF values.
Parameters
----------
X : array
The raw pairwise distances.
Returns
-------
X_rbf : array of same shape as X
The distances in X passed through the RBF kernel.
''... |
Learn the linear transformation to clipped eigenvalues. | def fit(self, X, y=None):
'''
Learn the linear transformation to clipped eigenvalues.
Note that if min_eig isn't zero and any of the original eigenvalues
were exactly zero, this will leave those eigenvalues as zero.
Parameters
----------
X : array, shape [n, n]
... |
Learn the linear transformation to flipped eigenvalues. | def fit(self, X, y=None):
'''
Learn the linear transformation to flipped eigenvalues.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-trian... |
Transforms X according to the linear transformation corresponding to flipping the input eigenvalues. | def transform(self, X):
'''
Transforms X according to the linear transformation corresponding to
flipping the input eigenvalues.
Parameters
----------
X : array, shape [n_test, n]
The test similarities to training points.
Returns
-------
... |
Flips the negative eigenvalues of X. | def fit_transform(self, X, y=None):
'''
Flips the negative eigenvalues of X.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular par... |
Learn the transformation to shifted eigenvalues. Only depends on the input dimension. | def fit(self, X, y=None):
'''
Learn the transformation to shifted eigenvalues. Only depends
on the input dimension.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities.
'''
n = X.shape[0]
if X.shape != (n, ... |
Transforms X according to the linear transformation corresponding to shifting the input eigenvalues to all be at least ``self.min_eig``. | def transform(self, X):
'''
Transforms X according to the linear transformation corresponding to
shifting the input eigenvalues to all be at least ``self.min_eig``.
Parameters
----------
X : array, shape [n_test, n]
The test similarities to training points.
... |
Picks the elements of the basis to use for the given data. | def fit(self, X, y=None):
'''
Picks the elements of the basis to use for the given data.
Only depends on the dimension of X. If it's more convenient, you can
pass a single integer for X, which is the dimension to use.
Parameters
----------
X : an integer, a :cla... |
Transform a list of bag features into its projection series representation. | def transform(self, X):
'''
Transform a list of bag features into its projection series
representation.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of bag feature arrays
New data to transform. The data should all lie in [0, 1];
... |
Get distribution version. | def get_version(self):
"""
Get distribution version.
This method is enhanced compared to original distutils implementation.
If the version string is set to a special value then instead of using
the actual value the real version is obtained by querying versiontools.
If ... |
Get a live version string using versiontools | def __get_live_version(self):
"""
Get a live version string using versiontools
"""
try:
import versiontools
except ImportError:
return None
else:
return str(versiontools.Version.from_expression(self.name)) |
Fit the transformer on the stacked points. | def fit(self, X, y=None, **params):
'''
Fit the transformer on the stacked points.
Parameters
----------
X : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]``
Training set. If a Features object, it will be stacked.
any other keyword... |
Transform the stacked points. | def transform(self, X, **params):
'''
Transform the stacked points.
Parameters
----------
X : :class:`Features` or list of bag feature arrays
New data to transform.
any other keyword argument :
Passed on as keyword arguments to the transformer's ... |
Fit and transform the stacked points. | def fit_transform(self, X, y=None, **params):
'''
Fit and transform the stacked points.
Parameters
----------
X : :class:`Features` or list of bag feature arrays
Data to train on and transform.
any other keyword argument :
Passed on as keyword ar... |
Transform data back to its original space, i.e., return an input X_original whose transform would (maybe approximately) be X. | def inverse_transform(self, X, **params):
'''
Transform data back to its original space, i.e., return an input
X_original whose transform would (maybe approximately) be X.
Parameters
----------
X : :class:`Features` or list of bag feature arrays
Data to train... |
Compute the minimum and maximum to be used for later scaling. | def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features a... |
Scaling features of X according to feature_range. | def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_array(X, copy=self.copy)
X *= self.scale_
X... |
Undo the scaling of X according to feature_range. | def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Note that if truncate is true, any truncated points will not
be restored exactly.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that wil... |
Choose the codewords based on a training set. | def fit(self, X, y=None):
'''
Choose the codewords based on a training set.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]``
Training set. If a Features object, it will be stacked.
'''
... |
Transform a list of bag features into its bag-of-words representation. | def transform(self, X):
'''
Transform a list of bag features into its bag-of-words representation.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of bag feature arrays
New data to transform.
Returns
-------
X_new : in... |
Compute clustering and transform a list of bag features into its bag-of-words representation. Like calling fit(X) and then transform(X), but more efficient. | def fit_transform(self, X):
'''
Compute clustering and transform a list of bag features into its
bag-of-words representation. Like calling fit(X) and then transform(X),
but more efficient.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of... |
Checks whether the array is either integral or boolean. | def is_categorical_type(ary):
"Checks whether the array is either integral or boolean."
ary = np.asanyarray(ary)
return is_integer_type(ary) or ary.dtype.kind == 'b' |
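For example (with a plausible `is_integer_type`, which is defined elsewhere in the source and assumed here):

```python
import numpy as np

def is_integer_type(ary):
    # Assumed definition of the helper referenced above.
    return np.asanyarray(ary).dtype.kind in 'iu'

def is_categorical_type(ary):
    ary = np.asanyarray(ary)
    return is_integer_type(ary) or ary.dtype.kind == 'b'

print(is_categorical_type([1, 2, 3]))      # True: integral
print(is_categorical_type([True, False]))  # True: boolean
print(is_categorical_type([1.5, 2.0]))     # False: float
```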
Returns argument as an integer array, converting floats if convertible. Raises ValueError if it's a float array with nonintegral values. | def as_integer_type(ary):
    '''
    Returns argument as an integer array, converting floats if convertible.
Raises ValueError if it's a float array with nonintegral values.
'''
ary = np.asanyarray(ary)
if is_integer_type(ary):
return ary
rounded = np.rint(ary)
if np.any(rounded != ary)... |
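The truncated tail presumably raises on non-integral values; here is a hypothetical standalone reconstruction of that check (not the original code):

```python
import numpy as np

def as_integer_type_sketch(ary):
    # Hypothetical sketch: the original's exact error message and output
    # dtype are not shown above.
    ary = np.asanyarray(ary)
    if ary.dtype.kind in 'iu':
        return ary
    rounded = np.rint(ary)
    if np.any(rounded != ary):
        raise ValueError("array has nonintegral values")
    return rounded.astype(np.int64)

print(as_integer_type_sketch([1.0, 2.0]))  # [1 2]
```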
Sets up a :class:`ProgressBarHandler` to handle progress logs for a given module. | def show_progress(name, **kwargs):
    '''
    Sets up a :class:`ProgressBarHandler` to handle progress logs for
a given module.
Parameters
----------
name : string
The module name of the progress logger to use. For example,
:class:`skl_groups.divergences.KNNDivergenceEstimator`
... |
Signal the start of the process. | def start(self, total):
'''
Signal the start of the process.
Parameters
----------
total : int
The total number of steps in the process, or None if unknown.
'''
self.logger.info(json.dumps(['START', self.name, total])) |
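The progress protocol is line-oriented JSON; a consumer could parse the record `start` emits like so (the module name is a placeholder):

```python
import json

line = json.dumps(['START', 'skl_groups.divergences', 5000])
event, name, total = json.loads(line)
print(event, name, total)  # START skl_groups.divergences 5000
```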
Builds FLANN indices for each bag. | def _build_indices(X, flann_args):
"Builds FLANN indices for each bag."
# TODO: should probably multithread this
logger.info("Building indices...")
indices = [None] * len(X)
for i, bag in enumerate(plog(X, name="index building")):
indices[i] = idx = FLANNIndex(**flann_args)
idx.build... |
Gets within-bag distances for each bag. | def _get_rhos(X, indices, Ks, max_K, save_all_Ks, min_dist):
"Gets within-bag distances for each bag."
logger.info("Getting within-bag distances...")
if max_K >= X.n_pts.min():
msg = "asked for K = {}, but there's a bag with only {} points"
raise ValueError(msg.format(max_K, X.n_pts.min()))... |
Estimates the linear inner product \int p q between two distributions, based on kNN distances. | def linear(Ks, dim, num_q, rhos, nus):
r'''
Estimates the linear inner product \int p q between two distributions,
based on kNN distances.
'''
return _get_linear(Ks, dim)(num_q, rhos, nus) |
Estimate the alpha divergence between distributions, \int p^\alpha q^(1-\alpha), based on kNN distances. | def alpha_div(alphas, Ks, dim, num_q, rhos, nus):
r'''
Estimate the alpha divergence between distributions:
\int p^\alpha q^(1-\alpha)
based on kNN distances.
Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences.
Enforces that estimates are >= 0.
Returns divergence estimates w... |
Estimates 1/2 mean_X( d * log(radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight, where X points have weight 1/(2n-1) and Y points have weight n/(m(2n-1))) - digamma(# of neighbors in that ball) ) | def jensen_shannon_core(Ks, dim, num_q, rhos, nus):
r'''
Estimates
1/2 mean_X( d * log radius of largest ball in X+Y around X_i
with no more than M/(n+m-1) weight
where X points have weight 1 / (2 n - 1)
... |
Estimate the Bhattacharyya coefficient between distributions, based on kNN distances: \int \sqrt{p q} | def bhattacharyya(Ks, dim, required, clamp=True, to_self=False):
r'''
Estimate the Bhattacharyya coefficient between distributions, based on kNN
distances: \int \sqrt{p q}
If clamp (the default), enforces 0 <= BC <= 1.
Returns an array of shape (num_Ks,).
'''
est = required
if clamp:
... |
Estimate the Hellinger distance between distributions, based on kNN distances: \sqrt{1 - \int \sqrt{p q}} | def hellinger(Ks, dim, required, clamp=True, to_self=False):
r'''
Estimate the Hellinger distance between distributions, based on kNN
distances: \sqrt{1 - \int \sqrt{p q}}
Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces
H <= 1.
Returns a vector: one element for each K.
... |
Estimate the Renyi-alpha divergence between distributions, based on kNN distances: 1/(\alpha-1) \log \int p^alpha q^(1-\alpha) | def renyi(alphas, Ks, dim, required, min_val=np.spacing(1),
clamp=True, to_self=False):
r'''
Estimate the Renyi-alpha divergence between distributions, based on kNN
distances: 1/(\alpha-1) \log \int p^alpha q^(1-\alpha)
If the inner integral is less than min_val (default ``np.spacing(1)``),
... |
Estimate the Tsallis-alpha divergence between distributions, based on kNN distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1) | def tsallis(alphas, Ks, dim, required, clamp=True, to_self=False):
r'''
Estimate the Tsallis-alpha divergence between distributions, based on kNN
distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1)
If clamp (the default), enforces the estimate is nonnegative.
Returns an array of shape (num_... |
Estimates the L2 distance between distributions, via \int (p - q)^2 = \int p^2 - \int p q - \int q p + \int q^2. | def l2(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False):
r'''
Estimates the L2 distance between distributions, via
\int (p - q)^2 = \int p^2 - \int p q - \int q p + \int q^2.
\int pq and \int qp are estimated with the linear function (in both
directions), while \int p^2 and \int q^... |
Estimates \int p^2 based on kNN distances. | def quadratic(Ks, dim, rhos, required=None):
r'''
Estimates \int p^2 based on kNN distances.
In here because it's used in the l2 distance, above.
Returns array of shape (num_Ks,).
'''
# Estimated with alpha=1, beta=0:
# B_{k,d,1,0} is the same as B_{k,d,0,1} in linear()
# and the ful... |
Estimate the difference between the Shannon entropy of an equally-weighted mixture between X and Y and the mixture of the Shannon entropies: | def jensen_shannon(Ks, dim, X_rhos, Y_rhos, required,
clamp=True, to_self=False):
r'''
Estimate the difference between the Shannon entropy of an equally-weighted
mixture between X and Y and the mixture of the Shannon entropies:
JS(X, Y) = H[ (X + Y) / 2 ] - (H[X] + H[Y]) / 2
... |
Topologically sort a DAG, represented by a dict of child => set of parents. The dependency dict is destroyed during operation. | def topological_sort(deps):
'''
Topologically sort a DAG, represented by a dict of child => set of parents.
The dependency dict is destroyed during operation.
Uses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting
Not a particularly good implementation, but we're just running it ... |
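Since the row is cut off, here is a compact, self-contained Kahn-style sort over the same child => set-of-parents representation; an illustration, not the original implementation:

```python
def topological_sort_sketch(deps):
    # deps maps child -> set of parents; every node must appear as a key.
    # The dict is consumed, mirroring the "destroyed during operation" note.
    order = []
    ready = [node for node, parents in deps.items() if not parents]
    while ready:
        node = ready.pop()
        order.append(node)
        deps.pop(node, None)
        for child, parents in deps.items():
            parents.discard(node)
            if not parents and child not in order and child not in ready:
                ready.append(child)
    if deps:
        raise ValueError("graph has a cycle: {}".format(sorted(deps)))
    return order

print(topological_sort_sketch({'a': set(), 'b': {'a'}, 'c': {'a', 'b'}}))
# ['a', 'b', 'c']
```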
Set up the different functions we need to call. | def _parse_specs(specs, Ks):
'''
Set up the different functions we need to call.
Returns:
- a dict mapping base estimator functions to _FuncInfo objects.
If the function needs_alpha, then the alphas attribute is an array
of alpha values and pos is a corresponding array of indice... |
Ks as an array and type - checked. | def _get_Ks(self):
"Ks as an array and type-checked."
Ks = as_integer_type(self.Ks)
if Ks.ndim != 1:
raise TypeError("Ks should be 1-dim, got shape {}".format(Ks.shape))
if Ks.min() < 1:
raise ValueError("Ks should be positive; got {}".format(Ks.min()))
re... |
The dictionary of arguments to give to FLANN. | def _flann_args(self, X=None):
"The dictionary of arguments to give to FLANN."
args = {'cores': self._n_jobs}
if self.flann_algorithm == 'auto':
if X is None or X.dim > 5:
args['algorithm'] = 'linear'
else:
args['algorithm'] = 'kdtree_singl... |
Sets up for divergence estimation from new data to X. Builds FLANN indices for each bag and maybe gets within - bag distances. | def fit(self, X, y=None, get_rhos=False):
'''
Sets up for divergence estimation "from" new data "to" X.
Builds FLANN indices for each bag, and maybe gets within-bag distances.
Parameters
----------
X : list of arrays or :class:`skl_groups.features.Features`
T... |
Computes the divergences from X to :attr:`features_`. | def transform(self, X):
r'''
Computes the divergences from X to :attr:`features_`.
Parameters
----------
X : list of bag feature arrays or :class:`skl_groups.features.Features`
The bags to search "from".
Returns
-------
divs : array of shape ... |
Returns a version of X as a :class:`Features` object. | def as_features(X, stack=False, bare=False):
'''
Returns a version of X as a :class:`Features` object.
Parameters
----------
stack : boolean, default False
Make a stacked version of X. Note that if X is a features object,
this will stack it in-place, since that's usually what you wa... |
If unstacked, convert to stacked. If stacked, do nothing. | def make_stacked(self):
"If unstacked, convert to stacked. If stacked, do nothing."
if self.stacked:
return
self._boundaries = bounds = np.r_[0, np.cumsum(self.n_pts)]
self.stacked_features = stacked = np.vstack(self.features)
self.features = np.array(
[s... |
Copies the Feature object. Makes a copy of the features array. | def copy(self, stack=False, copy_meta=False, memo=None):
'''
Copies the Feature object. Makes a copy of the features array.
Parameters
----------
stack : boolean, optional, default False
Whether to stack the copy if this one is unstacked.
copy_meta : boolean... |
Make a Features object with no metadata ; points to the same features. | def bare(self):
"Make a Features object with no metadata; points to the same features."
if not self.meta:
return self
elif self.stacked:
return Features(self.stacked_features, self.n_pts, copy=False)
else:
return Features(self.features, copy=False) |
Estimate the KL divergence between distributions, \int p(x) \log(p(x) / q(x)), using the kNN-based estimator (5) of Qing Wang, Sanjeev R. Kulkarni, and Sergio Verdu (2009), "Divergence Estimation for Multidimensional Densities Via k-Nearest-Neighbor Distances", IEEE Transactions on Information Theory. ... | def kl(Ks, dim, num_q, rhos, nus, clamp=True):
r'''
Estimate the KL divergence between distributions:
\int p(x) \log (p(x) / q(x))
using the kNN-based estimator (5) of
Qing Wang, Sanjeev R Kulkarni, and Sergio Verdu (2009).
Divergence Estimation for Multidimensional Densities Via
... |
Specify the data to which kernel values should be computed. | def fit(self, X, y=None):
'''
Specify the data to which kernel values should be computed.
Parameters
----------
X : list of arrays or :class:`skl_groups.features.Features`
The bags to compute "to".
'''
self.features_ = as_features(X, stack=True, bare=... |
Compute kernels from X to :attr:`features_`. | def transform(self, X):
'''
Compute kernels from X to :attr:`features_`.
Parameters
----------
X : list of arrays or :class:`skl_groups.features.Features`
The bags to compute "from". Must have same dimension as
:attr:`features_`.
Returns
... |
Transform a list of bag features into a matrix of its mean features. | def transform(self, X):
'''
Transform a list of bag features into a matrix of its mean features.
Parameters
----------
X : :class:`skl_groups.features.Features` or list of bag feature arrays
Data to transform.
Returns
-------
X_new : array, s... |
Constructor from xml element *SHYPO* | def from_shypo(cls, xml, encoding='utf-8'):
"""Constructor from xml element *SHYPO*
:param xml.etree.ElementTree xml: the xml *SHYPO* element
:param string encoding: encoding of the xml
"""
score = float(xml.get('SCORE'))
words = [Word.from_whypo(w_xml, encoding) for w_... |
Constructor from xml element *WHYPO* | def from_whypo(cls, xml, encoding='utf-8'):
"""Constructor from xml element *WHYPO*
:param xml.etree.ElementTree xml: the xml *WHYPO* element
:param string encoding: encoding of the xml
"""
word = unicode(xml.get('WORD'), encoding)
confidence = float(xml.get('CM'))
... |
Start listening to the server | def run(self):
"""Start listening to the server"""
logger.info(u'Started listening')
while not self._stop:
xml = self._readxml()
# Exit on invalid XML
if xml is None:
break
# Raw xml only
if not self.modelize:
... |
Connect to the server | def connect(self):
"""Connect to the server
:raise ConnectionError: If socket cannot establish a connection
"""
try:
logger.info(u'Connecting %s:%d' % (self.host, self.port))
self.sock.connect((self.host, self.port))
except socket.error:
rais... |
Disconnect from the server | def disconnect(self):
"""Disconnect from the server"""
logger.info(u'Disconnecting')
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.state = DISCONNECTED |
Send a command to the server | def send(self, command, timeout=5):
"""Send a command to the server
:param string command: command to send
"""
logger.info(u'Sending %s' % command)
_, writable, __ = select.select([], [self.sock], [], timeout)
if not writable:
raise SendTimeoutError()
... |
Read a line from the server. Data is read from the socket until a character ``\n`` is found | def _readline(self):
"""Read a line from the server. Data is read from the socket until a character ``\n`` is found
:return: the read line
:rtype: string
"""
line = ''
while 1:
readable, _, __ = select.select([self.sock], [], [], 0.5)
if self._st... |
Read a block from the server. Lines are read until a character ``.`` is found | def _readblock(self):
"""Read a block from the server. Lines are read until a character ``.`` is found
:return: the read block
:rtype: string
"""
block = ''
while not self._stop:
line = self._readline()
if line == '.':
break
... |
Read a block and return the result as XML | def _readxml(self):
"""Read a block and return the result as XML
:return: block as xml
:rtype: xml.etree.ElementTree
"""
block = re.sub(r'<(/?)s>', r'<\1s>', self._readblock())
try:
xml = XML(block)
except ParseError:
xml = None
... |
Analyse an OpenStreetMap changeset. | def cli(id):
"""Analyse an OpenStreetMap changeset."""
ch = Analyse(id)
ch.full_analysis()
click.echo(
'Created: %s. Modified: %s. Deleted: %s' % (ch.create, ch.modify, ch.delete)
)
if ch.is_suspect:
click.echo('The changeset {} is suspect! Reasons: {}'.format(
id... |
Get information about number of changesets, blocks and mapping days of a user, using both the OSM API and the Mapbox comments API. | def get_user_details(user_id):
    """Get information about number of changesets, blocks and mapping days of a
    user, using both the OSM API and the Mapbox comments API.
"""
reasons = []
try:
url = OSM_USERS_API.format(user_id=requests.compat.quote(user_id))
user_request = requests.ge... |
Return a dictionary with id, user, user_id, bounds, date of creation and all the tags of the changeset. | def changeset_info(changeset):
"""Return a dictionary with id, user, user_id, bounds, date of creation
and all the tags of the changeset.
Args:
changeset: the XML string of the changeset.
"""
keys = [tag.attrib.get('k') for tag in changeset.getchildren()]
keys += ['id', 'user', 'uid', '... |
Get the changeset using the OSM API and return the content as a XML ElementTree. | def get_changeset(changeset):
"""Get the changeset using the OSM API and return the content as a XML
ElementTree.
Args:
changeset: the id of the changeset.
"""
url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format(
changeset
)
return ET.fromstring(re... |
Get the metadata of a changeset using the OSM API and return it as a XML ElementTree. | def get_metadata(changeset):
"""Get the metadata of a changeset using the OSM API and return it as a XML
ElementTree.
Args:
changeset: the id of the changeset.
"""
url = 'https://www.openstreetmap.org/api/0.6/changeset/{}'.format(changeset)
return ET.fromstring(requests.get(url).content... |
Get the bounds of the changeset and return it as a Polygon object. If the changeset has no coordinates (case of the changesets that deal only with relations), it returns an empty Polygon. | def get_bounds(changeset):
    """Get the bounds of the changeset and return it as a Polygon object. If
    the changeset has no coordinates (case of the changesets that deal only
    with relations), it returns an empty Polygon.
Args:
changeset: the XML string of the changeset.
"""
try:
r... |
Check if a text has some of the suspect words (or words that start with one of the suspect words). You can set some words to be excluded from the search, so you can remove false positives like 'important' being detected when you search for 'import'. It will return True if the number of suspect words found is greater than the n... | def find_words(text, suspect_words, excluded_words=[]):
    """Check if a text has some of the suspect words (or words that start with
    one of the suspect words). You can set some words to be excluded from the
    search, so you can remove false positives like 'important' being detected when
    you search for 'import'. ... |
Download the replication changeset file or read it directly from the filesystem (for test purposes). | def read_file(self, changeset_file):
        """Download the replication changeset file or read it directly from the
        filesystem (for test purposes).
"""
if isfile(changeset_file):
self.filename = changeset_file
else:
self.path = mkdtemp()
self.filename ... |
Read the first feature from the geojson and return it as a Polygon object. | def get_area(self, geojson):
"""Read the first feature from the geojson and return it as a Polygon
object.
"""
        with open(geojson, 'r') as fp:
            geojson = json.load(fp)
        self.area = Polygon(geojson['features'][0]['geometry']['coordinates'][0]) |
Filter the changesets that intersects with the geojson geometry. | def filter(self):
"""Filter the changesets that intersects with the geojson geometry."""
self.content = [
ch
for ch in self.xml.getchildren()
if get_bounds(ch).intersects(self.area)
] |
Set the fields of this class with the metadata of the analysed changeset. | def set_fields(self, changeset):
"""Set the fields of this class with the metadata of the analysed
changeset.
"""
self.id = int(changeset.get('id'))
self.user = changeset.get('user')
self.uid = changeset.get('uid')
self.editor = changeset.get('created_by', None)
... |
Add suspicion reason and set the suspicious flag. | def label_suspicious(self, reason):
"""Add suspicion reason and set the suspicious flag."""
self.suspicion_reasons.append(reason)
self.is_suspect = True |
Execute the count and verify_words methods. | def full_analysis(self):
"""Execute the count and verify_words methods."""
self.count()
self.verify_words()
self.verify_user()
if self.review_requested == 'yes':
self.label_suspicious('Review requested') |
Verify if the changeset was made by an inexperienced mapper (anyone with less than 5 edits) or by a user that was blocked more than once. | def verify_user(self):
        """Verify if the changeset was made by an inexperienced mapper (anyone
with less than 5 edits) or by a user that was blocked more than once.
"""
user_reasons = get_user_details(self.uid)
        for reason in user_reasons:
            self.label_suspicious(reason) |
Verify the fields source imagery_used and comment of the changeset for some suspect words. | def verify_words(self):
"""Verify the fields source, imagery_used and comment of the changeset
for some suspect words.
"""
if self.comment:
if find_words(self.comment, self.suspect_words, self.excluded_words):
self.label_suspicious('suspect_word')
if ... |
Verify if the software used in the changeset is a powerfull_editor. | def verify_editor(self):
"""Verify if the software used in the changeset is a powerfull_editor.
"""
powerful_editors = [
'josm', 'level0', 'merkaartor', 'qgis', 'arcgis', 'upload.py',
'osmapi', 'Services_OpenStreetMap'
]
if self.editor is not None:
... |
Count the number of elements created, modified and deleted by the changeset and analyse if it is a possible import, mass modification or a mass deletion. | def count(self):
        """Count the number of elements created, modified and deleted by the
        changeset and analyse if it is a possible import, mass modification or
a mass deletion.
"""
xml = get_changeset(self.id)
actions = [action.tag for action in xml.getchildren()]
s... |
Get a stream URI from a playlist URI, ``uri``. Unwraps nested playlists until something that's not a playlist is found or the ``timeout`` is reached. | def _unwrap_stream(uri, timeout, scanner, requests_session):
"""
Get a stream URI from a playlist URI, ``uri``.
Unwraps nested playlists until something that's not a playlist is found or
the ``timeout`` is reached.
"""
original_uri = uri
seen_uris = set()
deadline = time.time() + timeou... |
Start asynchronous HTTP Server on an individual process. | def serve(self, sock, request_handler, error_handler, debug=False,
request_timeout=60, ssl=None, request_max_size=None,
reuse_port=False, loop=None, protocol=HttpProtocol,
backlog=100, **kwargs):
"""Start asynchronous HTTP Server on an individual process.
:para... |
Grow this Pantheon by multiplying Gods. | def spawn(self, generations):
"""Grow this Pantheon by multiplying Gods."""
egg_donors = [god for god in self.gods.values() if god.chromosomes == 'XX']
sperm_donors = [god for god in self.gods.values() if god.chromosomes == 'XY']
for i in range(generations):
print("\nGENERAT... |
Get it on. | def breed(self, egg_donor, sperm_donor):
"""Get it on."""
offspring = []
try:
num_children = npchoice([1,2], 1, p=[0.8, 0.2])[0] # 20% chance of twins
for _ in range(num_children):
child = God(egg_donor, sperm_donor)
offspring.append(child)... |
Return words from <tokens> that are most closely related to <word>. | def get_matches(word, tokens, limit, offset=0):
"""Return words from <tokens> that are most closely related to <word>."""
return closest(tokens, word_vec(word), limit, offset) |
Compare vectors. Borrowed from A. Parish. | def cosine(vec1, vec2):
"""Compare vectors. Borrowed from A. Parish."""
if norm(vec1) > 0 and norm(vec2) > 0:
return dot(vec1, vec2) / (norm(vec1) * norm(vec2))
else:
return 0.0 |
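With `dot` and `norm` taken from numpy (consistent with how the snippet reads), the function behaves as expected on unit vectors:

```python
import numpy as np
from numpy import dot
from numpy.linalg import norm

def cosine(vec1, vec2):
    if norm(vec1) > 0 and norm(vec2) > 0:
        return dot(vec1, vec2) / (norm(vec1) * norm(vec2))
    return 0.0

print(cosine(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0
print(cosine(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0
print(cosine(np.zeros(2), np.array([1.0, 0.0])))           # 0.0 (guard case)
```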
Return the <limit> words from <tokens> whose vectors most closely resemble the search_vec. Skip the first <offset> results. | def closest(tokens, search_vec, limit, offset=0):
"""Return the <limit> words from <tokens> whose vectors most closely
resemble the search_vec. Skip the first <offset> results.
"""
return sorted(tokens,
key=lambda x: cosine(search_vec, word_vec(x)),
reverse=True)[offs... |
Generate a json file for each txt file in the /data/corpora directory. | def tokenize_texts():
"""Generate a json file for each txt file in the /data/corpora directory."""
text_files = [fname for fname in os.listdir(corpora_dir) \
if fname.split('.')[1] == 'txt']
for text_fname in text_files:
json_fname = text_fname.split('.')[0] + '.json'
if os.path.isf... |
Create a new directory named <dir_>. Create a new file within it called sources.json. The input <sources> is a list of names of tokenized texts. Write <sources> into sources.json. | def make_tokens_dir(dir_, sources):
"""Create a new directory named <dir_>. Create a new file within it called
sources.json. The input <sources> is a list of names of tokenized texts.
Write <sources> into sources.json.
"""
os.mkdir(tokens_dir + dir_)
for source in sources:
if not os.path... |
Find sources.json in <dir_>. It contains a list of tokenized texts. For each tokenized text listed in sources.json, read its tokens, filter them, and add them to an aggregated list. Write the aggregated list to disk using a filename based on the <filters> given. | def make_tokens_list(dir_, filters):
"""Find sources.json in <dir_>. It contains a list of tokenized texts. For
each tokenized text listed in sources.json, read its tokens, filter them,
and add them to an aggregated list. Write the aggregated list to disk using
a filename based on the <filters> given.
... |