Dataset columns:

  _id               string (length 2 to 7)
  title             string (length 1 to 88)
  partition         string (3 classes)
  text              string (length 75 to 19.8k)
  language          string (1 class)
  meta_information  dict
q261900
Deposit._publish_edited
validation
def _publish_edited(self):
    """Publish the deposit after editing."""
    record_pid, record = self.fetch_published()
    if record.revision_id == self['_deposit']['pid']['revision_id']:
        data = dict(self.dumps())
    else:
        data = self.merge_with_published()
    data['$schema'] = self.record_schema
    data['_deposit'] = self['_deposit']
    record = record.__class__(data, model=record.model)
    return record
python
{ "resource": "" }
q261901
Deposit.publish
validation
def publish(self, pid=None, id_=None):
    """Publish a deposit.

    If it's the first time:

    * it calls the minter and sets the following meta information inside
      the deposit:

      .. code-block:: python

          deposit['_deposit'] = {
              'type': pid_type,
              'value': pid_value,
              'revision_id': 0,
          }

    * A dump of all information inside the deposit is done.

    * A snapshot of the files is done.

    Otherwise, it publishes the newly edited version. In this case, if in
    the meantime someone has already published a new version, it will try
    to merge the changes with the latest version.

    .. note:: no need for indexing as it calls `self.commit()`.

    Status required: ``'draft'``.

    :param pid: Force the new pid value. (Default: ``None``)
    :param id_: Force the new uuid value as deposit id. (Default: ``None``)
    :returns: Returns itself.
    """
    pid = pid or self.pid
    if not pid.is_registered():
        raise PIDInvalidAction()
    self['_deposit']['status'] = 'published'
    if self['_deposit'].get('pid') is None:  # First publishing
        self._publish_new(id_=id_)
    else:  # Update after edit
        record = self._publish_edited()
        record.commit()
    self.commit()
    return self
python
{ "resource": "" }
q261902
Deposit._prepare_edit
validation
def _prepare_edit(self, record):
    """Update selected keys.

    :param record: The record to prepare.
    """
    data = record.dumps()
    # Keep current record revision for merging.
    data['_deposit']['pid']['revision_id'] = record.revision_id
    data['_deposit']['status'] = 'draft'
    data['$schema'] = self.build_deposit_schema(record)
    return data
python
{ "resource": "" }
q261903
Deposit.edit
validation
def edit(self, pid=None):
    """Edit deposit.

    #. The signal :data:`invenio_records.signals.before_record_update` is
       sent before the edit execution.

    #. The following meta information is saved inside the deposit:

       .. code-block:: python

           deposit['_deposit']['pid'] = record.revision_id
           deposit['_deposit']['status'] = 'draft'
           deposit['$schema'] = deposit_schema_from_record_schema

    #. The signal :data:`invenio_records.signals.after_record_update` is
       sent after the edit execution.

    #. The deposit index is updated.

    Status required: ``'published'``.

    .. note:: the process fails if the pid does not have the status
        :attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.

    :param pid: Force a pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    pid = pid or self.pid
    with db.session.begin_nested():
        before_record_update.send(
            current_app._get_current_object(), record=self)

        record_pid, record = self.fetch_published()
        assert PIDStatus.REGISTERED == record_pid.status
        assert record['_deposit'] == self['_deposit']

        self.model.json = self._prepare_edit(record)

        flag_modified(self.model, 'json')
        db.session.merge(self.model)

    after_record_update.send(
        current_app._get_current_object(), record=self)
    return self.__class__(self.model.json, model=self.model)
python
{ "resource": "" }
q261904
Deposit.discard
validation
def discard(self, pid=None):
    """Discard deposit changes.

    #. The signal :data:`invenio_records.signals.before_record_update` is
       sent before the edit execution.

    #. It restores the last published version.

    #. The following meta information is saved inside the deposit:

       .. code-block:: python

           deposit['$schema'] = deposit_schema_from_record_schema

    #. The signal :data:`invenio_records.signals.after_record_update` is
       sent after the edit execution.

    #. The deposit index is updated.

    Status required: ``'draft'``.

    :param pid: Force a pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    pid = pid or self.pid
    with db.session.begin_nested():
        before_record_update.send(
            current_app._get_current_object(), record=self)

        _, record = self.fetch_published()
        self.model.json = deepcopy(record.model.json)
        self.model.json['$schema'] = self.build_deposit_schema(record)

        flag_modified(self.model, 'json')
        db.session.merge(self.model)

    after_record_update.send(
        current_app._get_current_object(), record=self)
    return self.__class__(self.model.json, model=self.model)
python
{ "resource": "" }
q261905
Deposit.delete
validation
def delete(self, force=True, pid=None):
    """Delete deposit.

    Status required: ``'draft'``.

    :param force: Force deposit delete. (Default: ``True``)
    :param pid: Force pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    pid = pid or self.pid
    if self['_deposit'].get('pid'):
        raise PIDInvalidAction()
    if pid:
        pid.delete()
    return super(Deposit, self).delete(force=force)
python
{ "resource": "" }
q261906
Deposit.clear
validation
def clear(self, *args, **kwargs):
    """Clear only drafts.

    Status required: ``'draft'``.

    Meta information inside `_deposit` is preserved.
    """
    super(Deposit, self).clear(*args, **kwargs)
python
{ "resource": "" }
q261907
Deposit.update
validation
def update(self, *args, **kwargs):
    """Update only drafts.

    Status required: ``'draft'``.

    Meta information inside `_deposit` is preserved.
    """
    super(Deposit, self).update(*args, **kwargs)
python
{ "resource": "" }
q261908
Deposit.patch
validation
def patch(self, *args, **kwargs):
    """Patch only drafts.

    Status required: ``'draft'``.

    Meta information inside `_deposit` is preserved.
    """
    return super(Deposit, self).patch(*args, **kwargs)
python
{ "resource": "" }
q261909
Deposit.files
validation
def files(self):
    """List of Files inside the deposit.

    Adds validation to the ``sort_by`` method: if, at the time of files
    access, the record is not a ``'draft'``, then a
    :exc:`invenio_pidstore.errors.PIDInvalidAction` is raised.
    """
    files_ = super(Deposit, self).files
    if files_:
        sort_by_ = files_.sort_by

        def sort_by(*args, **kwargs):
            """Only in draft state."""
            if 'draft' != self.status:
                raise PIDInvalidAction()
            return sort_by_(*args, **kwargs)

        files_.sort_by = sort_by
    return files_
python
{ "resource": "" }
q261910
rst2node
validation
def rst2node(doc_name, data):
    """Convert a reStructuredText string into its docutils node."""
    if not data:
        return
    parser = docutils.parsers.rst.Parser()
    document = docutils.utils.new_document('<%s>' % doc_name)
    document.settings = docutils.frontend.OptionParser().get_default_values()
    document.settings.tab_width = 4
    document.settings.pep_references = False
    document.settings.rfc_references = False
    document.settings.env = Env()
    parser.parse(data, document)
    if len(document.children) == 1:
        return document.children[0]
    else:
        par = docutils.nodes.paragraph()
        for child in document.children:
            par += child
        return par
python
{ "resource": "" }
q261911
setup
validation
def setup(app):
    """Hook the directives in when Sphinx asks for it."""
    if 'http' not in app.domains:
        httpdomain.setup(app)
    app.add_directive('autopyramid', RouteDirective)
python
{ "resource": "" }
q261912
api._parse_response
validation
def _parse_response(self, response):
    """Parse the API response and raise appropriate errors if
    raise_errors was set to True.
    """
    if not self._raise_errors:
        return response

    is_4xx_error = str(response.status_code)[0] == '4'
    is_5xx_error = str(response.status_code)[0] == '5'
    content = response.content

    if response.status_code == 403:
        raise AuthenticationError(content)
    elif is_4xx_error:
        raise APIError(content)
    elif is_5xx_error:
        raise ServerError(content)

    return response
python
{ "resource": "" }
q261913
api.templates
validation
def templates(self, timeout=None):
    """API call to get a list of templates."""
    return self._api_request(
        self.TEMPLATES_ENDPOINT,
        self.HTTP_GET,
        timeout=timeout
    )
python
{ "resource": "" }
q261914
api.get_template
validation
def get_template(self, template_id, version=None, timeout=None):
    """API call to get a specific template."""
    if version:
        return self._api_request(
            self.TEMPLATES_VERSION_ENDPOINT % (template_id, version),
            self.HTTP_GET,
            timeout=timeout
        )
    else:
        return self._api_request(
            self.TEMPLATES_SPECIFIC_ENDPOINT % template_id,
            self.HTTP_GET,
            timeout=timeout
        )
python
{ "resource": "" }
q261915
api.create_template
validation
def create_template(self, name, subject, html, text='', timeout=None):
    """API call to create a template."""
    payload = {
        'name': name,
        'subject': subject,
        'html': html,
        'text': text
    }
    return self._api_request(
        self.TEMPLATES_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
python
{ "resource": "" }
q261916
api.create_new_locale
validation
def create_new_locale(
    self, template_id, locale, version_name,
    subject, text='', html='', timeout=None
):
    """API call to create a new locale and version of a template."""
    payload = {
        'locale': locale,
        'name': version_name,
        'subject': subject
    }
    if html:
        payload['html'] = html
    if text:
        payload['text'] = text
    return self._api_request(
        self.TEMPLATES_LOCALES_ENDPOINT % template_id,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
python
{ "resource": "" }
q261917
api.create_new_version
validation
def create_new_version(
    self, name, subject, text='', template_id=None,
    html=None, locale=None, timeout=None
):
    """API call to create a new version of a template."""
    if html:
        payload = {
            'name': name,
            'subject': subject,
            'html': html,
            'text': text
        }
    else:
        payload = {
            'name': name,
            'subject': subject,
            'text': text
        }

    if locale:
        url = self.TEMPLATES_SPECIFIC_LOCALE_VERSIONS_ENDPOINT % (
            template_id,
            locale
        )
    else:
        url = self.TEMPLATES_NEW_VERSION_ENDPOINT % template_id

    return self._api_request(
        url,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
python
{ "resource": "" }
q261918
api.update_template_version
validation
def update_template_version(
    self, name, subject, template_id, version_id,
    text='', html=None, timeout=None
):
    """API call to update a template version."""
    if html:
        payload = {
            'name': name,
            'subject': subject,
            'html': html,
            'text': text
        }
    else:
        payload = {
            'name': name,
            'subject': subject,
            'text': text
        }

    return self._api_request(
        self.TEMPLATES_VERSION_ENDPOINT % (template_id, version_id),
        self.HTTP_PUT,
        payload=payload,
        timeout=timeout
    )
python
{ "resource": "" }
q261919
api.snippets
validation
def snippets(self, timeout=None):
    """API call to get a list of snippets."""
    return self._api_request(
        self.SNIPPETS_ENDPOINT,
        self.HTTP_GET,
        timeout=timeout
    )
python
{ "resource": "" }
q261920
api.get_snippet
validation
def get_snippet(self, snippet_id, timeout=None):
    """API call to get a specific Snippet."""
    return self._api_request(
        self.SNIPPET_ENDPOINT % snippet_id,
        self.HTTP_GET,
        timeout=timeout
    )
python
{ "resource": "" }
q261921
api.create_snippet
validation
def create_snippet(self, name, body, timeout=None):
    """API call to create a Snippet."""
    payload = {
        'name': name,
        'body': body
    }
    return self._api_request(
        self.SNIPPETS_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
python
{ "resource": "" }
q261922
api._make_file_dict
validation
def _make_file_dict(self, f):
    """Make a dictionary with filename and base64 file data."""
    if isinstance(f, dict):
        file_obj = f['file']
        if 'filename' in f:
            file_name = f['filename']
        else:
            file_name = file_obj.name
    else:
        file_obj = f
        file_name = f.name
    b64_data = base64.b64encode(file_obj.read())
    return {
        'id': file_name,
        'data': b64_data.decode() if six.PY3 else b64_data,
    }
python
{ "resource": "" }
q261923
api.send
validation
def send(
    self, email_id, recipient, email_data=None, sender=None,
    cc=None, bcc=None, tags=[], headers={}, esp_account=None,
    locale=None, email_version_name=None, inline=None, files=[],
    timeout=None
):
    """API call to send an email."""
    if not email_data:
        email_data = {}

    # For backwards compatibility, will be removed.
    if isinstance(recipient, string_types):
        warnings.warn(
            "Passing email directly for recipient is deprecated",
            DeprecationWarning)
        recipient = {'address': recipient}

    payload = {
        'email_id': email_id,
        'recipient': recipient,
        'email_data': email_data
    }

    if sender:
        payload['sender'] = sender
    if cc:
        if not isinstance(cc, list):
            logger.error('kwarg cc must be type(list), got %s' % type(cc))
        payload['cc'] = cc
    if bcc:
        if not isinstance(bcc, list):
            logger.error('kwarg bcc must be type(list), got %s' % type(bcc))
        payload['bcc'] = bcc
    if tags:
        if not isinstance(tags, list):
            logger.error(
                'kwarg tags must be type(list), got %s' % type(tags))
        payload['tags'] = tags
    if headers:
        if not isinstance(headers, dict):
            logger.error(
                'kwarg headers must be type(dict), got %s' % type(headers))
        payload['headers'] = headers
    if esp_account:
        if not isinstance(esp_account, string_types):
            logger.error(
                'kwarg esp_account must be a string, got %s'
                % type(esp_account))
        payload['esp_account'] = esp_account
    if locale:
        if not isinstance(locale, string_types):
            logger.error(
                'kwarg locale must be a string, got %s' % type(locale))
        payload['locale'] = locale
    if email_version_name:
        if not isinstance(email_version_name, string_types):
            logger.error(
                'kwarg email_version_name must be a string, got %s'
                % type(email_version_name))
        payload['version_name'] = email_version_name
    if inline:
        payload['inline'] = self._make_file_dict(inline)
    if files:
        payload['files'] = [self._make_file_dict(f) for f in files]

    return self._api_request(
        self.SEND_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout
    )
python
{ "resource": "" }
q261924
BatchAPI.execute
validation
def execute(self, timeout=None):
    """Execute all currently queued batch commands."""
    logger.debug(' > Batch API request (length %s)' % len(self._commands))

    auth = self._build_http_auth()

    headers = self._build_request_headers()
    logger.debug('\tbatch headers: %s' % headers)
    logger.debug('\tbatch command length: %s' % len(self._commands))

    path = self._build_request_path(self.BATCH_ENDPOINT)
    data = json.dumps(self._commands, cls=self._json_encoder)
    r = requests.post(
        path,
        auth=auth,
        headers=headers,
        data=data,
        timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)
    )

    self._commands = []

    logger.debug('\tresponse code: %s' % r.status_code)
    try:
        logger.debug('\tresponse: %s' % r.json())
    except Exception:  # response body is not JSON
        logger.debug('\tresponse: %s' % r.content)

    return r
python
{ "resource": "" }
q261925
TabView.get_group_tabs
validation
def get_group_tabs(self):
    """Return instances of all other tabs that are members of the tab's
    tab group.
    """
    if self.tab_group is None:
        raise ImproperlyConfigured(
            "%s requires a definition of 'tab_group'"
            % self.__class__.__name__)
    group_members = [t for t in self._registry
                     if t.tab_group == self.tab_group]
    return [t() for t in group_members]
python
{ "resource": "" }
q261926
TabView._process_tabs
validation
def _process_tabs(self, tabs, current_tab, group_current_tab):
    """Process and prepare tabs.

    This includes steps like updating references to the current tab,
    filtering out hidden tabs, sorting tabs etc...

    Args:
        tabs:
            The list of tabs to process.
        current_tab:
            The reference to the currently loaded tab.
        group_current_tab:
            The reference to the active tab in the current tab group.
            For parent tabs, this is different than for the current tab
            group.

    Returns:
        Processed list of tabs. Note that the method may have side
        effects.
    """
    # Update references to the current tab
    for t in tabs:
        t.current_tab = current_tab
        t.group_current_tab = group_current_tab

    # Filter out hidden tabs
    tabs = list(filter(lambda t: t.tab_visible, tabs))

    # Sort remaining tabs in-place
    tabs.sort(key=lambda t: t.weight)

    return tabs
python
{ "resource": "" }
q261927
TabView.get_context_data
validation
def get_context_data(self, **kwargs):
    """Adds tab information to context.

    To retrieve a list of all group tab instances, use ``{{ tabs }}`` in
    your template.

    The id of the current tab is added as ``current_tab_id`` to the
    template context.

    If the current tab has a parent tab the parent's id is added to the
    template context as ``parent_tab_id``. Instances of all tabs of the
    parent level are added as ``parent_tabs`` to the context.

    If the current tab has children they are added to the template
    context as ``child_tabs``.
    """
    context = super(TabView, self).get_context_data(**kwargs)

    # Update the context with kwargs, TemplateView doesn't do this.
    context.update(kwargs)

    # Add tabs and "current" references to context
    process_tabs_kwargs = {
        'tabs': self.get_group_tabs(),
        'current_tab': self,
        'group_current_tab': self,
    }
    context['tabs'] = self._process_tabs(**process_tabs_kwargs)
    context['current_tab_id'] = self.tab_id

    # Handle parent tabs
    if self.tab_parent is not None:
        # Verify that the tab parent is valid
        if self.tab_parent not in self._registry:
            msg = '%s has no attribute _is_tab' \
                  % self.tab_parent.__class__.__name__
            raise ImproperlyConfigured(msg)

        # Get parent tab instance
        parent = self.tab_parent()

        # Add parent tabs to context
        process_parents_kwargs = {
            'tabs': parent.get_group_tabs(),
            'current_tab': self,
            'group_current_tab': parent,
        }
        context['parent_tabs'] = self._process_tabs(
            **process_parents_kwargs)
        context['parent_tab_id'] = parent.tab_id

    # Handle child tabs
    if self.tab_id in self._children:
        process_children_kwargs = {
            'tabs': [t() for t in self._children[self.tab_id]],
            'current_tab': self,
            'group_current_tab': None,
        }
        context['child_tabs'] = self._process_tabs(
            **process_children_kwargs)

    return context
python
{ "resource": "" }
q261928
normalize_name
validation
def normalize_name(s):
    """Convert a string into a valid python attribute name.

    This function is called to convert ASCII strings to something that
    can pass as python attribute name, to be used with namedtuples.

    >>> str(normalize_name('class'))
    'class_'
    >>> str(normalize_name('a-name'))
    'a_name'
    >>> str(normalize_name('a n\u00e4me'))
    'a_name'
    >>> str(normalize_name('Name'))
    'Name'
    >>> str(normalize_name(''))
    '_'
    >>> str(normalize_name('1'))
    '_1'
    """
    s = s.replace('-', '_').replace('.', '_').replace(' ', '_')
    if s in keyword.kwlist:
        return s + '_'
    s = '_'.join(slug(ss, lowercase=False) for ss in s.split('_'))
    if not s:
        s = '_'
    if s[0] not in string.ascii_letters + '_':
        s = '_' + s
    return s
python
{ "resource": "" }
q261929
schema
validation
def schema(tg):
    """Convert the table and column descriptions of a `TableGroup` into
    specifications for the DB schema.

    :param tg: The `TableGroup` to convert.
    :return: A list of table specifications, ordered such that each table
        is created only after the tables it references.
    """
    tables = {}
    for tname, table in tg.tabledict.items():
        t = TableSpec.from_table_metadata(table)
        tables[t.name] = t
        for at in t.many_to_many.values():
            tables[at.name] = at

    # We must determine the order in which tables must be created!
    ordered = OrderedDict()
    i = 0

    # We loop through the tables repeatedly, and whenever we find one
    # which has all referenced tables already in ordered, we move it from
    # tables to ordered.
    while tables and i < 100:
        i += 1
        for table in list(tables.keys()):
            if all((ref[1] in ordered) or ref[1] == table
                   for ref in tables[table].foreign_keys):
                # All referenced tables are already created
                # (or self-referential).
                ordered[table] = tables.pop(table)
                break

    if tables:  # pragma: no cover
        raise ValueError(
            'there seem to be cyclic dependencies between the tables')

    return list(ordered.values())
python
{ "resource": "" }
q261930
Database.write
validation
def write(self, _force=False, _exists_ok=False, **items):
    """Create a db file with the core schema.

    :param _force: If `True` an existing db file will be overwritten.
    """
    if self.fname and self.fname.exists():
        raise ValueError(
            'db file already exists, use force=True to overwrite')

    with self.connection() as db:
        for table in self.tables:
            db.execute(table.sql(translate=self.translate))

        db.execute('PRAGMA foreign_keys = ON;')
        db.commit()

        refs = defaultdict(list)  # collects rows in association tables.
        for t in self.tables:
            if t.name not in items:
                continue
            rows, keys = [], []
            cols = {c.name: c for c in t.columns}
            for i, row in enumerate(items[t.name]):
                pk = row[t.primary_key[0]] \
                    if t.primary_key and len(t.primary_key) == 1 else None
                values = []
                for k, v in row.items():
                    if k in t.many_to_many:
                        assert pk
                        at = t.many_to_many[k]
                        atkey = tuple(
                            [at.name] + [c.name for c in at.columns])
                        for vv in v:
                            fkey, context = self.association_table_context(
                                t, k, vv)
                            refs[atkey].append((pk, fkey, context))
                    else:
                        col = cols[k]
                        if isinstance(v, list):
                            # Note: This assumes list-valued columns are
                            # of datatype string!
                            v = (col.separator or ';').join(
                                col.convert(vv) for vv in v)
                        else:
                            v = col.convert(v) if v is not None else None
                        if i == 0:
                            keys.append(col.name)
                        values.append(v)
                rows.append(tuple(values))
            insert(db, self.translate, t.name, keys, *rows)

        for atkey, rows in refs.items():
            insert(db, self.translate, atkey[0], atkey[1:], *rows)

        db.commit()
python
{ "resource": "" }
q261931
iterrows
validation
def iterrows(lines_or_file, namedtuples=False, dicts=False,
             encoding='utf-8', **kw):
    """Convenience factory function for csv reader.

    :param lines_or_file: Content to be read. Either a file handle, a
        file path or a list of strings.
    :param namedtuples: Yield namedtuples.
    :param dicts: Yield dicts.
    :param encoding: Encoding of the content.
    :param kw: Keyword parameters are passed through to csv.reader.
    :return: A generator over the rows.
    """
    if namedtuples and dicts:
        raise ValueError(
            'either namedtuples or dicts can be chosen as output format')
    elif namedtuples:
        _reader = NamedTupleReader
    elif dicts:
        _reader = UnicodeDictReader
    else:
        _reader = UnicodeReader

    with _reader(lines_or_file, encoding=encoding, **fix_kw(kw)) as r:
        for item in r:
            yield item
python
{ "resource": "" }
q261932
rewrite
validation
def rewrite(fname, visitor, **kw):
    """Utility function to rewrite rows in tsv files.

    :param fname: Path of the dsv file to operate on.
    :param visitor: A callable that takes a line-number and a row as
        input and returns a (modified) row or None to filter out the row.
    :param kw: Keyword parameters are passed through to
        csv.reader/csv.writer.
    """
    if not isinstance(fname, pathlib.Path):
        assert isinstance(fname, string_types)
        fname = pathlib.Path(fname)

    assert fname.is_file()
    with tempfile.NamedTemporaryFile(delete=False) as fp:
        tmp = pathlib.Path(fp.name)

    with UnicodeReader(fname, **kw) as reader_:
        with UnicodeWriter(tmp, **kw) as writer:
            for i, row in enumerate(reader_):
                row = visitor(i, row)
                if row is not None:
                    writer.writerow(row)
    shutil.move(str(tmp), str(fname))
python
{ "resource": "" }
q261933
filter_rows_as_dict
validation
def filter_rows_as_dict(fname, filter_, **kw):
    """Rewrite a dsv file, filtering the rows.

    :param fname: Path to the dsv file.
    :param filter_: callable which accepts a `dict` with a row's data as
        its single argument, returning a `Boolean` indicating whether to
        keep the row (`True`) or to discard it (`False`).
    :param kw: Keyword arguments to be passed to `UnicodeReader` and
        `UnicodeWriter`.
    :return: The number of rows that have been removed.
    """
    filter_ = DictFilter(filter_)
    rewrite(fname, filter_, **kw)
    return filter_.removed
python
{ "resource": "" }
q261934
dump_grid
validation
def dump_grid(grid):
    """Dump a single grid to its ZINC representation."""
    header = 'ver:%s' % dump_str(str(grid._version), version=grid._version)
    if bool(grid.metadata):
        header += ' ' + dump_meta(grid.metadata, version=grid._version)
    columns = dump_columns(grid.column, version=grid._version)
    rows = dump_rows(grid)
    return '\n'.join([header, columns] + rows + [''])
python
{ "resource": "" }
q261935
parse
validation
def parse(grid_str, mode=MODE_ZINC, charset='utf-8'):
    """Parse the given Zinc text and return the equivalent data."""
    # Decode incoming text (or python3 will whine!)
    if isinstance(grid_str, six.binary_type):
        grid_str = grid_str.decode(encoding=charset)

    # Split the separate grids up; the grammar definition has trouble
    # splitting them up normally. This will truncate the newline off the
    # end of the last row.
    _parse = functools.partial(parse_grid, mode=mode, charset=charset)
    if mode == MODE_JSON:
        if isinstance(grid_str, six.string_types):
            grid_data = json.loads(grid_str)
        else:
            grid_data = grid_str

        if isinstance(grid_data, dict):
            return _parse(grid_data)
        else:
            return list(map(_parse, grid_data))
    else:
        return list(map(_parse, GRID_SEP.split(grid_str.rstrip())))
python
{ "resource": "" }
q261936
MetadataObject.append
validation
def append(self, key, value=MARKER, replace=True):
    """Append the item to the metadata."""
    return self.add_item(key, value, replace=replace)
python
{ "resource": "" }
q261937
MetadataObject.extend
validation
def extend(self, items, replace=True):
    """Append the items to the metadata."""
    if isinstance(items, (dict, SortableDict)):
        items = list(items.items())
    for (key, value) in items:
        self.append(key, value, replace=replace)
python
{ "resource": "" }
q261938
Shape.regular_polygon
validation
def regular_polygon(cls, center, radius, n_vertices, start_angle=0,
                    **kwargs):
    """Construct a regular polygon.

    Parameters
    ----------
    center : array-like
    radius : float
    n_vertices : int
    start_angle : float, optional
        Where to put the first point, relative to `center`, in radians
        counter-clockwise starting from the horizontal axis.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.
    """
    angles = (np.arange(n_vertices) * 2 * np.pi / n_vertices) + start_angle
    return cls(center + radius * np.array([np.cos(angles),
                                           np.sin(angles)]).T,
               **kwargs)
python
{ "resource": "" }
q261939
Shape.circle
validation
def circle(cls, center, radius, n_vertices=50, **kwargs):
    """Construct a circle.

    Parameters
    ----------
    center : array-like
    radius : float
    n_vertices : int, optional
        Number of points to draw. Decrease for performance, increase for
        appearance.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.
    """
    return cls.regular_polygon(center, radius, n_vertices, **kwargs)
python
{ "resource": "" }
q261940
Shape.rectangle
validation
def rectangle(cls, vertices, **kwargs):
    """Shortcut for creating a rectangle aligned with the screen axes
    from only two corners.

    Parameters
    ----------
    vertices : array-like
        An array containing the ``[x, y]`` positions of two corners.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.
    """
    bottom_left, top_right = vertices
    top_left = [bottom_left[0], top_right[1]]
    bottom_right = [top_right[0], bottom_left[1]]
    return cls([bottom_left, bottom_right, top_right, top_left], **kwargs)
python
{ "resource": "" }
q261941
Shape.from_dict
validation
def from_dict(cls, spec):
    """Create a |Shape| from a dictionary specification.

    Parameters
    ----------
    spec : dict
        A dictionary with either the fields ``'center'`` and ``'radius'``
        (for a circle), ``'center'``, ``'radius'``, and ``'n_vertices'``
        (for a regular polygon), or ``'vertices'``. If only two vertices
        are given, they are assumed to be lower left and top right
        corners of a rectangle. Other fields are interpreted as keyword
        arguments.
    """
    spec = spec.copy()
    center = spec.pop('center', None)
    radius = spec.pop('radius', None)
    if center and radius:
        return cls.circle(center, radius, **spec)

    vertices = spec.pop('vertices')
    if len(vertices) == 2:
        return cls.rectangle(vertices, **spec)

    return cls(vertices, **spec)
python
{ "resource": "" }
q261942
Shape._kwargs
validation
def _kwargs(self):
    """Keyword arguments for recreating the Shape from the vertices."""
    return dict(color=self.color, velocity=self.velocity,
                colors=self.colors)
python
{ "resource": "" }
q261943
Shape.rotate
validation
def rotate(self, angle, center=None):
    """Rotate the shape, in-place.

    Parameters
    ----------
    angle : float
        Angle to rotate, in radians counter-clockwise.
    center : array-like, optional
        Point about which to rotate. If not passed, the center of the
        shape will be used.
    """
    args = [angle]
    if center is not None:
        args.extend(center)
    self.poly.rotate(*args)
    return self
python
{ "resource": "" }
q261944
Shape.flip_x
validation
def flip_x(self, center=None):
    """Flip the shape in the x direction, in-place.

    Parameters
    ----------
    center : array-like, optional
        Point about which to flip. If not passed, the center of the
        shape will be used.
    """
    if center is None:
        self.poly.flip()
    else:
        self.poly.flip(center[0])
python
{ "resource": "" }
q261945
Shape.flip_y
validation
def flip_y(self, center=None):
    """Flip the shape in the y direction, in-place.

    Parameters
    ----------
    center : array-like, optional
        Point about which to flip. If not passed, the center of the
        shape will be used.
    """
    if center is None:
        self.poly.flop()
    else:
        self.poly.flop(center[1])
    return self
python
{ "resource": "" }
q261946
Shape.flip
validation
def flip(self, angle, center=None):
    """Flip the shape in an arbitrary direction.

    Parameters
    ----------
    angle : array-like
        The angle, in radians counter-clockwise from the horizontal
        axis, defining the angle about which to flip the shape (of a
        line through `center`).
    center : array-like, optional
        The point about which to flip. If not passed, the center of the
        shape will be used.
    """
    return (self.rotate(-angle, center=center)
                .flip_y(center=center)
                .rotate(angle, center=center))
python
{ "resource": "" }
q261947
Shape.draw
validation
def draw(self):
    """Draw the shape in the current OpenGL context."""
    if self.enabled:
        self._vertex_list.colors = self._gl_colors
        self._vertex_list.vertices = self._gl_vertices
        self._vertex_list.draw(pyglet.gl.GL_TRIANGLES)
python
{ "resource": "" }
q261948
Shape.update
validation
def update(self, dt):
    """Update the shape's position by moving it forward according to its
    velocity.

    Parameters
    ----------
    dt : float
    """
    self.translate(dt * self.velocity)
    self.rotate(dt * self.angular_velocity)
python
{ "resource": "" }
q261949
_map_timezones
validation
def _map_timezones():
    """Map the official Haystack timezone list to those recognised by
    pytz.
    """
    tz_map = {}
    todo = HAYSTACK_TIMEZONES_SET.copy()
    for full_tz in pytz.all_timezones:
        # Finished case:
        if not bool(todo):  # pragma: no cover
            # This is nearly impossible for us to cover, and an unlikely
            # case.
            break
        # Case 1: exact match
        if full_tz in todo:
            tz_map[full_tz] = full_tz  # Exact match
            todo.discard(full_tz)
            continue
        # Case 2: suffix match after '/'
        if '/' not in full_tz:
            continue
        (prefix, suffix) = full_tz.split('/', 1)
        # Case 2 exception: full timezone contains more than one '/'
        # -> ignore
        if '/' in suffix:
            continue
        if suffix in todo:
            tz_map[suffix] = full_tz
            todo.discard(suffix)
            continue
    return tz_map
python
{ "resource": "" }
q261950
timezone
validation
def timezone(haystack_tz, version=LATEST_VER):
    """Retrieve the Haystack timezone."""
    tz_map = get_tz_map(version=version)
    try:
        tz_name = tz_map[haystack_tz]
    except KeyError:
        raise ValueError(
            '%s is not a recognised timezone on this host' % haystack_tz)
    return pytz.timezone(tz_name)
python
{ "resource": "" }
q261951
_unescape
validation
def _unescape(s, uri=False):
    """Iterative parser for string escapes."""
    out = ''
    while len(s) > 0:
        c = s[0]
        if c == '\\':
            # Backslash escape
            esc_c = s[1]

            if esc_c in ('u', 'U'):
                # Unicode escape
                out += six.unichr(int(s[2:6], base=16))
                s = s[6:]
                continue
            else:
                if esc_c == 'b':
                    out += '\b'
                elif esc_c == 'f':
                    out += '\f'
                elif esc_c == 'n':
                    out += '\n'
                elif esc_c == 'r':
                    out += '\r'
                elif esc_c == 't':
                    out += '\t'
                else:
                    if uri and (esc_c == '#'):
                        # \# is passed through with backslash.
                        out += '\\'
                    # Pass through
                    out += esc_c
                s = s[2:]
                continue
        else:
            out += c
            s = s[1:]
    return out
python
{ "resource": "" }
q261952
parse_grid
validation
def parse_grid(grid_data):
    """Parse the incoming grid."""
    try:
        # Split the grid up.
        grid_parts = NEWLINE_RE.split(grid_data)
        if len(grid_parts) < 2:
            raise ZincParseException('Malformed grid received',
                                     grid_data, 1, 1)

        # Grid and column metadata are the first two lines.
        grid_meta_str = grid_parts.pop(0)
        col_meta_str = grid_parts.pop(0)

        # First element is the grid metadata
        ver_match = VERSION_RE.match(grid_meta_str)
        if ver_match is None:
            raise ZincParseException(
                'Could not determine version from %r' % grid_meta_str,
                grid_data, 1, 1)
        version = Version(ver_match.group(1))

        # Now parse the rest of the grid accordingly
        try:
            grid_meta = hs_gridMeta[version].parseString(
                grid_meta_str, parseAll=True)[0]
        except pp.ParseException as pe:
            # Raise a new exception with the appropriate line number.
            raise ZincParseException(
                'Failed to parse grid metadata: %s' % pe,
                grid_data, 1, pe.col)
        except:  # pragma: no cover
            # Report an error to the log if we fail to parse something.
            LOG.debug('Failed to parse grid meta: %r', grid_meta_str)
            raise

        try:
            col_meta = hs_cols[version].parseString(
                col_meta_str, parseAll=True)[0]
        except pp.ParseException as pe:
            # Raise a new exception with the appropriate line number.
            raise ZincParseException(
                'Failed to parse column metadata: %s'
                % reformat_exception(pe, 2),
                grid_data, 2, pe.col)
        except:  # pragma: no cover
            # Report an error to the log if we fail to parse something.
            LOG.debug('Failed to parse column meta: %r', col_meta_str)
            raise

        row_grammar = hs_row[version]

        def _parse_row(row_num_and_data):
            (row_num, row) = row_num_and_data
            line_num = row_num + 3
            try:
                return dict(zip(
                    col_meta.keys(),
                    row_grammar.parseString(
                        row, parseAll=True)[0].asList()))
            except pp.ParseException as pe:
                # Raise a new exception with the appropriate line number.
                raise ZincParseException(
                    'Failed to parse row: %s'
                    % reformat_exception(pe, line_num),
                    grid_data, line_num, pe.col)
            except:  # pragma: no cover
                # Report an error to the log if we fail to parse
                # something.
                LOG.debug('Failed to parse row: %r', row)
                raise

        g = Grid(version=grid_meta.pop('ver'),
                 metadata=grid_meta,
                 columns=list(col_meta.items()))
        g.extend(map(_parse_row,
                     filter(lambda gp: bool(gp[1]),
                            enumerate(grid_parts))))
        return g
    except:
        LOG.debug('Failing grid: %r', grid_data)
        raise
python
{ "resource": "" }
q261953
parse_scalar
validation
def parse_scalar(scalar_data, version):
    """Parse a Project Haystack scalar in ZINC format."""
    try:
        return hs_scalar[version].parseString(scalar_data,
                                              parseAll=True)[0]
    except pp.ParseException as pe:
        # Raise a new exception with the appropriate line number.
        raise ZincParseException(
            'Failed to parse scalar: %s' % reformat_exception(pe),
            scalar_data, 1, pe.col)
    except:
        LOG.debug('Failing scalar data: %r (version %r)',
                  scalar_data, version)
        raise  # re-raise so the caller sees the original error
python
{ "resource": "" }
q261954
SortableDict.add_item
validation
def add_item(self, key, value, after=False, index=None, pos_key=None,
             replace=True):
    """Add an item at a specific location, possibly replacing the
    existing item.

    If after is True, we insert *after* the given index, otherwise we
    insert before.

    The position is specified using either index or pos_key; the former
    specifies the position from the start of the array (base 0), while
    pos_key specifies the name of another key and positions the new key
    relative to that key.

    When replacing, the position will be left unchanged unless a
    location is specified explicitly.
    """
    if self._validate_fn:
        self._validate_fn(value)

    if (index is not None) and (pos_key is not None):
        raise ValueError('Either specify index or pos_key, not both.')
    elif pos_key is not None:
        try:
            index = self.index(pos_key)
        except ValueError:
            raise KeyError('%r not found' % pos_key)

    if after and (index is not None):
        # insert inserts *before* index, so increment by one.
        index += 1

    if key in self._values:
        if not replace:
            raise KeyError('%r is duplicate' % key)

        if index is not None:
            # We are re-locating.
            del self[key]
        else:
            # We are updating
            self._values[key] = value
            return

    if index is not None:
        # Place at given position
        self._order.insert(index, key)
    else:
        # Place at end
        self._order.append(key)
    self._values[key] = value
python
{ "resource": "" }
q261955
dump
validation
def dump(grids, mode=MODE_ZINC):
    """Dump the given grids in the specified over-the-wire format."""
    if isinstance(grids, Grid):
        return dump_grid(grids, mode=mode)
    _dump = functools.partial(dump_grid, mode=mode)
    if mode == MODE_ZINC:
        return '\n'.join(map(_dump, grids))
    elif mode == MODE_JSON:
        return '[%s]' % ','.join(map(_dump, grids))
    else:  # pragma: no cover
        raise NotImplementedError('Format not implemented: %s' % mode)
python
{ "resource": "" }
q261956
Grid._detect_or_validate
validation
def _detect_or_validate(self, val):
    """Detect the version used from the row content, or validate against
    the version if given.
    """
    if isinstance(val, (list, dict, SortableDict, Grid)):
        # Project Haystack 3.0 type.
        self._assert_version(VER_3_0)
python
{ "resource": "" }
q261957
Grid._assert_version
validation
def _assert_version(self, version):
    """Assert that the grid version is equal to or above the given
    value. If no version is set, set the version.
    """
    if self.nearest_version < version:
        if self._version_given:
            raise ValueError(
                'Data type requires version %s' % version)
        else:
            self._version = version
python
{ "resource": "" }
q261958
Version.nearest
validation
def nearest(self, ver):
    """Retrieve the official version nearest the one given."""
    if not isinstance(ver, Version):
        ver = Version(ver)
    if ver in OFFICIAL_VERSIONS:
        return ver

    # We might not have an exact match for that. See if we have one
    # that's newer than the grid we're looking at.
    versions = list(OFFICIAL_VERSIONS)
    versions.sort(reverse=True)
    best = None
    for candidate in versions:
        # Due to ambiguities, we might have an exact match and not know
        # it. '2.0' will not hash to the same value as '2.0.0', but both
        # are equivalent.
        if candidate == ver:
            # We can't beat this, make a note of the match for later
            return candidate
        # If we have not seen a better candidate, and this is older,
        # then we may have to settle for that.
        if (best is None) and (candidate < ver):
            warnings.warn(
                'This version of hszinc does not yet support version '
                '%s, please seek a newer version or file a bug. '
                'Closest (older) version supported is %s.'
                % (ver, candidate))
            return candidate
        # Probably the best so far, but see if we can go closer
        if candidate > ver:
            best = candidate

    # Unhappy path, no best option? This should not happen.
    assert best is not None
    warnings.warn(
        'This version of hszinc does not yet support version %s, '
        'please seek a newer version or file a bug. '
        'Closest (newer) version supported is %s.' % (ver, best))
    return best
python
{ "resource": "" }
q261959
encrypt_files
validation
def encrypt_files(selected_host, only_link, file_name):
    """Encrypt a file with gpg and a randomly generated password."""
    if ENCRYPTION_DISABLED:
        print('For encryption please install gpg')
        exit()
    passphrase = '%030x' % random.randrange(16**30)
    source_filename = file_name
    cmd = 'gpg --batch --symmetric --cipher-algo AES256 ' \
          '--passphrase-fd 0 --output - {}'.format(source_filename)
    encrypted_output = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE,
                             stderr=PIPE)
    encrypted_data = encrypted_output.communicate(passphrase.encode())[0]
    return upload_files(encrypted_data, selected_host, only_link,
                        file_name) + '#' + passphrase
python
{ "resource": "" }
q261960
check_max_filesize
validation
def check_max_filesize(chosen_file, max_size):
    """Check whether the file size is within the host's limit."""
    return os.path.getsize(chosen_file) <= max_size
python
{ "resource": "" }
q261961
parse_arguments
validation
def parse_arguments(args, clone_list):
    """Parse the command-line arguments and dispatch the uploads."""
    returned_string = ""
    host_number = args.host
    if args.show_list:
        print(generate_host_string(clone_list, "Available hosts: "))
        exit()
    if args.decrypt:
        for i in args.files:
            print(decrypt_files(i))
        exit()
    if args.files:
        for i in args.files:
            if args.limit_size:
                if args.host == host_number and host_number is not None:
                    if not check_max_filesize(i,
                                              clone_list[host_number][3]):
                        host_number = None
                for n, host in enumerate(clone_list):
                    if not check_max_filesize(i, host[3]):
                        clone_list[n] = None
                if not clone_list:
                    print('None of the clones is able to support such a '
                          'big file.')
            if args.no_cloudflare:
                if args.host == host_number and host_number is not None \
                        and not clone_list[host_number][4]:
                    print("This host uses Cloudflare, please choose a "
                          "different host.")
                    exit(1)
                else:
                    for n, host in enumerate(clone_list):
                        if not host[4]:
                            clone_list[n] = None
            clone_list = list(filter(None, clone_list))
            if host_number is None or args.host != host_number:
                host_number = random.randrange(0, len(clone_list))
            while True:
                try:
                    if args.encrypt:
                        returned_string = encrypt_files(
                            clone_list[host_number], args.only_link, i)
                    else:
                        returned_string = upload_files(
                            open(i, 'rb'), clone_list[host_number],
                            args.only_link, i)
                    if args.only_link:
                        print(returned_string[0])
                    else:
                        print(returned_string)
                except IndexError:
                    # print('Selected server (' +
                    #       clone_list[host_number][0] + ') is offline.')
                    # print('Trying other host.')
                    host_number = random.randrange(0, len(clone_list))
                    continue
                except IsADirectoryError:
                    print('limf does not support directory upload, if you '
                          'want to upload every file in a directory use '
                          'limf {}/*.'.format(i.replace('/', '')))
                if args.log:
                    with open(os.path.expanduser(args.logfile),
                              "a+") as logfile:
                        if args.only_link:
                            logfile.write(returned_string[1])
                        else:
                            logfile.write(returned_string)
                        logfile.write("\n")
                break
    else:
        print("limf: try 'limf -h' for more information")
python
{ "resource": "" }
q261962
upload_files
validation
def upload_files(selected_file, selected_host, only_link, file_name):
    """Upload the selected file to the host.

    This works thanks to the fact that every pomf.se based site has
    pretty much the same architecture.
    """
    try:
        answer = requests.post(
            url=selected_host[0] + "upload.php",
            files={'files[]': selected_file})
        file_name_1 = re.findall(r'"url": *"((h.+\/){0,1}(.+?))"[,\}]',
                                 answer.text.replace("\\", ""))[0][2]
        if only_link:
            return [selected_host[1] + file_name_1,
                    "{}: {}{}".format(file_name, selected_host[1],
                                      file_name_1)]
        else:
            return "{}: {}{}".format(file_name, selected_host[1],
                                     file_name_1)
    except requests.exceptions.ConnectionError:
        print(file_name + ' couldn\'t be uploaded to ' + selected_host[0])
python
{ "resource": "" }
q261963
decrypt_files
validation
def decrypt_files(file_link):
    """Decrypt a file from the entered link."""
    if ENCRYPTION_DISABLED:
        print('For decryption please install gpg')
        exit()
    try:
        parsed_link = re.findall(r'(.*/(.*))#(.{30})', file_link)[0]
        req = urllib.request.Request(
            parsed_link[0],
            data=None,
            headers={
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
                              '10_9_3)  AppleWebKit/537.36 (KHTML, like '
                              'Gecko) Chrome/35.0.1916.47 Safari/537.36'
            }
        )
        # Download the file using a fake user agent.
        file_response = urllib.request.urlopen(req)
        file_to_decrypt = file_response.read()

        # Decrypt the data by piping the passphrase to gpg.
        decrypt_r, decrypt_w = os.pipe()
        cmd = 'gpg --batch --decrypt --passphrase-fd {}'.format(decrypt_r)
        decrypt_output = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE,
                               stderr=PIPE, pass_fds=(decrypt_r,))
        os.close(decrypt_r)
        open(decrypt_w, 'w').write(parsed_link[2])
        decrypted_data, stderr = decrypt_output.communicate(file_to_decrypt)

        with open(parsed_link[1], 'wb') as decrypted_file:
            decrypted_file.write(decrypted_data)
        return parsed_link[1] + ' is decrypted and saved.'
    except IndexError:
        return 'Please enter a valid link.'
python
{ "resource": "" }
q261964
DefinitionHandler.from_schema
validation
def from_schema(self, schema_node, base_name=None):
    """Create a Swagger definition from a colander schema.

    :param schema_node: Colander schema to be transformed into a Swagger
        definition.
    :param base_name: Schema alternative title.

    :rtype: dict
    :returns: Swagger schema.
    """
    return self._ref_recursive(self.type_converter(schema_node),
                               self.ref, base_name)
python
{ "resource": "" }
q261965
ParameterHandler.from_schema
validation
def from_schema(self, schema_node):
    """Create a list of Swagger params from a colander request schema.

    :param schema_node: Request schema to be transformed into Swagger.

    :rtype: list
    :returns: List of Swagger parameters.
    """
    params = []

    for param_schema in schema_node.children:
        location = param_schema.name
        if location == 'body':
            name = param_schema.__class__.__name__
            if name == 'body':
                name = schema_node.__class__.__name__ + 'Body'
            param = self.parameter_converter(location, param_schema)
            param['name'] = name
            if self.ref:
                param = self._ref(param)
            params.append(param)

        elif location in ('path', 'header', 'headers', 'querystring',
                          'GET'):
            for node_schema in param_schema.children:
                param = self.parameter_converter(location, node_schema)
                if self.ref:
                    param = self._ref(param)
                params.append(param)

    return params
python
{ "resource": "" }
q261966
ParameterHandler.from_path
validation
def from_path(self, path):
    """Create a list of Swagger path params from a cornice service path.

    :type path: string
    :rtype: list
    """
    path_components = path.split('/')
    param_names = [comp[1:-1] for comp in path_components
                   if comp.startswith('{') and comp.endswith('}')]
    params = []
    for name in param_names:
        param_schema = colander.SchemaNode(colander.String(), name=name)
        param = self.parameter_converter('path', param_schema)
        if self.ref:
            param = self._ref(param)
        params.append(param)
    return params
python
{ "resource": "" }
q261967
ParameterHandler._ref
validation
def _ref(self, param, base_name=None):
    """Store a parameter schema and return a reference to it.

    :param param: Swagger parameter definition.
    :param base_name: Name that should be used for the reference.

    :rtype: dict
    :returns: JSON pointer to the original parameter definition.
    """
    name = base_name or param.get('title', '') or param.get('name', '')
    pointer = self.json_pointer + name
    self.parameter_registry[name] = param
    return {'$ref': pointer}
python
{ "resource": "" }
q261968
ResponseHandler.from_schema_mapping
validation
def from_schema_mapping(self, schema_mapping):
    """Create a Swagger response object from a dict of response schemas.

    :param schema_mapping: Dict with entries matching
        ``{status_code: response_schema}``.

    :rtype: dict
    :returns: Response schema.
    """
    responses = {}
    for status, response_schema in schema_mapping.items():
        response = {}
        if response_schema.description:
            response['description'] = response_schema.description
        else:
            raise CorniceSwaggerException(
                'Responses must have a description.')

        for field_schema in response_schema.children:
            location = field_schema.name
            if location == 'body':
                title = field_schema.__class__.__name__
                if title == 'body':
                    title = response_schema.__class__.__name__ + 'Body'
                field_schema.title = title
                response['schema'] = self.definitions.from_schema(
                    field_schema)

            elif location in ('header', 'headers'):
                header_schema = self.type_converter(field_schema)
                headers = header_schema.get('properties')
                if headers:
                    # Response headers don't accept titles
                    for header in headers.values():
                        header.pop('title')
                    response['headers'] = headers

        pointer = response_schema.__class__.__name__
        if self.ref:
            response = self._ref(response, pointer)
        responses[status] = response

    return responses
python
{ "resource": "" }
q261969
ResponseHandler._ref
validation
def _ref(self, resp, base_name=None):
    """Store a response schema and return a reference to it.

    :param resp: Swagger response definition.
    :param base_name: Name that should be used for the reference.

    :rtype: dict
    :returns: JSON pointer to the original response definition.
    """
    name = base_name or resp.get('title', '') or resp.get('name', '')
    pointer = self.json_pointer + name
    self.response_registry[name] = resp
    return {'$ref': pointer}
python
{ "resource": "" }
q261970
CorniceSwagger.generate
validation
def generate(self, title=None, version=None, base_path=None,
             info=None, swagger=None, **kwargs):
    """Generate a Swagger 2.0 documentation.

    Keyword arguments may be used to provide additional information to
    the build methods.

    :param title: The name presented on the swagger document.
    :param version: The version of the API presented on the swagger
        document.
    :param base_path: The path that all requests to the API must refer
        to.
    :param info: Swagger info field.
    :param swagger: Extra fields that should be provided on the swagger
        documentation.

    :rtype: dict
    :returns: Full OpenAPI/Swagger compliant specification for the
        application.
    """
    title = title or self.api_title
    version = version or self.api_version
    info = info or self.swagger.get('info', {})
    swagger = swagger or self.swagger
    base_path = base_path or self.base_path
    swagger = swagger.copy()

    info.update(title=title, version=version)
    swagger.update(swagger='2.0', info=info, basePath=base_path)

    paths, tags = self._build_paths()

    # Update the provided tags with the extracted ones preserving order
    if tags:
        swagger.setdefault('tags', [])
        tag_names = {t['name'] for t in swagger['tags']}
        for tag in tags:
            if tag['name'] not in tag_names:
                swagger['tags'].append(tag)

    # Create/Update swagger sections with extracted values where not
    # provided
    if paths:
        swagger.setdefault('paths', {})
        merge_dicts(swagger['paths'], paths)

    definitions = self.definitions.definition_registry
    if definitions:
        swagger.setdefault('definitions', {})
        merge_dicts(swagger['definitions'], definitions)

    parameters = self.parameters.parameter_registry
    if parameters:
        swagger.setdefault('parameters', {})
        merge_dicts(swagger['parameters'], parameters)

    responses = self.responses.response_registry
    if responses:
        swagger.setdefault('responses', {})
        merge_dicts(swagger['responses'], responses)

    return swagger
python
{ "resource": "" }
q261971
CorniceSwagger._build_paths
validation
def _build_paths(self):
    """Build the Swagger "paths" and "tags" attributes from cornice
    service definitions.
    """
    paths = {}
    tags = []

    for service in self.services:
        path, path_obj = self._extract_path_from_service(service)

        service_tags = getattr(service, 'tags', [])
        self._check_tags(service_tags)
        tags = self._get_tags(tags, service_tags)

        for method, view, args in service.definitions:
            if method.lower() in map(str.lower, self.ignore_methods):
                continue

            op = self._extract_operation_from_view(view, args)

            if any(ctype in op.get('consumes', [])
                   for ctype in self.ignore_ctypes):
                continue

            # XXX: Swagger doesn't support different schemas for the
            # same method with different ctypes as cornice does. If this
            # happens, you may ignore one content-type from the
            # documentation, otherwise we raise an Exception.
            # Related to
            # https://github.com/OAI/OpenAPI-Specification/issues/146
            previous_definition = path_obj.get(method.lower())
            if previous_definition:
                raise CorniceSwaggerException(
                    ("Swagger doesn't support multiple views for the "
                     "same method. You may ignore one."))

            # If tag not defined and a default tag is provided
            if 'tags' not in op and self.default_tags:
                if callable(self.default_tags):
                    op['tags'] = self.default_tags(service, method)
                else:
                    op['tags'] = self.default_tags

            op_tags = op.get('tags', [])
            self._check_tags(op_tags)

            # Add service tags
            if service_tags:
                new_tags = service_tags + op_tags
                op['tags'] = list(OrderedDict.fromkeys(new_tags))

            # Add method tags to root tags
            tags = self._get_tags(tags, op_tags)

            # If operation id is not defined and a default generator is
            # provided
            if 'operationId' not in op and self.default_op_ids:
                if not callable(self.default_op_ids):
                    raise CorniceSwaggerException(
                        'default_op_id should be a callable.')
                op['operationId'] = self.default_op_ids(service, method)

            # If security options not defined and a default is provided
            if 'security' not in op and self.default_security:
                if callable(self.default_security):
                    op['security'] = self.default_security(service,
                                                           method)
                else:
                    op['security'] = self.default_security

            if not isinstance(op.get('security', []), list):
                raise CorniceSwaggerException(
                    'security should be a list or callable')

            path_obj[method.lower()] = op
            paths[path] = path_obj

    return paths, tags
python
{ "resource": "" }
q261972
CorniceSwagger._extract_path_from_service
validation
def _extract_path_from_service(self, service):
    """Extract a path object and its parameters from service definitions.

    :param service: Cornice service to extract information from.

    :rtype: dict
    :returns: Path definition.
    """
    path_obj = {}
    path = service.path
    route_name = getattr(service, 'pyramid_route', None)

    # Handle services that don't create fresh routes: we still need the
    # paths, so we grab the pyramid introspector to extract that
    # information.
    if route_name:
        # Avoid failure if someone forgets to pass the registry.
        registry = self.pyramid_registry or get_current_registry()
        route_intr = registry.introspector.get('routes', route_name)
        if route_intr:
            path = route_intr['pattern']
        else:
            msg = 'Route `{}` is not found by ' \
                  'pyramid introspector'.format(route_name)
            raise ValueError(msg)

    # Handle traverse and subpath as regular parameters.
    # docs.pylonsproject.org/projects/pyramid/en/latest/narr/hybrid.html
    for subpath_marker in ('*subpath', '*traverse'):
        path = path.replace(subpath_marker, '{subpath}')

    # Extract path parameters.
    parameters = self.parameters.from_path(path)
    if parameters:
        path_obj['parameters'] = parameters

    return path, path_obj
python
{ "resource": "" }
q261973
CorniceSwagger._extract_operation_from_view
validation
def _extract_operation_from_view(self, view, args):
    """Extract swagger operation details from colander view definitions.

    :param view: View to extract information from.
    :param args: Arguments from the view decorator.

    :rtype: dict
    :returns: Operation definition.
    """
    op = {
        'responses': {
            'default': {
                'description': 'UNDOCUMENTED RESPONSE'
            }
        },
    }

    # If 'produces' is not defined in the view, try to get it from the
    # renderer.
    renderer = args.get('renderer', '')

    if "json" in renderer:  # allows for "json" or "simplejson"
        produces = ['application/json']
    elif renderer == 'xml':
        produces = ['text/xml']
    else:
        produces = None

    if produces:
        op.setdefault('produces', produces)

    # Get explicit accepted content-types.
    consumes = args.get('content_type')

    if consumes is not None:
        # Convert to a list, if it's not yet one.
        consumes = to_list(consumes)

        # It is possible to add callables for content_type, so we have
        # to filter those out, since we cannot evaluate those here.
        consumes = [x for x in consumes if not callable(x)]
        op['consumes'] = consumes

    # Get parameters from the view schema.
    is_colander = self._is_colander_schema(args)
    if is_colander:
        schema = self._extract_transform_colander_schema(args)
        parameters = self.parameters.from_schema(schema)
    else:
        # Bail out for now.
        parameters = None
    if parameters:
        op['parameters'] = parameters

    # Get the summary from the docstring.
    if isinstance(view, six.string_types):
        if 'klass' in args:
            ob = args['klass']
            view_ = getattr(ob, view.lower())
            docstring = trim(view_.__doc__)
    else:
        docstring = str(trim(view.__doc__))

    if docstring and self.summary_docstrings:
        op['summary'] = docstring

    # Get response definitions.
    if 'response_schemas' in args:
        op['responses'] = self.responses.from_schema_mapping(
            args['response_schemas'])

    # Get response tags.
    if 'tags' in args:
        op['tags'] = args['tags']

    # Get response operationId.
    if 'operation_id' in args:
        op['operationId'] = args['operation_id']

    # Get security policies.
    if 'api_security' in args:
        op['security'] = args['api_security']

    return op
python
{ "resource": "" }
q261974
CorniceSwagger._extract_transform_colander_schema
validation
def _extract_transform_colander_schema(self, args):
    """Extract the schema from view args and transform it using the
    pipeline of schema transformers.

    :param args: Arguments from the view decorator.

    :rtype: colander.MappingSchema()
    :returns: View schema cloned and transformed.
    """
    schema = args.get('schema', colander.MappingSchema())
    if not isinstance(schema, colander.Schema):
        schema = schema()
    schema = schema.clone()
    for transformer in self.schema_transformers:
        schema = transformer(schema, args)
    return schema
python
{ "resource": "" }
q261975
ParameterConverter.convert
validation
def convert(self, schema_node, definition_handler):
    """Convert a node schema into a parameter object."""
    converted = {
        'name': schema_node.name,
        'in': self._in,
        'required': schema_node.required
    }
    if schema_node.description:
        converted['description'] = schema_node.description
    if schema_node.default:
        converted['default'] = schema_node.default

    schema = definition_handler(schema_node)
    # Parameters shouldn't have a title.
    schema.pop('title', None)
    converted.update(schema)

    if schema.get('type') == 'array':
        converted['items'] = {'type': schema['items']['type']}

    return converted
python
{ "resource": "" }
q261976
merge_dicts
validation
def merge_dicts(base, changes):
    """Merge `changes` into `base` recursively, without overwriting
    existing values.

    :param base: the dict that will be altered.
    :param changes: changes to update base.
    """
    for k, v in changes.items():
        if isinstance(v, dict):
            merge_dicts(base.setdefault(k, {}), v)
        else:
            base.setdefault(k, v)
python
{ "resource": "" }
q261977
get_transition_viewset_method
validation
def get_transition_viewset_method(transition_name, **kwargs):
    """Create a viewset method for the provided `transition_name`."""
    @detail_route(methods=['post'], **kwargs)
    def inner_func(self, request, pk=None, **kwargs):
        object = self.get_object()
        transition_method = getattr(object, transition_name)

        transition_method(by=self.request.user)

        if self.save_after_transition:
            object.save()

        serializer = self.get_serializer(object)
        return Response(serializer.data)

    return inner_func
python
{ "resource": "" }
q261978
get_viewset_transition_action_mixin
validation
def get_viewset_transition_action_mixin(model, **kwargs):
    '''
    Find all transitions defined on `model`, then create a corresponding
    viewset action method for each and apply it to `Mixin`. Finally, return
    `Mixin`.
    '''
    instance = model()

    class Mixin(object):
        save_after_transition = True

    transitions = instance.get_all_status_transitions()
    transition_names = set(x.name for x in transitions)
    for transition_name in transition_names:
        setattr(
            Mixin,
            transition_name,
            get_transition_viewset_method(transition_name, **kwargs)
        )

    return Mixin
python
{ "resource": "" }
q261979
fresh_cookies
validation
def fresh_cookies(ctx, mold=''):
    """Refresh the project from the original cookiecutter template."""
    mold = mold or "https://github.com/Springerle/py-generic-project.git"  # TODO: URL from config
    tmpdir = os.path.join(tempfile.gettempdir(),
                          "cc-upgrade-pygments-markdown-lexer")

    if os.path.isdir('.git'):
        # TODO: Ensure there are no local unstashed changes
        pass

    # Make a copy of the new mold version
    if os.path.isdir(tmpdir):
        shutil.rmtree(tmpdir)
    if os.path.exists(mold):
        shutil.copytree(mold, tmpdir, ignore=shutil.ignore_patterns(
            ".git", ".svn", "*~",
        ))
    else:
        ctx.run("git clone {} {}".format(mold, tmpdir))

    # Copy recorded "cookiecutter.json" into mold
    shutil.copy2("project.d/cookiecutter.json", tmpdir)

    with pushd('..'):
        ctx.run("cookiecutter --no-input {}".format(tmpdir))
    if os.path.exists('.git'):
        ctx.run("git status")
python
{ "resource": "" }
q261980
ci
validation
def ci(ctx):
    """Perform continuous integration tasks."""
    opts = ['']

    # 'tox' makes no sense in Travis
    if os.environ.get('TRAVIS', '').lower() == 'true':
        opts += ['test.pytest']
    else:
        opts += ['test.tox']

    ctx.run("invoke --echo --pty clean --all build --docs check --reports{}"
            .format(' '.join(opts)))
python
{ "resource": "" }
q261981
py_hash
validation
def py_hash(key, num_buckets):
    """Generate a number in the range [0, num_buckets).

    Args:
        key (int): The key to hash.
        num_buckets (int): Number of buckets to use.

    Returns:
        The bucket number `key` computes to.

    Raises:
        ValueError: If `num_buckets` is not a positive number.
    """
    if num_buckets < 1:
        raise ValueError('num_buckets must be a positive number')

    b, j = -1, 0
    while j < num_buckets:
        b = int(j)
        # 2862933555777941757 is the LCG multiplier from the Jump Consistent
        # Hash paper; the mask keeps the state within 64 bits. (Python 3 ints
        # are unbounded, so the Python-2 long() call is unnecessary.)
        key = ((key * 2862933555777941757) + 1) & 0xffffffffffffffff
        j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))
    return int(b)
python
{ "resource": "" }
q261982
setup
validation
def setup(app):
    """Initializer for Sphinx extension API.

    See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
    """
    lexer = MarkdownLexer()
    for alias in lexer.aliases:
        app.add_lexer(alias, lexer)

    return dict(version=__version__)
python
{ "resource": "" }
q261983
MdStat.load
validation
def load(self):
    """Return a dict of stats."""
    ret = {}

    # Read the mdstat file
    with open(self.get_path(), 'r') as f:
        # lines is a list of lines (each ending with \n)
        lines = f.readlines()

    # First line: get the personalities
    # The "Personalities" line tells you what RAID levels the kernel
    # currently supports. This can be changed by either changing the raid
    # modules or recompiling the kernel.
    # Possible personalities include:
    # [raid0] [raid1] [raid4] [raid5] [raid6] [linear] [multipath] [faulty]
    ret['personalities'] = self.get_personalities(lines[0])

    # Lines from the second up to (but not including) the last:
    # array definitions
    ret['arrays'] = self.get_arrays(lines[1:-1], ret['personalities'])

    # Save the file content as-is for the __str__ method
    self.content = reduce(lambda x, y: x + y, lines)

    return ret
python
{ "resource": "" }
q261984
MdStat.get_personalities
validation
def get_personalities(self, line):
    """Return a list of personalities read from the input line."""
    return [split(r'\W+', i)[1] for i in line.split(':')[1].split(' ')
            if i.startswith('[')]
python
{ "resource": "" }
q261985
MdStat.get_arrays
validation
def get_arrays(self, lines, personalities=[]):
    """Return a dict of arrays."""
    ret = {}

    i = 0
    while i < len(lines):
        try:
            # First array line: get the md device
            md_device = self.get_md_device_name(lines[i])
        except IndexError:
            # No array detected
            pass
        else:
            # Array detected
            if md_device is not None:
                # md device line
                ret[md_device] = self.get_md_device(lines[i], personalities)
                # md config/status line
                i += 1
                ret[md_device].update(self.get_md_status(lines[i]))
        i += 1

    return ret
python
{ "resource": "" }
q261986
MdStat.get_md_device
validation
def get_md_device(self, line, personalities=[]):
    """Return a dict of the md device defined in the line."""
    ret = {}

    splitted = split(r'\W+', line)
    # Raid status
    # Active or 'started'. An inactive array is usually faulty.
    # Stopped arrays aren't visible here.
    ret['status'] = splitted[1]
    if splitted[2] in personalities:
        # Raid type (ex: RAID5)
        ret['type'] = splitted[2]
        # Array's components
        ret['components'] = self.get_components(line, with_type=True)
    else:
        # Raid type (ex: RAID5)
        ret['type'] = None
        # Array's components
        ret['components'] = self.get_components(line, with_type=False)

    return ret
python
{ "resource": "" }
q261987
MdStat.get_md_status
validation
def get_md_status(self, line):
    """Return a dict of the md status defined in the line."""
    ret = {}

    splitted = split(r'\W+', line)
    if len(splitted) < 7:
        ret['available'] = None
        ret['used'] = None
        ret['config'] = None
    else:
        # The final 2 entries on this line: [n/m] [UUUU_]
        # [n/m] means that ideally the array would have n devices but,
        # currently, m devices are in use. The array is complete when m
        # equals n.
        ret['available'] = splitted[-4]
        ret['used'] = splitted[-3]
        # [UUUU_] represents the status of each device, either U for up
        # or _ for down.
        ret['config'] = splitted[-2]

    return ret
python
{ "resource": "" }
q261988
MdStat.get_components
validation
def get_components(self, line, with_type=True):
    """Return a dict of the components in the line.

    key: device name (ex: 'sdc1')
    value: device role number
    """
    # Ignore (F) (see test 08)
    line2 = reduce(lambda x, y: x + y, split(r'\(.+\)', line))
    if with_type:
        splitted = split(r'\W+', line2)[3:]
    else:
        splitted = split(r'\W+', line2)[2:]
    ret = dict(zip(splitted[0::2], splitted[1::2]))

    return ret
python
{ "resource": "" }
q261989
register_receivers
validation
def register_receivers(app, config):
    """Register signal receivers which send events."""
    for event_name, event_config in config.items():
        event_builders = [
            obj_or_import_string(func)
            for func in event_config.get('event_builders', [])
        ]

        signal = obj_or_import_string(event_config['signal'])
        signal.connect(
            EventEmmiter(event_name, event_builders), sender=app, weak=False
        )
python
{ "resource": "" }
q261990
InternalMailbox.set_scheduled
validation
def set_scheduled(self):
    """Returns True if state was successfully changed from idle to scheduled."""
    with self._idle_lock:
        if self._idle:
            self._idle = False
            return True
        return False
python
{ "resource": "" }
q261991
StatsQueryResource.post
validation
def post(self, **kwargs):
    """Get statistics."""
    data = request.get_json(force=False)
    if data is None:
        data = {}

    result = {}
    for query_name, config in data.items():
        if config is None or not isinstance(config, dict) \
                or (set(config.keys()) != {'stat', 'params'} and
                    set(config.keys()) != {'stat'}):
            raise InvalidRequestInputError(
                'Invalid Input. It should be of the form '
                '{ STATISTIC_NAME: { "stat": STAT_TYPE, '
                '"params": STAT_PARAMS }}'
            )
        stat = config['stat']
        params = config.get('params', {})
        try:
            query_cfg = current_stats.queries[stat]
        except KeyError:
            raise UnknownQueryError(stat)

        permission = current_stats.permission_factory(stat, params)
        if permission is not None and not permission.can():
            message = ('You do not have a permission to query the '
                       'statistic "{}" with those '
                       'parameters'.format(stat))
            if current_user.is_authenticated:
                abort(403, message)
            abort(401, message)
        try:
            query = query_cfg.query_class(**query_cfg.query_config)
            result[query_name] = query.run(**params)
        except ValueError as e:
            raise InvalidRequestInputError(e.args[0])
        except NotFoundError:
            return None
    return self.make_response(result)
python
{ "resource": "" }
q261992
StatAggregator._get_oldest_event_timestamp
validation
def _get_oldest_event_timestamp(self):
    """Search for the oldest event timestamp."""
    # Retrieve the oldest event in order to start aggregation
    # from there
    query_events = Search(
        using=self.client,
        index=self.event_index
    )[0:1].sort(
        {'timestamp': {'order': 'asc'}}
    )
    result = query_events.execute()

    # There might not be any events yet if the first event has been
    # indexed but the indices have not been refreshed yet.
    if len(result) == 0:
        return None
    return parser.parse(result[0]['timestamp'])
python
{ "resource": "" }
q261993
StatAggregator.get_bookmark
validation
def get_bookmark(self):
    """Get last aggregation date."""
    if not Index(self.aggregation_alias, using=self.client).exists():
        if not Index(self.event_index, using=self.client).exists():
            return datetime.date.today()
        return self._get_oldest_event_timestamp()

    # retrieve the most recent bookmark
    query_bookmark = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    )[0:1].sort(
        {'date': {'order': 'desc'}}
    )
    bookmarks = query_bookmark.execute()
    # if no bookmark is found but the index exists, the bookmark was
    # somehow lost or never written, so restart from the beginning
    if len(bookmarks) == 0:
        return self._get_oldest_event_timestamp()

    # parse the bookmark date using the doc_id_suffix format
    bookmark = datetime.datetime.strptime(bookmarks[0].date,
                                          self.doc_id_suffix)
    return bookmark
python
{ "resource": "" }
q261994
StatAggregator.set_bookmark
validation
def set_bookmark(self):
    """Set bookmark for starting next aggregation."""
    def _success_date():
        bookmark = {
            'date': self.new_bookmark or
            datetime.datetime.utcnow().strftime(self.doc_id_suffix)
        }

        yield dict(_index=self.last_index_written,
                   _type=self.bookmark_doc_type,
                   _source=bookmark)
    if self.last_index_written:
        bulk(self.client,
             _success_date(),
             stats_only=True)
python
{ "resource": "" }
q261995
StatAggregator._format_range_dt
validation
def _format_range_dt(self, d):
    """Format range filter datetime to the closest aggregation interval."""
    if not isinstance(d, six.string_types):
        d = d.isoformat()
    return '{0}||/{1}'.format(
        d, self.dt_rounding_map[self.aggregation_interval])
python
{ "resource": "" }
q261996
StatAggregator.agg_iter
validation
def agg_iter(self, lower_limit=None, upper_limit=None):
    """Aggregate and return dictionary to be indexed in ES."""
    lower_limit = lower_limit or self.get_bookmark().isoformat()
    upper_limit = upper_limit or (
        datetime.datetime.utcnow().replace(microsecond=0).isoformat())
    aggregation_data = {}

    self.agg_query = Search(using=self.client,
                            index=self.event_index).\
        filter('range', timestamp={
            'gte': self._format_range_dt(lower_limit),
            'lte': self._format_range_dt(upper_limit)})

    # apply query modifiers
    for modifier in self.query_modifiers:
        self.agg_query = modifier(self.agg_query)

    hist = self.agg_query.aggs.bucket(
        'histogram',
        'date_histogram',
        field='timestamp',
        interval=self.aggregation_interval
    )
    terms = hist.bucket(
        'terms', 'terms', field=self.aggregation_field, size=0
    )
    top = terms.metric(
        'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'}
    )
    for dst, (metric, src, opts) in self.metric_aggregation_fields.items():
        terms.metric(dst, metric, field=src, **opts)

    results = self.agg_query.execute()
    index_name = None
    for interval in results.aggregations['histogram'].buckets:
        interval_date = datetime.datetime.strptime(
            interval['key_as_string'], '%Y-%m-%dT%H:%M:%S')
        for aggregation in interval['terms'].buckets:
            aggregation_data['timestamp'] = interval_date.isoformat()
            aggregation_data[self.aggregation_field] = aggregation['key']
            aggregation_data['count'] = aggregation['doc_count']

            if self.metric_aggregation_fields:
                for f in self.metric_aggregation_fields:
                    aggregation_data[f] = aggregation[f]['value']

            doc = aggregation.top_hit.hits.hits[0]['_source']
            for destination, source in self.copy_fields.items():
                if isinstance(source, six.string_types):
                    aggregation_data[destination] = doc[source]
                else:
                    aggregation_data[destination] = source(
                        doc,
                        aggregation_data
                    )

            index_name = 'stats-{0}-{1}'.format(
                self.event,
                interval_date.strftime(self.index_name_suffix))
            self.indices.add(index_name)

            yield dict(_id='{0}-{1}'.format(
                           aggregation['key'],
                           interval_date.strftime(self.doc_id_suffix)),
                       _index=index_name,
                       _type=self.aggregation_doc_type,
                       _source=aggregation_data)
    self.last_index_written = index_name
python
{ "resource": "" }
q261997
StatAggregator.run
validation
def run(self, start_date=None, end_date=None, update_bookmark=True):
    """Calculate statistics aggregations."""
    # If no events have been indexed there is nothing to aggregate
    if not Index(self.event_index, using=self.client).exists():
        return

    lower_limit = start_date or self.get_bookmark()
    # Stop here if no bookmark could be estimated.
    if lower_limit is None:
        return

    upper_limit = min(
        end_date or datetime.datetime.max,  # ignore if `None`
        datetime.datetime.utcnow().replace(microsecond=0),
        datetime.datetime.combine(
            lower_limit + datetime.timedelta(self.batch_size),
            datetime.datetime.min.time())
    )
    while upper_limit <= datetime.datetime.utcnow():
        self.indices = set()
        self.new_bookmark = upper_limit.strftime(self.doc_id_suffix)
        bulk(self.client,
             self.agg_iter(lower_limit, upper_limit),
             stats_only=True,
             chunk_size=50)
        # Flush all indices which have been modified
        current_search_client.indices.flush(
            index=','.join(self.indices),
            wait_if_ongoing=True
        )
        if update_bookmark:
            self.set_bookmark()
        self.indices = set()
        lower_limit = lower_limit + datetime.timedelta(self.batch_size)
        upper_limit = min(
            end_date or datetime.datetime.max,  # ignore if `None`
            datetime.datetime.utcnow().replace(microsecond=0),
            lower_limit + datetime.timedelta(self.batch_size)
        )
        if lower_limit > upper_limit:
            break
python
{ "resource": "" }
q261998
StatAggregator.list_bookmarks
validation
def list_bookmarks(self, start_date=None, end_date=None, limit=None):
    """List the aggregation's bookmarks."""
    query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    ).sort({'date': {'order': 'desc'}})

    range_args = {}
    if start_date:
        range_args['gte'] = self._format_range_dt(
            start_date.replace(microsecond=0))
    if end_date:
        range_args['lte'] = self._format_range_dt(
            end_date.replace(microsecond=0))
    if range_args:
        query = query.filter('range', date=range_args)

    return query[0:limit].execute() if limit else query.scan()
python
{ "resource": "" }
q261999
StatAggregator.delete
validation
def delete(self, start_date=None, end_date=None):
    """Delete aggregation documents."""
    aggs_query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.aggregation_doc_type
    ).extra(_source=False)

    range_args = {}
    if start_date:
        range_args['gte'] = self._format_range_dt(
            start_date.replace(microsecond=0))
    if end_date:
        range_args['lte'] = self._format_range_dt(
            end_date.replace(microsecond=0))
    if range_args:
        aggs_query = aggs_query.filter('range', timestamp=range_args)

    bookmarks_query = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    ).sort({'date': {'order': 'desc'}})

    if range_args:
        bookmarks_query = bookmarks_query.filter('range', date=range_args)

    def _delete_actions():
        for query in (aggs_query, bookmarks_query):
            affected_indices = set()
            for doc in query.scan():
                affected_indices.add(doc.meta.index)
                yield dict(_index=doc.meta.index,
                           _op_type='delete',
                           _id=doc.meta.id,
                           _type=doc.meta.doc_type)
            current_search_client.indices.flush(
                index=','.join(affected_indices), wait_if_ongoing=True)
    bulk(self.client, _delete_actions(), refresh=True)
python
{ "resource": "" }