body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
a5643fe6df11b4e8e844da93b1662c993eb0487eb69c2b57d7e7959861ea5e7a
def lookahead(self, element, table=None, fields=None, tree=None, directory=None, lookup=None): '\n Find referenced elements in the tree\n\n Args:\n element: the element\n table: the DB table\n fields: the FK fields in the table\n tree: the import tree\n directory: a dictionary to lookup elements in the tree\n (will be filled in by this function)\n ' db = current.db s3db = current.s3db xml = current.xml import_uid = xml.import_uid ATTRIBUTE = xml.ATTRIBUTE TAG = xml.TAG UID = xml.UID reference_list = [] rlappend = reference_list.append root = None if (tree is not None): root = (tree if isinstance(tree, etree._Element) else tree.getroot()) uidmap = self.uidmap references = ([lookup] if lookup else element.findall('reference')) for reference in references: if lookup: field = None if (element is None): (tablename, attr, uid) = reference ktable = s3db.table(tablename) if (ktable is None): continue uids = ([import_uid(uid)] if (attr == 'uuid') else [uid]) else: tablename = element.get(ATTRIBUTE.name, None) (ktable, uid) = reference attr = UID uids = [import_uid(uid)] else: field = reference.get(ATTRIBUTE.field, None) if ((not field) or (field not in fields) or (field not in table)): continue (ktablename, _, multiple) = s3_get_foreign_key(table[field]) if (not ktablename): continue try: ktable = s3db[ktablename] except AttributeError: continue tablename = reference.get(ATTRIBUTE.resource, None) if (UID not in ktable.fields): continue if (not tablename): tablename = ktablename if (tablename != ktablename): field = (ktable._id.name, field) if ((tablename == ktablename) and (ktable._id.name != 'id')): continue uids = reference.get(UID, None) attr = UID if (not uids): uids = reference.get(ATTRIBUTE.tuid, None) attr = ATTRIBUTE.tuid if (uids and multiple): uids = json.loads(uids) elif uids: uids = [uids] relements = [] id_map = {} if ((attr == UID) and uids): if (len(uids) == 1): uid = import_uid(uids[0]) record = db((ktable[UID] == uid)).select(ktable.id, cacheable=True, 
limitby=(0, 1)).first() if record: id_map[uid] = record.id else: uids_ = [import_uid(uid) for uid in uids] records = db(ktable[UID].belongs(uids_)).select(ktable.id, ktable[UID], limitby=(0, len(uids_))) for r in records: id_map[r[UID]] = r.id if (not uids): expr = ('.//%s[@%s="%s"]' % (TAG.resource, ATTRIBUTE.name, tablename)) relements = reference.xpath(expr) if (relements and (not multiple)): relements = relements[:1] elif (root is not None): for uid in uids: entry = None if (directory is not None): entry = directory.get((tablename, attr, uid)) if (not entry): e = (uidmap[attr].get((tablename, uid)) if uidmap else None) if (e is not None): relements.append(e) else: _uid = import_uid(uid) if (_uid and (_uid in id_map)): _id = id_map[_uid] entry = Storage(tablename=tablename, element=None, uid=uid, id=_id, item_id=None) rlappend(Storage(field=field, element=reference, entry=entry)) else: continue else: rlappend(Storage(field=field, element=reference, entry=entry)) for relement in relements: uid = relement.get(attr, None) if (attr == UID): _uid = import_uid(uid) _id = ((_uid and id_map and id_map.get(_uid, None)) or None) else: _uid = None _id = None entry = Storage(tablename=tablename, element=relement, uid=uid, id=_id, item_id=None) if (uid and (directory is not None)): directory[(tablename, attr, uid)] = entry rlappend(Storage(field=field, element=reference, entry=entry)) return reference_list
Find referenced elements in the tree Args: element: the element table: the DB table fields: the FK fields in the table tree: the import tree directory: a dictionary to lookup elements in the tree (will be filled in by this function)
modules/s3/s3import.py
lookahead
annehaley/eden
205
python
def lookahead(self, element, table=None, fields=None, tree=None, directory=None, lookup=None): '\n Find referenced elements in the tree\n\n Args:\n element: the element\n table: the DB table\n fields: the FK fields in the table\n tree: the import tree\n directory: a dictionary to lookup elements in the tree\n (will be filled in by this function)\n ' db = current.db s3db = current.s3db xml = current.xml import_uid = xml.import_uid ATTRIBUTE = xml.ATTRIBUTE TAG = xml.TAG UID = xml.UID reference_list = [] rlappend = reference_list.append root = None if (tree is not None): root = (tree if isinstance(tree, etree._Element) else tree.getroot()) uidmap = self.uidmap references = ([lookup] if lookup else element.findall('reference')) for reference in references: if lookup: field = None if (element is None): (tablename, attr, uid) = reference ktable = s3db.table(tablename) if (ktable is None): continue uids = ([import_uid(uid)] if (attr == 'uuid') else [uid]) else: tablename = element.get(ATTRIBUTE.name, None) (ktable, uid) = reference attr = UID uids = [import_uid(uid)] else: field = reference.get(ATTRIBUTE.field, None) if ((not field) or (field not in fields) or (field not in table)): continue (ktablename, _, multiple) = s3_get_foreign_key(table[field]) if (not ktablename): continue try: ktable = s3db[ktablename] except AttributeError: continue tablename = reference.get(ATTRIBUTE.resource, None) if (UID not in ktable.fields): continue if (not tablename): tablename = ktablename if (tablename != ktablename): field = (ktable._id.name, field) if ((tablename == ktablename) and (ktable._id.name != 'id')): continue uids = reference.get(UID, None) attr = UID if (not uids): uids = reference.get(ATTRIBUTE.tuid, None) attr = ATTRIBUTE.tuid if (uids and multiple): uids = json.loads(uids) elif uids: uids = [uids] relements = [] id_map = {} if ((attr == UID) and uids): if (len(uids) == 1): uid = import_uid(uids[0]) record = db((ktable[UID] == uid)).select(ktable.id, cacheable=True, 
limitby=(0, 1)).first() if record: id_map[uid] = record.id else: uids_ = [import_uid(uid) for uid in uids] records = db(ktable[UID].belongs(uids_)).select(ktable.id, ktable[UID], limitby=(0, len(uids_))) for r in records: id_map[r[UID]] = r.id if (not uids): expr = ('.//%s[@%s="%s"]' % (TAG.resource, ATTRIBUTE.name, tablename)) relements = reference.xpath(expr) if (relements and (not multiple)): relements = relements[:1] elif (root is not None): for uid in uids: entry = None if (directory is not None): entry = directory.get((tablename, attr, uid)) if (not entry): e = (uidmap[attr].get((tablename, uid)) if uidmap else None) if (e is not None): relements.append(e) else: _uid = import_uid(uid) if (_uid and (_uid in id_map)): _id = id_map[_uid] entry = Storage(tablename=tablename, element=None, uid=uid, id=_id, item_id=None) rlappend(Storage(field=field, element=reference, entry=entry)) else: continue else: rlappend(Storage(field=field, element=reference, entry=entry)) for relement in relements: uid = relement.get(attr, None) if (attr == UID): _uid = import_uid(uid) _id = ((_uid and id_map and id_map.get(_uid, None)) or None) else: _uid = None _id = None entry = Storage(tablename=tablename, element=relement, uid=uid, id=_id, item_id=None) if (uid and (directory is not None)): directory[(tablename, attr, uid)] = entry rlappend(Storage(field=field, element=reference, entry=entry)) return reference_list
def lookahead(self, element, table=None, fields=None, tree=None, directory=None, lookup=None): '\n Find referenced elements in the tree\n\n Args:\n element: the element\n table: the DB table\n fields: the FK fields in the table\n tree: the import tree\n directory: a dictionary to lookup elements in the tree\n (will be filled in by this function)\n ' db = current.db s3db = current.s3db xml = current.xml import_uid = xml.import_uid ATTRIBUTE = xml.ATTRIBUTE TAG = xml.TAG UID = xml.UID reference_list = [] rlappend = reference_list.append root = None if (tree is not None): root = (tree if isinstance(tree, etree._Element) else tree.getroot()) uidmap = self.uidmap references = ([lookup] if lookup else element.findall('reference')) for reference in references: if lookup: field = None if (element is None): (tablename, attr, uid) = reference ktable = s3db.table(tablename) if (ktable is None): continue uids = ([import_uid(uid)] if (attr == 'uuid') else [uid]) else: tablename = element.get(ATTRIBUTE.name, None) (ktable, uid) = reference attr = UID uids = [import_uid(uid)] else: field = reference.get(ATTRIBUTE.field, None) if ((not field) or (field not in fields) or (field not in table)): continue (ktablename, _, multiple) = s3_get_foreign_key(table[field]) if (not ktablename): continue try: ktable = s3db[ktablename] except AttributeError: continue tablename = reference.get(ATTRIBUTE.resource, None) if (UID not in ktable.fields): continue if (not tablename): tablename = ktablename if (tablename != ktablename): field = (ktable._id.name, field) if ((tablename == ktablename) and (ktable._id.name != 'id')): continue uids = reference.get(UID, None) attr = UID if (not uids): uids = reference.get(ATTRIBUTE.tuid, None) attr = ATTRIBUTE.tuid if (uids and multiple): uids = json.loads(uids) elif uids: uids = [uids] relements = [] id_map = {} if ((attr == UID) and uids): if (len(uids) == 1): uid = import_uid(uids[0]) record = db((ktable[UID] == uid)).select(ktable.id, cacheable=True, 
limitby=(0, 1)).first() if record: id_map[uid] = record.id else: uids_ = [import_uid(uid) for uid in uids] records = db(ktable[UID].belongs(uids_)).select(ktable.id, ktable[UID], limitby=(0, len(uids_))) for r in records: id_map[r[UID]] = r.id if (not uids): expr = ('.//%s[@%s="%s"]' % (TAG.resource, ATTRIBUTE.name, tablename)) relements = reference.xpath(expr) if (relements and (not multiple)): relements = relements[:1] elif (root is not None): for uid in uids: entry = None if (directory is not None): entry = directory.get((tablename, attr, uid)) if (not entry): e = (uidmap[attr].get((tablename, uid)) if uidmap else None) if (e is not None): relements.append(e) else: _uid = import_uid(uid) if (_uid and (_uid in id_map)): _id = id_map[_uid] entry = Storage(tablename=tablename, element=None, uid=uid, id=_id, item_id=None) rlappend(Storage(field=field, element=reference, entry=entry)) else: continue else: rlappend(Storage(field=field, element=reference, entry=entry)) for relement in relements: uid = relement.get(attr, None) if (attr == UID): _uid = import_uid(uid) _id = ((_uid and id_map and id_map.get(_uid, None)) or None) else: _uid = None _id = None entry = Storage(tablename=tablename, element=relement, uid=uid, id=_id, item_id=None) if (uid and (directory is not None)): directory[(tablename, attr, uid)] = entry rlappend(Storage(field=field, element=reference, entry=entry)) return reference_list<|docstring|>Find referenced elements in the tree Args: element: the element table: the DB table fields: the FK fields in the table tree: the import tree directory: a dictionary to lookup elements in the tree (will be filled in by this function)<|endoftext|>
6a3e26105efb27c827aa7a3fcf5591127743d1576ad85ca2a453cb7f4452d72a
def load_item(self, row): '\n Load an item from the item table (counterpart to add_item\n when restoring a job from the database)\n ' item = S3ImportItem(self) if (not item.restore(row)): self.error = item.error if (item.load_parent is None): self.error_tree.append(deepcopy(item.element)) item_id = item.item_id self.items[item_id] = item return item_id
Load an item from the item table (counterpart to add_item when restoring a job from the database)
modules/s3/s3import.py
load_item
annehaley/eden
205
python
def load_item(self, row): '\n Load an item from the item table (counterpart to add_item\n when restoring a job from the database)\n ' item = S3ImportItem(self) if (not item.restore(row)): self.error = item.error if (item.load_parent is None): self.error_tree.append(deepcopy(item.element)) item_id = item.item_id self.items[item_id] = item return item_id
def load_item(self, row): '\n Load an item from the item table (counterpart to add_item\n when restoring a job from the database)\n ' item = S3ImportItem(self) if (not item.restore(row)): self.error = item.error if (item.load_parent is None): self.error_tree.append(deepcopy(item.element)) item_id = item.item_id self.items[item_id] = item return item_id<|docstring|>Load an item from the item table (counterpart to add_item when restoring a job from the database)<|endoftext|>
1052cb6431831c4ad1d62a236184c64f550d3ce1a22eeef8b8240e23620a62a3
def resolve(self, item_id, import_list): '\n Resolve the reference list of an item\n\n Args:\n item_id: the import item UID\n import_list: the ordered list of items (UIDs) to import\n ' item = self.items[item_id] if (item.lock or (item.accepted is False)): return False references = [] for reference in item.references: ritem_id = reference.entry.item_id if (ritem_id and (ritem_id not in import_list)): references.append(ritem_id) for ritem_id in references: item.lock = True if self.resolve(ritem_id, import_list): import_list.append(ritem_id) item.lock = False return True
Resolve the reference list of an item Args: item_id: the import item UID import_list: the ordered list of items (UIDs) to import
modules/s3/s3import.py
resolve
annehaley/eden
205
python
def resolve(self, item_id, import_list): '\n Resolve the reference list of an item\n\n Args:\n item_id: the import item UID\n import_list: the ordered list of items (UIDs) to import\n ' item = self.items[item_id] if (item.lock or (item.accepted is False)): return False references = [] for reference in item.references: ritem_id = reference.entry.item_id if (ritem_id and (ritem_id not in import_list)): references.append(ritem_id) for ritem_id in references: item.lock = True if self.resolve(ritem_id, import_list): import_list.append(ritem_id) item.lock = False return True
def resolve(self, item_id, import_list): '\n Resolve the reference list of an item\n\n Args:\n item_id: the import item UID\n import_list: the ordered list of items (UIDs) to import\n ' item = self.items[item_id] if (item.lock or (item.accepted is False)): return False references = [] for reference in item.references: ritem_id = reference.entry.item_id if (ritem_id and (ritem_id not in import_list)): references.append(ritem_id) for ritem_id in references: item.lock = True if self.resolve(ritem_id, import_list): import_list.append(ritem_id) item.lock = False return True<|docstring|>Resolve the reference list of an item Args: item_id: the import item UID import_list: the ordered list of items (UIDs) to import<|endoftext|>
b278f44114dda66695f3253ee12a00fc82acb88ad66020c4fe1701a9b7102395
def commit(self, ignore_errors=False, log_items=None): '\n Commit the import job to the DB\n\n Args:\n ignore_errors: skip any items with errors\n (does still report the errors)\n log_items: callback function to log import items\n before committing them\n ' ATTRIBUTE = current.xml.ATTRIBUTE METHOD = S3ImportItem.METHOD import_list = [] for item_id in self.items: self.resolve(item_id, import_list) if (item_id not in import_list): import_list.append(item_id) items = self.items count = 0 mtime = None created = [] cappend = created.append updated = [] deleted = [] tablename = self.table._tablename self.log = log_items failed = False for item_id in import_list: item = items[item_id] error = None if (item.accepted is not False): logged = False success = item.commit(ignore_errors=ignore_errors) else: logged = True success = ignore_errors if (not success): failed = True error = item.error if error: current.log.error(error) self.error = error element = item.element if (element is not None): if (not element.get(ATTRIBUTE.error, False)): element.set(ATTRIBUTE.error, s3_str(error)) if (not logged): self.error_tree.append(deepcopy(element)) elif (item.tablename == tablename): count += 1 if ((mtime is None) or (item.mtime > mtime)): mtime = item.mtime if item.id: if (item.method == METHOD.CREATE): cappend(item.id) elif (item.method == METHOD.UPDATE): updated.append(item.id) elif (item.method in (METHOD.MERGE, METHOD.DELETE)): deleted.append(item.id) if failed: return False self.count = count self.mtime = mtime self.created = created self.updated = updated self.deleted = deleted return True
Commit the import job to the DB Args: ignore_errors: skip any items with errors (does still report the errors) log_items: callback function to log import items before committing them
modules/s3/s3import.py
commit
annehaley/eden
205
python
def commit(self, ignore_errors=False, log_items=None): '\n Commit the import job to the DB\n\n Args:\n ignore_errors: skip any items with errors\n (does still report the errors)\n log_items: callback function to log import items\n before committing them\n ' ATTRIBUTE = current.xml.ATTRIBUTE METHOD = S3ImportItem.METHOD import_list = [] for item_id in self.items: self.resolve(item_id, import_list) if (item_id not in import_list): import_list.append(item_id) items = self.items count = 0 mtime = None created = [] cappend = created.append updated = [] deleted = [] tablename = self.table._tablename self.log = log_items failed = False for item_id in import_list: item = items[item_id] error = None if (item.accepted is not False): logged = False success = item.commit(ignore_errors=ignore_errors) else: logged = True success = ignore_errors if (not success): failed = True error = item.error if error: current.log.error(error) self.error = error element = item.element if (element is not None): if (not element.get(ATTRIBUTE.error, False)): element.set(ATTRIBUTE.error, s3_str(error)) if (not logged): self.error_tree.append(deepcopy(element)) elif (item.tablename == tablename): count += 1 if ((mtime is None) or (item.mtime > mtime)): mtime = item.mtime if item.id: if (item.method == METHOD.CREATE): cappend(item.id) elif (item.method == METHOD.UPDATE): updated.append(item.id) elif (item.method in (METHOD.MERGE, METHOD.DELETE)): deleted.append(item.id) if failed: return False self.count = count self.mtime = mtime self.created = created self.updated = updated self.deleted = deleted return True
def commit(self, ignore_errors=False, log_items=None): '\n Commit the import job to the DB\n\n Args:\n ignore_errors: skip any items with errors\n (does still report the errors)\n log_items: callback function to log import items\n before committing them\n ' ATTRIBUTE = current.xml.ATTRIBUTE METHOD = S3ImportItem.METHOD import_list = [] for item_id in self.items: self.resolve(item_id, import_list) if (item_id not in import_list): import_list.append(item_id) items = self.items count = 0 mtime = None created = [] cappend = created.append updated = [] deleted = [] tablename = self.table._tablename self.log = log_items failed = False for item_id in import_list: item = items[item_id] error = None if (item.accepted is not False): logged = False success = item.commit(ignore_errors=ignore_errors) else: logged = True success = ignore_errors if (not success): failed = True error = item.error if error: current.log.error(error) self.error = error element = item.element if (element is not None): if (not element.get(ATTRIBUTE.error, False)): element.set(ATTRIBUTE.error, s3_str(error)) if (not logged): self.error_tree.append(deepcopy(element)) elif (item.tablename == tablename): count += 1 if ((mtime is None) or (item.mtime > mtime)): mtime = item.mtime if item.id: if (item.method == METHOD.CREATE): cappend(item.id) elif (item.method == METHOD.UPDATE): updated.append(item.id) elif (item.method in (METHOD.MERGE, METHOD.DELETE)): deleted.append(item.id) if failed: return False self.count = count self.mtime = mtime self.created = created self.updated = updated self.deleted = deleted return True<|docstring|>Commit the import job to the DB Args: ignore_errors: skip any items with errors (does still report the errors) log_items: callback function to log import items before committing them<|endoftext|>
ff36fc835e2899a009f6107ae7dd1a7a17609add00bed2d22fc1e680d3cf208c
def __define_tables(self): '\n Define the database tables for jobs and items\n ' self.job_table = self.define_job_table() self.item_table = self.define_item_table()
Define the database tables for jobs and items
modules/s3/s3import.py
__define_tables
annehaley/eden
205
python
def __define_tables(self): '\n \n ' self.job_table = self.define_job_table() self.item_table = self.define_item_table()
def __define_tables(self): '\n \n ' self.job_table = self.define_job_table() self.item_table = self.define_item_table()<|docstring|>Define the database tables for jobs and items<|endoftext|>
e4e4a576c37fa842cff275d92e594846b511eb6885194fe799e4fcf53ca79768
def store(self): '\n Store this job and all its items in the job table\n ' db = current.db self.__define_tables() jobtable = self.job_table query = (jobtable.job_id == self.job_id) row = db(query).select(jobtable.id, limitby=(0, 1)).first() if row: record_id = row.id else: record_id = None record = Storage(job_id=self.job_id) try: tablename = self.table._tablename except AttributeError: pass else: record.update(tablename=tablename) for item in self.items.values(): item.store(item_table=self.item_table) if record_id: db((jobtable.id == record_id)).update(**record) else: record_id = jobtable.insert(**record) return record_id
Store this job and all its items in the job table
modules/s3/s3import.py
store
annehaley/eden
205
python
def store(self): '\n \n ' db = current.db self.__define_tables() jobtable = self.job_table query = (jobtable.job_id == self.job_id) row = db(query).select(jobtable.id, limitby=(0, 1)).first() if row: record_id = row.id else: record_id = None record = Storage(job_id=self.job_id) try: tablename = self.table._tablename except AttributeError: pass else: record.update(tablename=tablename) for item in self.items.values(): item.store(item_table=self.item_table) if record_id: db((jobtable.id == record_id)).update(**record) else: record_id = jobtable.insert(**record) return record_id
def store(self): '\n \n ' db = current.db self.__define_tables() jobtable = self.job_table query = (jobtable.job_id == self.job_id) row = db(query).select(jobtable.id, limitby=(0, 1)).first() if row: record_id = row.id else: record_id = None record = Storage(job_id=self.job_id) try: tablename = self.table._tablename except AttributeError: pass else: record.update(tablename=tablename) for item in self.items.values(): item.store(item_table=self.item_table) if record_id: db((jobtable.id == record_id)).update(**record) else: record_id = jobtable.insert(**record) return record_id<|docstring|>Store this job and all its items in the job table<|endoftext|>
ba088a0fd8cc0fc0d19034b2bbc12ba887371966d5546616a78b6b2620728330
def get_tree(self): '\n Reconstruct the element tree of this job\n ' if (self.tree is not None): return self.tree xml = current.xml ATTRIBUTE = xml.ATTRIBUTE UID = xml.UID root = etree.Element(xml.TAG.root) for item in self.items.values(): element = item.element if ((element is not None) and (not item.parent)): if ((item.tablename == self.table._tablename) or element.get(UID, None) or element.get(ATTRIBUTE.tuid, None)): root.append(deepcopy(element)) return etree.ElementTree(root)
Reconstruct the element tree of this job
modules/s3/s3import.py
get_tree
annehaley/eden
205
python
def get_tree(self): '\n \n ' if (self.tree is not None): return self.tree xml = current.xml ATTRIBUTE = xml.ATTRIBUTE UID = xml.UID root = etree.Element(xml.TAG.root) for item in self.items.values(): element = item.element if ((element is not None) and (not item.parent)): if ((item.tablename == self.table._tablename) or element.get(UID, None) or element.get(ATTRIBUTE.tuid, None)): root.append(deepcopy(element)) return etree.ElementTree(root)
def get_tree(self): '\n \n ' if (self.tree is not None): return self.tree xml = current.xml ATTRIBUTE = xml.ATTRIBUTE UID = xml.UID root = etree.Element(xml.TAG.root) for item in self.items.values(): element = item.element if ((element is not None) and (not item.parent)): if ((item.tablename == self.table._tablename) or element.get(UID, None) or element.get(ATTRIBUTE.tuid, None)): root.append(deepcopy(element)) return etree.ElementTree(root)<|docstring|>Reconstruct the element tree of this job<|endoftext|>
f1f99c4ec0f9e2e69241886db828eb4a87c165161e3c96e05306e06881958639
def delete(self): '\n Delete this job and all its items from the job table\n ' db = current.db self.__define_tables() db((self.item_table.job_id == self.job_id)).delete() db((self.job_table.job_id == self.job_id)).delete()
Delete this job and all its items from the job table
modules/s3/s3import.py
delete
annehaley/eden
205
python
def delete(self): '\n \n ' db = current.db self.__define_tables() db((self.item_table.job_id == self.job_id)).delete() db((self.job_table.job_id == self.job_id)).delete()
def delete(self): '\n \n ' db = current.db self.__define_tables() db((self.item_table.job_id == self.job_id)).delete() db((self.job_table.job_id == self.job_id)).delete()<|docstring|>Delete this job and all its items from the job table<|endoftext|>
f26705673a06cdfda925dd94d45b2f3b2000c06749049a27b21211f707144d8a
def restore_references(self): "\n Restore the job's reference structure after loading items\n from the item table\n " db = current.db UID = current.xml.UID for item in self.items.values(): for citem_id in item.load_components: if (citem_id in self.items): item.components.append(self.items[citem_id]) item.load_components = [] for ritem in item.load_references: field = ritem['field'] if ('item_id' in ritem): item_id = ritem['item_id'] if (item_id in self.items): _item = self.items[item_id] entry = Storage(tablename=_item.tablename, element=_item.element, uid=_item.uid, id=_item.id, item_id=item_id) item.references.append(Storage(field=field, entry=entry)) else: _id = None uid = ritem.get('uid', None) tablename = ritem.get('tablename', None) if (tablename and uid): try: table = current.s3db[tablename] except AttributeError: continue if (UID not in table.fields): continue row = db((table[UID] == uid)).select(table._id, limitby=(0, 1)).first() if row: _id = row[table._id.name] else: continue entry = Storage(tablename=ritem['tablename'], element=None, uid=ritem['uid'], id=_id, item_id=None) item.references.append(Storage(field=field, entry=entry)) item.load_references = [] if (item.load_parent is not None): parent = self.items[item.load_parent] if (parent is None): item.skip = True else: item.parent = parent item.load_parent = None
Restore the job's reference structure after loading items from the item table
modules/s3/s3import.py
restore_references
annehaley/eden
205
python
def restore_references(self): "\n Restore the job's reference structure after loading items\n from the item table\n " db = current.db UID = current.xml.UID for item in self.items.values(): for citem_id in item.load_components: if (citem_id in self.items): item.components.append(self.items[citem_id]) item.load_components = [] for ritem in item.load_references: field = ritem['field'] if ('item_id' in ritem): item_id = ritem['item_id'] if (item_id in self.items): _item = self.items[item_id] entry = Storage(tablename=_item.tablename, element=_item.element, uid=_item.uid, id=_item.id, item_id=item_id) item.references.append(Storage(field=field, entry=entry)) else: _id = None uid = ritem.get('uid', None) tablename = ritem.get('tablename', None) if (tablename and uid): try: table = current.s3db[tablename] except AttributeError: continue if (UID not in table.fields): continue row = db((table[UID] == uid)).select(table._id, limitby=(0, 1)).first() if row: _id = row[table._id.name] else: continue entry = Storage(tablename=ritem['tablename'], element=None, uid=ritem['uid'], id=_id, item_id=None) item.references.append(Storage(field=field, entry=entry)) item.load_references = [] if (item.load_parent is not None): parent = self.items[item.load_parent] if (parent is None): item.skip = True else: item.parent = parent item.load_parent = None
def restore_references(self): "\n Restore the job's reference structure after loading items\n from the item table\n " db = current.db UID = current.xml.UID for item in self.items.values(): for citem_id in item.load_components: if (citem_id in self.items): item.components.append(self.items[citem_id]) item.load_components = [] for ritem in item.load_references: field = ritem['field'] if ('item_id' in ritem): item_id = ritem['item_id'] if (item_id in self.items): _item = self.items[item_id] entry = Storage(tablename=_item.tablename, element=_item.element, uid=_item.uid, id=_item.id, item_id=item_id) item.references.append(Storage(field=field, entry=entry)) else: _id = None uid = ritem.get('uid', None) tablename = ritem.get('tablename', None) if (tablename and uid): try: table = current.s3db[tablename] except AttributeError: continue if (UID not in table.fields): continue row = db((table[UID] == uid)).select(table._id, limitby=(0, 1)).first() if row: _id = row[table._id.name] else: continue entry = Storage(tablename=ritem['tablename'], element=None, uid=ritem['uid'], id=_id, item_id=None) item.references.append(Storage(field=field, entry=entry)) item.load_references = [] if (item.load_parent is not None): parent = self.items[item.load_parent] if (parent is None): item.skip = True else: item.parent = parent item.load_parent = None<|docstring|>Restore the job's reference structure after loading items from the item table<|endoftext|>
562633de282e6132b3e37c0565dfca05092878d992eb8e09838fdae8b3174ca0
def __init__(self, obj): '\n Constructor\n\n @param obj: the object to inspect (parsed)\n ' self.obj = obj self._refs = None self._objs = None
Constructor @param obj: the object to inspect (parsed)
modules/s3/s3import.py
__init__
annehaley/eden
205
python
def __init__(self, obj): '\n Constructor\n\n @param obj: the object to inspect (parsed)\n ' self.obj = obj self._refs = None self._objs = None
def __init__(self, obj): '\n Constructor\n\n @param obj: the object to inspect (parsed)\n ' self.obj = obj self._refs = None self._objs = None<|docstring|>Constructor @param obj: the object to inspect (parsed)<|endoftext|>
52d61efbc11904cb09b6fa1472796e7f43ad971594f612f41a4366adf94483cb
@property def refs(self): '\n List of references discovered in the object (lazy property)\n\n @returns: a list of tuples (tablename, uidtype, uid)\n ' if (self._refs is None): self._refs = [] self._objs = {} self._traverse(self.obj) return self._refs
List of references discovered in the object (lazy property) @returns: a list of tuples (tablename, uidtype, uid)
modules/s3/s3import.py
refs
annehaley/eden
205
python
@property def refs(self): '\n List of references discovered in the object (lazy property)\n\n @returns: a list of tuples (tablename, uidtype, uid)\n ' if (self._refs is None): self._refs = [] self._objs = {} self._traverse(self.obj) return self._refs
@property def refs(self): '\n List of references discovered in the object (lazy property)\n\n @returns: a list of tuples (tablename, uidtype, uid)\n ' if (self._refs is None): self._refs = [] self._objs = {} self._traverse(self.obj) return self._refs<|docstring|>List of references discovered in the object (lazy property) @returns: a list of tuples (tablename, uidtype, uid)<|endoftext|>
18ae37263bf2eddd7163666b9f044b87aa4b86b4eaedb0e55300bf822f9ce84c
@property def objs(self): '\n A dict with pointers to the references inside the object\n\n @returns: a dict {(tablename, uidtype, uid): (obj, key)}\n ' if (self._objs is None): self._refs = [] self._objs = {} self._traverse(self.obj) return self._objs
A dict with pointers to the references inside the object @returns: a dict {(tablename, uidtype, uid): (obj, key)}
modules/s3/s3import.py
objs
annehaley/eden
205
python
@property def objs(self): '\n A dict with pointers to the references inside the object\n\n @returns: a dict {(tablename, uidtype, uid): (obj, key)}\n ' if (self._objs is None): self._refs = [] self._objs = {} self._traverse(self.obj) return self._objs
@property def objs(self): '\n A dict with pointers to the references inside the object\n\n @returns: a dict {(tablename, uidtype, uid): (obj, key)}\n ' if (self._objs is None): self._refs = [] self._objs = {} self._traverse(self.obj) return self._objs<|docstring|>A dict with pointers to the references inside the object @returns: a dict {(tablename, uidtype, uid): (obj, key)}<|endoftext|>
ab5d20ce68e57396a3bd43d1b4b28b95b06a720d5d1ad4aee5de3847aa6512e1
def _traverse(self, obj):
    """
    Traverse a (possibly nested) object and find all references,
    populates self.refs and self.objs

    @param obj: the object to inspect
    """
    refs = self._refs
    objs = self._objs
    if (type(obj) is list):
        # Lists: recurse into each element
        for item in obj:
            self._traverse(item)
    elif (type(obj) is dict):
        for (key, value) in obj.items():
            if ((key[:3] == '$k_') and (type(value) is dict)):
                # Candidate reference: a "$k_<fieldname>" key with a dict value
                tablename = uid = uid_type = None
                # Determine the referenced table name
                for k in self.TABLENAME_KEYS:
                    tablename = value.get(k)
                    if tablename:
                        break
                if tablename:
                    # Prefer a UUID to identify the referenced record
                    for k in self.UUID_KEYS:
                        uid = value.get(k)
                        if uid:
                            uid_type = 'uuid'
                            break
                if (tablename and (not uid)):
                    # Fall back to a TUID (import-local identifier)
                    for k in self.TUID_KEYS:
                        uid = value.get(k)
                        if uid:
                            uid_type = 'tuid'
                            break
                if ((not tablename) or (not uid)):
                    # Not a recognizable reference => recurse into the value
                    self._traverse(value)
                else:
                    # Record the reference, and remember where it occurs
                    # so that resolve() can later replace all occurrences
                    ref = (tablename, uid_type, uid)
                    if (ref not in objs):
                        refs.append(ref)
                        objs[ref] = [(obj, key)]
                    else:
                        objs[ref].append((obj, key))
            else:
                self._traverse(value)
Traverse a (possibly nested) object and find all references, populates self.refs and self.objs @param obj: the object to inspect
modules/s3/s3import.py
_traverse
annehaley/eden
205
python
def _traverse(self, obj): '\n Traverse a (possibly nested) object and find all references,\n populates self.refs and self.objs\n\n @param obj: the object to inspect\n ' refs = self._refs objs = self._objs if (type(obj) is list): for item in obj: self._traverse(item) elif (type(obj) is dict): for (key, value) in obj.items(): if ((key[:3] == '$k_') and (type(value) is dict)): tablename = uid = uid_type = None for k in self.TABLENAME_KEYS: tablename = value.get(k) if tablename: break if tablename: for k in self.UUID_KEYS: uid = value.get(k) if uid: uid_type = 'uuid' break if (tablename and (not uid)): for k in self.TUID_KEYS: uid = value.get(k) if uid: uid_type = 'tuid' break if ((not tablename) or (not uid)): self._traverse(value) else: ref = (tablename, uid_type, uid) if (ref not in objs): refs.append(ref) objs[ref] = [(obj, key)] else: objs[ref].append((obj, key)) else: self._traverse(value)
def _traverse(self, obj): '\n Traverse a (possibly nested) object and find all references,\n populates self.refs and self.objs\n\n @param obj: the object to inspect\n ' refs = self._refs objs = self._objs if (type(obj) is list): for item in obj: self._traverse(item) elif (type(obj) is dict): for (key, value) in obj.items(): if ((key[:3] == '$k_') and (type(value) is dict)): tablename = uid = uid_type = None for k in self.TABLENAME_KEYS: tablename = value.get(k) if tablename: break if tablename: for k in self.UUID_KEYS: uid = value.get(k) if uid: uid_type = 'uuid' break if (tablename and (not uid)): for k in self.TUID_KEYS: uid = value.get(k) if uid: uid_type = 'tuid' break if ((not tablename) or (not uid)): self._traverse(value) else: ref = (tablename, uid_type, uid) if (ref not in objs): refs.append(ref) objs[ref] = [(obj, key)] else: objs[ref].append((obj, key)) else: self._traverse(value)<|docstring|>Traverse a (possibly nested) object and find all references, populates self.refs and self.objs @param obj: the object to inspect<|endoftext|>
c5c7bf4bd199ea764a593a9db96b0c5bf419a777663b4c45a154959a63721965
def resolve(self, tablename, uidtype, uid, value):
    """
    Resolve a reference in self.obj with the given value; will
    resolve all occurences of the reference

    @param tablename: the referenced table
    @param uidtype: the type of uid (uuid or tuid)
    @param uid: the uuid or tuid
    @param value: the value to resolve the reference
    """
    occurrences = self.objs.get((tablename, uidtype, uid))
    if not occurrences:
        return
    for container, key in occurrences:
        # Strip the "$k_" prefix to obtain the real field name
        if len(key) > 3:
            container[key[3:]] = value
        container.pop(key, None)
Resolve a reference in self.obj with the given value; will resolve all occurences of the reference @param tablename: the referenced table @param uidtype: the type of uid (uuid or tuid) @param uid: the uuid or tuid @param value: the value to resolve the reference
modules/s3/s3import.py
resolve
annehaley/eden
205
python
def resolve(self, tablename, uidtype, uid, value): '\n Resolve a reference in self.obj with the given value; will\n resolve all occurences of the reference\n\n @param tablename: the referenced table\n @param uidtype: the type of uid (uuid or tuid)\n @param uid: the uuid or tuid\n @param value: the value to resolve the reference\n ' items = self.objs.get((tablename, uidtype, uid)) if items: for (obj, key) in items: if (len(key) > 3): obj[key[3:]] = value obj.pop(key, None)
def resolve(self, tablename, uidtype, uid, value): '\n Resolve a reference in self.obj with the given value; will\n resolve all occurences of the reference\n\n @param tablename: the referenced table\n @param uidtype: the type of uid (uuid or tuid)\n @param uid: the uuid or tuid\n @param value: the value to resolve the reference\n ' items = self.objs.get((tablename, uidtype, uid)) if items: for (obj, key) in items: if (len(key) > 3): obj[key[3:]] = value obj.pop(key, None)<|docstring|>Resolve a reference in self.obj with the given value; will resolve all occurences of the reference @param tablename: the referenced table @param uidtype: the type of uid (uuid or tuid) @param uid: the uuid or tuid @param value: the value to resolve the reference<|endoftext|>
209588e27c0aca2bb8dfc4c1ecd7bcf79100537aaab536175404f767a8aeb334
def __init__(self, primary=None, secondary=None, ignore_case=True, ignore_deleted=False, noupdate=False):
    """
    Args:
        primary: list or tuple of primary fields to find a
                 match, must always match (mandatory, defaults
                 to "name" field)
        secondary: list or tuple of secondary fields to
                   find a match, must match if values are
                   present in the import item
        ignore_case: ignore case for string/text fields
        ignore_deleted: do not match deleted records
        noupdate: match, but do not update

    TODO:
        Fuzzy option to do a LIKE search
    """
    # Primary match fields default to the "name" field
    self.primary = set(primary) if primary else set(('name',))
    # Secondary match fields are optional
    self.secondary = set(secondary) if secondary else set()
    self.ignore_case = ignore_case
    self.ignore_deleted = ignore_deleted
    self.noupdate = noupdate
Args: primary: list or tuple of primary fields to find a match, must always match (mandatory, defaults to "name" field) secondary: list or tuple of secondary fields to find a match, must match if values are present in the import item ignore_case: ignore case for string/text fields ignore_deleted: do not match deleted records noupdate: match, but do not update TODO: Fuzzy option to do a LIKE search
modules/s3/s3import.py
__init__
annehaley/eden
205
python
def __init__(self, primary=None, secondary=None, ignore_case=True, ignore_deleted=False, noupdate=False): '\n Args:\n primary: list or tuple of primary fields to find a\n match, must always match (mandatory, defaults\n to "name" field)\n secondary: list or tuple of secondary fields to\n find a match, must match if values are\n present in the import item\n ignore_case: ignore case for string/text fields\n ignore_deleted: do not match deleted records\n noupdate: match, but do not update\n\n TODO:\n Fuzzy option to do a LIKE search\n ' if (not primary): primary = ('name',) self.primary = set(primary) if (not secondary): self.secondary = set() else: self.secondary = set(secondary) self.ignore_case = ignore_case self.ignore_deleted = ignore_deleted self.noupdate = noupdate
def __init__(self, primary=None, secondary=None, ignore_case=True, ignore_deleted=False, noupdate=False): '\n Args:\n primary: list or tuple of primary fields to find a\n match, must always match (mandatory, defaults\n to "name" field)\n secondary: list or tuple of secondary fields to\n find a match, must match if values are\n present in the import item\n ignore_case: ignore case for string/text fields\n ignore_deleted: do not match deleted records\n noupdate: match, but do not update\n\n TODO:\n Fuzzy option to do a LIKE search\n ' if (not primary): primary = ('name',) self.primary = set(primary) if (not secondary): self.secondary = set() else: self.secondary = set(secondary) self.ignore_case = ignore_case self.ignore_deleted = ignore_deleted self.noupdate = noupdate<|docstring|>Args: primary: list or tuple of primary fields to find a match, must always match (mandatory, defaults to "name" field) secondary: list or tuple of secondary fields to find a match, must match if values are present in the import item ignore_case: ignore case for string/text fields ignore_deleted: do not match deleted records noupdate: match, but do not update TODO: Fuzzy option to do a LIKE search<|endoftext|>
8108ea2d4d184189ae60045b0b90974433cabf8a834b98b72f833608bb986c6f
def __call__(self, item):
    """
    Entry point for importer

    Args:
        item: the import item

    Returns:
        The duplicate Row if match found, otherwise None

    Raises:
        SyntaxError: if any of the query fields doesn't exist
                     in the item table
    """
    data = item.data
    table = item.table
    query = None
    error = 'Invalid field for duplicate detection: %s (%s)'
    # All primary fields must match (AND-combined)
    primary = self.primary
    for fname in primary:
        if (fname not in table.fields):
            raise SyntaxError((error % (fname, table)))
        field = table[fname]
        value = data.get(fname)
        q = self.match(field, value)
        query = (q if (query is None) else (query & q))
    # Secondary fields must match only if the item provides a value
    secondary = self.secondary
    for fname in secondary:
        if (fname not in table.fields):
            raise SyntaxError((error % (fname, table)))
        field = table[fname]
        value = data.get(fname)
        if value:
            query &= self.match(field, value)
    if (self.ignore_deleted and ('deleted' in table.fields)):
        # Exclude deleted records from matching
        query &= (table.deleted == False)
    duplicate = current.db(query).select(table._id, limitby=(0, 1)).first()
    if duplicate:
        # Match found => link the import item to the existing record
        item.id = duplicate[table._id]
        if (not data.deleted):
            item.method = item.METHOD.UPDATE
        if self.noupdate:
            # Match, but leave the existing record unchanged
            item.skip = True
    return duplicate
Entry point for importer Args: item: the import item Returns: The duplicate Row if match found, otherwise None Raises: SyntaxError: if any of the query fields doesn't exist in the item table
modules/s3/s3import.py
__call__
annehaley/eden
205
python
def __call__(self, item): "\n Entry point for importer\n\n Args:\n item: the import item\n\n Returns:\n The duplicate Row if match found, otherwise None\n\n Raises:\n SyntaxError: if any of the query fields doesn't exist\n in the item table\n " data = item.data table = item.table query = None error = 'Invalid field for duplicate detection: %s (%s)' primary = self.primary for fname in primary: if (fname not in table.fields): raise SyntaxError((error % (fname, table))) field = table[fname] value = data.get(fname) q = self.match(field, value) query = (q if (query is None) else (query & q)) secondary = self.secondary for fname in secondary: if (fname not in table.fields): raise SyntaxError((error % (fname, table))) field = table[fname] value = data.get(fname) if value: query &= self.match(field, value) if (self.ignore_deleted and ('deleted' in table.fields)): query &= (table.deleted == False) duplicate = current.db(query).select(table._id, limitby=(0, 1)).first() if duplicate: item.id = duplicate[table._id] if (not data.deleted): item.method = item.METHOD.UPDATE if self.noupdate: item.skip = True return duplicate
def __call__(self, item): "\n Entry point for importer\n\n Args:\n item: the import item\n\n Returns:\n The duplicate Row if match found, otherwise None\n\n Raises:\n SyntaxError: if any of the query fields doesn't exist\n in the item table\n " data = item.data table = item.table query = None error = 'Invalid field for duplicate detection: %s (%s)' primary = self.primary for fname in primary: if (fname not in table.fields): raise SyntaxError((error % (fname, table))) field = table[fname] value = data.get(fname) q = self.match(field, value) query = (q if (query is None) else (query & q)) secondary = self.secondary for fname in secondary: if (fname not in table.fields): raise SyntaxError((error % (fname, table))) field = table[fname] value = data.get(fname) if value: query &= self.match(field, value) if (self.ignore_deleted and ('deleted' in table.fields)): query &= (table.deleted == False) duplicate = current.db(query).select(table._id, limitby=(0, 1)).first() if duplicate: item.id = duplicate[table._id] if (not data.deleted): item.method = item.METHOD.UPDATE if self.noupdate: item.skip = True return duplicate<|docstring|>Entry point for importer Args: item: the import item Returns: The duplicate Row if match found, otherwise None Raises: SyntaxError: if any of the query fields doesn't exist in the item table<|endoftext|>
fe75602da6b453b51fda65a4c4d421d77f89a51b05edc039e359bf8984089adc
def match(self, field, value):
    """
    Helper function to generate a match-query

    Args:
        field: the Field
        value: the value

    Returns:
        a Query
    """
    ftype = str(field.type)
    # Case-insensitive comparison only for string/text fields with
    # a string-like value
    if self.ignore_case and ftype in ('string', 'text') and hasattr(value, 'lower'):
        return field.lower() == s3_str(value).lower()
    return field == value
Helper function to generate a match-query Args: field: the Field value: the value Returns: a Query
modules/s3/s3import.py
match
annehaley/eden
205
python
def match(self, field, value): '\n Helper function to generate a match-query\n\n Args:\n field: the Field\n value: the value\n\n Returns:\n a Query\n ' ftype = str(field.type) ignore_case = self.ignore_case if (ignore_case and hasattr(value, 'lower') and (ftype in ('string', 'text'))): query = (field.lower() == s3_str(value).lower()) else: query = (field == value) return query
def match(self, field, value): '\n Helper function to generate a match-query\n\n Args:\n field: the Field\n value: the value\n\n Returns:\n a Query\n ' ftype = str(field.type) ignore_case = self.ignore_case if (ignore_case and hasattr(value, 'lower') and (ftype in ('string', 'text'))): query = (field.lower() == s3_str(value).lower()) else: query = (field == value) return query<|docstring|>Helper function to generate a match-query Args: field: the Field value: the value Returns: a Query<|endoftext|>
71d785f1ef3791bded256fbadd7b526478a64e01e2588388f4f1ceba303fa704
def load_descriptor(self, path):
    """
    Load the descriptor file and then all the import tasks in that file
    into the task property.
    The descriptor file is the file called tasks.cfg in path.
    The file consists of a comma separated list of:
    module, resource name, csv filename, xsl filename.

    @param path: the directory containing tasks.cfg
    """
    # Use a context manager so the descriptor file is always closed
    # (previously the file handle was leaked)
    with open(os.path.join(path, 'tasks.cfg'), 'r') as source:
        rows = list(self.csv.reader(source))
    for details in rows:
        # Skip blank lines and rows with an empty first cell
        # (an empty first cell would otherwise raise IndexError below)
        if not details or not details[0]:
            continue
        prefix = details[0][0].strip('" ')
        if prefix == '#':
            # Comment line
            continue
        if prefix == '*':
            # Special task (custom function)
            self.extract_other_import_line(path, details)
        else:
            # Regular CSV import task
            self.extract_csv_import_line(path, details)
Load the descriptor file and then all the import tasks in that file into the task property. The descriptor file is the file called tasks.cfg in path. The file consists of a comma separated list of: module, resource name, csv filename, xsl filename.
modules/s3/s3import.py
load_descriptor
annehaley/eden
205
python
def load_descriptor(self, path): '\n Load the descriptor file and then all the import tasks in that file\n into the task property.\n The descriptor file is the file called tasks.cfg in path.\n The file consists of a comma separated list of:\n module, resource name, csv filename, xsl filename.\n ' source = open(os.path.join(path, 'tasks.cfg'), 'r') values = self.csv.reader(source) for details in values: if (details == []): continue prefix = details[0][0].strip('" ') if (prefix == '#'): continue if (prefix == '*'): self.extract_other_import_line(path, details) else: self.extract_csv_import_line(path, details)
def load_descriptor(self, path): '\n Load the descriptor file and then all the import tasks in that file\n into the task property.\n The descriptor file is the file called tasks.cfg in path.\n The file consists of a comma separated list of:\n module, resource name, csv filename, xsl filename.\n ' source = open(os.path.join(path, 'tasks.cfg'), 'r') values = self.csv.reader(source) for details in values: if (details == []): continue prefix = details[0][0].strip('" ') if (prefix == '#'): continue if (prefix == '*'): self.extract_other_import_line(path, details) else: self.extract_csv_import_line(path, details)<|docstring|>Load the descriptor file and then all the import tasks in that file into the task property. The descriptor file is the file called tasks.cfg in path. The file consists of a comma separated list of: module, resource name, csv filename, xsl filename.<|endoftext|>
5db1f4d42699fb3f8837b341eb4ce68cf1493b1af218988219812b793c9c246e
def extract_csv_import_line(self, path, details):
    """
    Extract the details for a CSV Import Task

    @param path: the default directory for the CSV file
    @param details: the parsed CSV row
                    [module, resource, csv filename, xsl filename(, extra data)]
    """
    argCnt = len(details)
    if argCnt not in (4, 5):
        self.errorList.append('prepopulate error: job not of length 4, ignored: %s' % str(details))
        return

    mod = details[0].strip('" ')
    res = details[1].strip('" ')
    folder = current.request.folder

    # Resolve the CSV source (remote URL or local file)
    csvFileName = details[2].strip('" ')
    if csvFileName[:7] == 'http://':
        csv = csvFileName
    else:
        csvPath, csvFile = os.path.split(csvFileName)
        if csvPath != '':
            path = os.path.join(folder, 'modules', 'templates', csvPath)
        csv = os.path.join(path, csvFile)

    # Locate the transform stylesheet: module subfolder of the
    # standard template dir, then the template dir itself, then
    # the CSV path
    xslFileName = details[3].strip('" ')
    templateDir = os.path.join(folder, 'static', 'formats', 's3csv')
    for xsl in (os.path.join(templateDir, mod, xslFileName),
                os.path.join(templateDir, xslFileName),
                os.path.join(path, xslFileName),
                ):
        if os.path.exists(xsl):
            break
    else:
        self.errorList.append('Failed to find a transform file %s, Giving up.' % xslFileName)
        return

    extra_data = details[4] if argCnt == 5 else None
    self.tasks.append([1, mod, res, csv, xsl, extra_data])
Extract the details for a CSV Import Task
modules/s3/s3import.py
extract_csv_import_line
annehaley/eden
205
python
def extract_csv_import_line(self, path, details): '\n \n ' argCnt = len(details) if ((argCnt == 4) or (argCnt == 5)): mod = details[0].strip('" ') res = details[1].strip('" ') folder = current.request.folder csvFileName = details[2].strip('" ') if (csvFileName[:7] == 'http://'): csv = csvFileName else: (csvPath, csvFile) = os.path.split(csvFileName) if (csvPath != ): path = os.path.join(folder, 'modules', 'templates', csvPath) csv = os.path.join(path, csvFile) xslFileName = details[3].strip('" ') templateDir = os.path.join(folder, 'static', 'formats', 's3csv') xsl = os.path.join(templateDir, mod, xslFileName) if (os.path.exists(xsl) == False): xsl = os.path.join(templateDir, xslFileName) if (os.path.exists(xsl) == False): xsl = os.path.join(path, xslFileName) if (os.path.exists(xsl) == False): self.errorList.append(('Failed to find a transform file %s, Giving up.' % xslFileName)) return if (argCnt == 5): extra_data = details[4] else: extra_data = None self.tasks.append([1, mod, res, csv, xsl, extra_data]) else: self.errorList.append(('prepopulate error: job not of length 4, ignored: %s' % str(details)))
def extract_csv_import_line(self, path, details): '\n \n ' argCnt = len(details) if ((argCnt == 4) or (argCnt == 5)): mod = details[0].strip('" ') res = details[1].strip('" ') folder = current.request.folder csvFileName = details[2].strip('" ') if (csvFileName[:7] == 'http://'): csv = csvFileName else: (csvPath, csvFile) = os.path.split(csvFileName) if (csvPath != ): path = os.path.join(folder, 'modules', 'templates', csvPath) csv = os.path.join(path, csvFile) xslFileName = details[3].strip('" ') templateDir = os.path.join(folder, 'static', 'formats', 's3csv') xsl = os.path.join(templateDir, mod, xslFileName) if (os.path.exists(xsl) == False): xsl = os.path.join(templateDir, xslFileName) if (os.path.exists(xsl) == False): xsl = os.path.join(path, xslFileName) if (os.path.exists(xsl) == False): self.errorList.append(('Failed to find a transform file %s, Giving up.' % xslFileName)) return if (argCnt == 5): extra_data = details[4] else: extra_data = None self.tasks.append([1, mod, res, csv, xsl, extra_data]) else: self.errorList.append(('prepopulate error: job not of length 4, ignored: %s' % str(details)))<|docstring|>Extract the details for a CSV Import Task<|endoftext|>
b5e6c319df51a1efa6b566542ef145eb45287f1379b57e40c1ca9ca02e963d30
def extract_other_import_line(self, path, details):
    """
    Store a single import job into the tasks property
    *,function,filename,*extraArgs
    """
    function = details[1].strip('" ')

    # Resolve the optional file path argument
    filepath = None
    if len(details) >= 3:
        filename = details[2].strip('" ')
        if filename:
            subfolder, filename = os.path.split(filename)
            if subfolder:
                # Subfolder given => look under the templates directory
                path = os.path.join(current.request.folder, 'modules', 'templates', subfolder)
            filepath = os.path.join(path, filename)

    # Any further cells become extra arguments for the function
    extraArgs = details[3:] if len(details) >= 4 else None

    self.tasks.append((2, function, filepath, extraArgs))
Store a single import job into the tasks property *,function,filename,*extraArgs
modules/s3/s3import.py
extract_other_import_line
annehaley/eden
205
python
def extract_other_import_line(self, path, details): '\n Store a single import job into the tasks property\n *,function,filename,*extraArgs\n ' function = details[1].strip('" ') filepath = None if (len(details) >= 3): filename = details[2].strip('" ') if (filename != ): (subfolder, filename) = os.path.split(filename) if (subfolder != ): path = os.path.join(current.request.folder, 'modules', 'templates', subfolder) filepath = os.path.join(path, filename) if (len(details) >= 4): extraArgs = details[3:] else: extraArgs = None self.tasks.append((2, function, filepath, extraArgs))
def extract_other_import_line(self, path, details): '\n Store a single import job into the tasks property\n *,function,filename,*extraArgs\n ' function = details[1].strip('" ') filepath = None if (len(details) >= 3): filename = details[2].strip('" ') if (filename != ): (subfolder, filename) = os.path.split(filename) if (subfolder != ): path = os.path.join(current.request.folder, 'modules', 'templates', subfolder) filepath = os.path.join(path, filename) if (len(details) >= 4): extraArgs = details[3:] else: extraArgs = None self.tasks.append((2, function, filepath, extraArgs))<|docstring|>Store a single import job into the tasks property *,function,filename,*extraArgs<|endoftext|>
f311e09996473d0b38cf54d5494f2ed468931d370d43a0c265985552fa8a1cc1
def execute_import_task(self, task):
    """
    Execute each import job, in order

    @param task: the task list
                 [1, prefix, name, csv filename, xsl filename, extra_data]
    """
    # Allow short passwords during prepop imports
    current.auth.ignore_min_password_length()
    start = datetime.datetime.now()
    if (task[0] == 1):
        s3db = current.s3db
        response = current.response
        error_string = 'prepopulate error: file %s missing'
        # Store the view, so that it can be restored afterwards
        view = response.view
        prefix = task[1]
        name = task[2]
        tablename = ('%s_%s' % (prefix, name))
        if (tablename in self.alternateTables):
            # Map to the alternate table/loader where configured
            details = self.alternateTables[tablename]
            if ('tablename' in details):
                tablename = details['tablename']
            # Load the model for the table
            s3db.table(tablename)
            if ('loader' in details):
                loader = details['loader']
                if (loader is not None):
                    loader()
            if ('prefix' in details):
                prefix = details['prefix']
            if ('name' in details):
                name = details['name']
        try:
            resource = s3db.resource(tablename)
        except AttributeError:
            # Table cannot be loaded => skip this job
            self.errorList.append(('WARNING: Unable to find table %s import job skipped' % tablename))
            return
        # Check that the source file is accessible
        filename = task[3]
        if (filename[:7] == 'http://'):
            # Remote source
            req = urllib2.Request(url=filename)
            try:
                f = urlopen(req)
            except HTTPError as e:
                self.errorList.append(('Could not access %s: %s' % (filename, e.read())))
                return
            except:
                # NOTE(review): bare except - any other failure is
                # reported as "file missing"
                self.errorList.append((error_string % filename))
                return
            else:
                csv = f
        else:
            # Local source
            try:
                csv = open(filename, 'rb')
            except IOError:
                self.errorList.append((error_string % filename))
                return
        # Check that the transform stylesheet is accessible
        try:
            S = open(task[4], 'r')
        except IOError:
            self.errorList.append((error_string % task[4]))
            return
        else:
            S.close()
        if (tablename not in self.customised):
            # Apply customise_resource hook for this table
            customise = current.deployment_settings.customise_resource(tablename)
            if customise:
                request = S3Request(prefix, name, current.request)
                customise(request, tablename)
                self.customised.append(tablename)
        # Optional extra data for the transformation (JSON string,
        # with single quotes standing in for double quotes)
        extra_data = None
        if task[5]:
            try:
                extradata = self.unescape(task[5], {"'": '"'})
                extradata = json.loads(extradata)
                extra_data = extradata
            except:
                # NOTE(review): bare except - invalid JSON is ignored
                # with a warning
                self.errorList.append(('WARNING:5th parameter invalid, parameter %s ignored' % task[5]))
        auth = current.auth
        auth.rollback = True
        try:
            # Import the data
            resource.import_xml(csv, format='csv', stylesheet=task[4], extra_data=extra_data)
        except SyntaxError as e:
            self.errorList.append(('WARNING: import error - %s (file: %s, stylesheet: %s)' % (e, filename, task[4])))
            auth.rollback = False
            return
        if (not resource.error):
            current.db.commit()
        else:
            # Import failed => collect errors and roll back
            error = resource.error
            self.errorList.append(('%s - %s: %s' % (task[3], resource.tablename, error)))
            errors = current.xml.collect_errors(resource)
            if errors:
                self.errorList.extend(errors)
            current.db.rollback()
        auth.rollback = False
        # Restore the view
        response.view = view
        # Report the duration of this job
        end = datetime.datetime.now()
        duration = (end - start)
        csvName = task[3][(task[3].rfind('/') + 1):]
        duration = '{:.2f}'.format(duration.total_seconds())
        msg = ('%s imported (%s sec)' % (csvName, duration))
        self.resultList.append(msg)
        current.log.debug(msg)
Execute each import job, in order
modules/s3/s3import.py
execute_import_task
annehaley/eden
205
python
def execute_import_task(self, task): '\n \n ' current.auth.ignore_min_password_length() start = datetime.datetime.now() if (task[0] == 1): s3db = current.s3db response = current.response error_string = 'prepopulate error: file %s missing' view = response.view prefix = task[1] name = task[2] tablename = ('%s_%s' % (prefix, name)) if (tablename in self.alternateTables): details = self.alternateTables[tablename] if ('tablename' in details): tablename = details['tablename'] s3db.table(tablename) if ('loader' in details): loader = details['loader'] if (loader is not None): loader() if ('prefix' in details): prefix = details['prefix'] if ('name' in details): name = details['name'] try: resource = s3db.resource(tablename) except AttributeError: self.errorList.append(('WARNING: Unable to find table %s import job skipped' % tablename)) return filename = task[3] if (filename[:7] == 'http://'): req = urllib2.Request(url=filename) try: f = urlopen(req) except HTTPError as e: self.errorList.append(('Could not access %s: %s' % (filename, e.read()))) return except: self.errorList.append((error_string % filename)) return else: csv = f else: try: csv = open(filename, 'rb') except IOError: self.errorList.append((error_string % filename)) return try: S = open(task[4], 'r') except IOError: self.errorList.append((error_string % task[4])) return else: S.close() if (tablename not in self.customised): customise = current.deployment_settings.customise_resource(tablename) if customise: request = S3Request(prefix, name, current.request) customise(request, tablename) self.customised.append(tablename) extra_data = None if task[5]: try: extradata = self.unescape(task[5], {"'": '"'}) extradata = json.loads(extradata) extra_data = extradata except: self.errorList.append(('WARNING:5th parameter invalid, parameter %s ignored' % task[5])) auth = current.auth auth.rollback = True try: resource.import_xml(csv, format='csv', stylesheet=task[4], extra_data=extra_data) except SyntaxError as e: 
self.errorList.append(('WARNING: import error - %s (file: %s, stylesheet: %s)' % (e, filename, task[4]))) auth.rollback = False return if (not resource.error): current.db.commit() else: error = resource.error self.errorList.append(('%s - %s: %s' % (task[3], resource.tablename, error))) errors = current.xml.collect_errors(resource) if errors: self.errorList.extend(errors) current.db.rollback() auth.rollback = False response.view = view end = datetime.datetime.now() duration = (end - start) csvName = task[3][(task[3].rfind('/') + 1):] duration = '{:.2f}'.format(duration.total_seconds()) msg = ('%s imported (%s sec)' % (csvName, duration)) self.resultList.append(msg) current.log.debug(msg)
def execute_import_task(self, task): '\n \n ' current.auth.ignore_min_password_length() start = datetime.datetime.now() if (task[0] == 1): s3db = current.s3db response = current.response error_string = 'prepopulate error: file %s missing' view = response.view prefix = task[1] name = task[2] tablename = ('%s_%s' % (prefix, name)) if (tablename in self.alternateTables): details = self.alternateTables[tablename] if ('tablename' in details): tablename = details['tablename'] s3db.table(tablename) if ('loader' in details): loader = details['loader'] if (loader is not None): loader() if ('prefix' in details): prefix = details['prefix'] if ('name' in details): name = details['name'] try: resource = s3db.resource(tablename) except AttributeError: self.errorList.append(('WARNING: Unable to find table %s import job skipped' % tablename)) return filename = task[3] if (filename[:7] == 'http://'): req = urllib2.Request(url=filename) try: f = urlopen(req) except HTTPError as e: self.errorList.append(('Could not access %s: %s' % (filename, e.read()))) return except: self.errorList.append((error_string % filename)) return else: csv = f else: try: csv = open(filename, 'rb') except IOError: self.errorList.append((error_string % filename)) return try: S = open(task[4], 'r') except IOError: self.errorList.append((error_string % task[4])) return else: S.close() if (tablename not in self.customised): customise = current.deployment_settings.customise_resource(tablename) if customise: request = S3Request(prefix, name, current.request) customise(request, tablename) self.customised.append(tablename) extra_data = None if task[5]: try: extradata = self.unescape(task[5], {"'": '"'}) extradata = json.loads(extradata) extra_data = extradata except: self.errorList.append(('WARNING:5th parameter invalid, parameter %s ignored' % task[5])) auth = current.auth auth.rollback = True try: resource.import_xml(csv, format='csv', stylesheet=task[4], extra_data=extra_data) except SyntaxError as e: 
self.errorList.append(('WARNING: import error - %s (file: %s, stylesheet: %s)' % (e, filename, task[4]))) auth.rollback = False return if (not resource.error): current.db.commit() else: error = resource.error self.errorList.append(('%s - %s: %s' % (task[3], resource.tablename, error))) errors = current.xml.collect_errors(resource) if errors: self.errorList.extend(errors) current.db.rollback() auth.rollback = False response.view = view end = datetime.datetime.now() duration = (end - start) csvName = task[3][(task[3].rfind('/') + 1):] duration = '{:.2f}'.format(duration.total_seconds()) msg = ('%s imported (%s sec)' % (csvName, duration)) self.resultList.append(msg) current.log.debug(msg)<|docstring|>Execute each import job, in order<|endoftext|>
74927155da24de2cba86c51875cabe63f4412bb3a5309b701f284c31ea5c2146
def execute_special_task(self, task):
    """
    Execute import tasks which require a custom function,
    such as import_role
    """
    start = datetime.datetime.now()
    s3 = current.response.s3
    if task[0] == 2:
        fun = task[1]
        filepath = task[2]
        extraArgs = task[3]

        # Build the argument list from the optional parts
        args = []
        if filepath is not None:
            args.append(filepath)
        if extraArgs is not None:
            args.extend(extraArgs)
        error = s3[fun](*args)
        if error:
            self.errorList.append(error)

        # Report the duration of this task
        duration = datetime.datetime.now() - start
        seconds = '{:.2f}'.format(duration.total_seconds())
        msg = '%s completed (%s sec)' % (fun, seconds)
        self.resultList.append(msg)
        current.log.debug(msg)
Execute import tasks which require a custom function, such as import_role
modules/s3/s3import.py
execute_special_task
annehaley/eden
205
python
def execute_special_task(self, task): '\n Execute import tasks which require a custom function,\n such as import_role\n ' start = datetime.datetime.now() s3 = current.response.s3 if (task[0] == 2): fun = task[1] filepath = task[2] extraArgs = task[3] if (filepath is None): if (extraArgs is None): error = s3[fun]() else: error = s3[fun](*extraArgs) elif (extraArgs is None): error = s3[fun](filepath) else: error = s3[fun](filepath, *extraArgs) if error: self.errorList.append(error) end = datetime.datetime.now() duration = (end - start) duration = '{:.2f}'.format(duration.total_seconds()) msg = ('%s completed (%s sec)' % (fun, duration)) self.resultList.append(msg) current.log.debug(msg)
def execute_special_task(self, task): '\n Execute import tasks which require a custom function,\n such as import_role\n ' start = datetime.datetime.now() s3 = current.response.s3 if (task[0] == 2): fun = task[1] filepath = task[2] extraArgs = task[3] if (filepath is None): if (extraArgs is None): error = s3[fun]() else: error = s3[fun](*extraArgs) elif (extraArgs is None): error = s3[fun](filepath) else: error = s3[fun](filepath, *extraArgs) if error: self.errorList.append(error) end = datetime.datetime.now() duration = (end - start) duration = '{:.2f}'.format(duration.total_seconds()) msg = ('%s completed (%s sec)' % (fun, duration)) self.resultList.append(msg) current.log.debug(msg)<|docstring|>Execute import tasks which require a custom function, such as import_role<|endoftext|>
0708c86588ab264801a5891b67dc3673c40a3fdf4eeb08e6bddea62b45418513
@staticmethod def _lookup_pe(entity): '\n Convert an Entity to a pe_id\n - helper for import_role\n - assumes org_organisation.name unless specified\n - entity needs to exist already\n ' if ('=' in entity): (pe_type, value) = entity.split('=') else: pe_type = 'org_organisation.name' value = entity (pe_tablename, pe_field) = pe_type.split('.') table = current.s3db.table(pe_tablename) record = current.db((table[pe_field] == value)).select(table.pe_id, limitby=(0, 1)).first() try: pe_id = record.pe_id except AttributeError: current.log.warning(('import_role cannot find pe_id for %s' % entity)) pe_id = None return pe_id
Convert an Entity to a pe_id - helper for import_role - assumes org_organisation.name unless specified - entity needs to exist already
modules/s3/s3import.py
_lookup_pe
annehaley/eden
205
python
@staticmethod def _lookup_pe(entity): '\n Convert an Entity to a pe_id\n - helper for import_role\n - assumes org_organisation.name unless specified\n - entity needs to exist already\n ' if ('=' in entity): (pe_type, value) = entity.split('=') else: pe_type = 'org_organisation.name' value = entity (pe_tablename, pe_field) = pe_type.split('.') table = current.s3db.table(pe_tablename) record = current.db((table[pe_field] == value)).select(table.pe_id, limitby=(0, 1)).first() try: pe_id = record.pe_id except AttributeError: current.log.warning(('import_role cannot find pe_id for %s' % entity)) pe_id = None return pe_id
@staticmethod def _lookup_pe(entity): '\n Convert an Entity to a pe_id\n - helper for import_role\n - assumes org_organisation.name unless specified\n - entity needs to exist already\n ' if ('=' in entity): (pe_type, value) = entity.split('=') else: pe_type = 'org_organisation.name' value = entity (pe_tablename, pe_field) = pe_type.split('.') table = current.s3db.table(pe_tablename) record = current.db((table[pe_field] == value)).select(table.pe_id, limitby=(0, 1)).first() try: pe_id = record.pe_id except AttributeError: current.log.warning(('import_role cannot find pe_id for %s' % entity)) pe_id = None return pe_id<|docstring|>Convert an Entity to a pe_id - helper for import_role - assumes org_organisation.name unless specified - entity needs to exist already<|endoftext|>
b690f7c98983520c0f53d5cb9440211f7536b9ce3355a52c93072e8f4e49167c
def import_role(self, filename): '\n Import Roles from CSV\n ' try: open_file = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) auth = current.auth acl = auth.permission create_role = auth.s3_create_role def parseACL(_acl): permissions = _acl.split('|') acl_value = 0 for permission in permissions: if (permission == 'READ'): acl_value |= acl.READ if (permission == 'CREATE'): acl_value |= acl.CREATE if (permission == 'UPDATE'): acl_value |= acl.UPDATE if (permission == 'DELETE'): acl_value |= acl.DELETE if (permission == 'REVIEW'): acl_value |= acl.REVIEW if (permission == 'APPROVE'): acl_value |= acl.APPROVE if (permission == 'PUBLISH'): acl_value |= acl.PUBLISH if (permission == 'ALL'): acl_value |= acl.ALL return acl_value reader = self.csv.DictReader(open_file) roles = {} acls = {} args = {} for row in reader: if (row != None): row_get = row.get role = row_get('role') desc = row_get('description', '') rules = {} extra_param = {} controller = row_get('controller') if controller: rules['c'] = controller fn = row_get('function') if fn: rules['f'] = fn table = row_get('table') if table: rules['t'] = table oacl = row_get('oacl') if oacl: rules['oacl'] = parseACL(oacl) uacl = row_get('uacl') if uacl: rules['uacl'] = parseACL(uacl) entity = row_get('entity') if entity: if (entity == 'any'): pass else: try: entity = int(entity) except ValueError: entity = self._lookup_pe(entity) rules['entity'] = entity flag = (lambda s: (bool(s) and (s.lower() in ('1', 'true', 'yes')))) hidden = row_get('hidden') if hidden: extra_param['hidden'] = flag(hidden) system = row_get('system') if system: extra_param['system'] = flag(system) protected = row_get('protected') if protected: extra_param['protected'] = flag(protected) uid = row_get('uid') if uid: extra_param['uid'] = uid if (role in roles): acls[role].append(rules) else: roles[role] = [role, desc] acls[role] = [rules] if ((len(extra_param) > 0) and (role not in args)): args[role] = 
extra_param for rulelist in roles.values(): if (rulelist[0] in args): create_role(rulelist[0], rulelist[1], *acls[rulelist[0]], **args[rulelist[0]]) else: create_role(rulelist[0], rulelist[1], *acls[rulelist[0]])
Import Roles from CSV
modules/s3/s3import.py
import_role
annehaley/eden
205
python
def import_role(self, filename): '\n \n ' try: open_file = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) auth = current.auth acl = auth.permission create_role = auth.s3_create_role def parseACL(_acl): permissions = _acl.split('|') acl_value = 0 for permission in permissions: if (permission == 'READ'): acl_value |= acl.READ if (permission == 'CREATE'): acl_value |= acl.CREATE if (permission == 'UPDATE'): acl_value |= acl.UPDATE if (permission == 'DELETE'): acl_value |= acl.DELETE if (permission == 'REVIEW'): acl_value |= acl.REVIEW if (permission == 'APPROVE'): acl_value |= acl.APPROVE if (permission == 'PUBLISH'): acl_value |= acl.PUBLISH if (permission == 'ALL'): acl_value |= acl.ALL return acl_value reader = self.csv.DictReader(open_file) roles = {} acls = {} args = {} for row in reader: if (row != None): row_get = row.get role = row_get('role') desc = row_get('description', ) rules = {} extra_param = {} controller = row_get('controller') if controller: rules['c'] = controller fn = row_get('function') if fn: rules['f'] = fn table = row_get('table') if table: rules['t'] = table oacl = row_get('oacl') if oacl: rules['oacl'] = parseACL(oacl) uacl = row_get('uacl') if uacl: rules['uacl'] = parseACL(uacl) entity = row_get('entity') if entity: if (entity == 'any'): pass else: try: entity = int(entity) except ValueError: entity = self._lookup_pe(entity) rules['entity'] = entity flag = (lambda s: (bool(s) and (s.lower() in ('1', 'true', 'yes')))) hidden = row_get('hidden') if hidden: extra_param['hidden'] = flag(hidden) system = row_get('system') if system: extra_param['system'] = flag(system) protected = row_get('protected') if protected: extra_param['protected'] = flag(protected) uid = row_get('uid') if uid: extra_param['uid'] = uid if (role in roles): acls[role].append(rules) else: roles[role] = [role, desc] acls[role] = [rules] if ((len(extra_param) > 0) and (role not in args)): args[role] = extra_param for 
rulelist in roles.values(): if (rulelist[0] in args): create_role(rulelist[0], rulelist[1], *acls[rulelist[0]], **args[rulelist[0]]) else: create_role(rulelist[0], rulelist[1], *acls[rulelist[0]])
def import_role(self, filename): '\n \n ' try: open_file = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) auth = current.auth acl = auth.permission create_role = auth.s3_create_role def parseACL(_acl): permissions = _acl.split('|') acl_value = 0 for permission in permissions: if (permission == 'READ'): acl_value |= acl.READ if (permission == 'CREATE'): acl_value |= acl.CREATE if (permission == 'UPDATE'): acl_value |= acl.UPDATE if (permission == 'DELETE'): acl_value |= acl.DELETE if (permission == 'REVIEW'): acl_value |= acl.REVIEW if (permission == 'APPROVE'): acl_value |= acl.APPROVE if (permission == 'PUBLISH'): acl_value |= acl.PUBLISH if (permission == 'ALL'): acl_value |= acl.ALL return acl_value reader = self.csv.DictReader(open_file) roles = {} acls = {} args = {} for row in reader: if (row != None): row_get = row.get role = row_get('role') desc = row_get('description', ) rules = {} extra_param = {} controller = row_get('controller') if controller: rules['c'] = controller fn = row_get('function') if fn: rules['f'] = fn table = row_get('table') if table: rules['t'] = table oacl = row_get('oacl') if oacl: rules['oacl'] = parseACL(oacl) uacl = row_get('uacl') if uacl: rules['uacl'] = parseACL(uacl) entity = row_get('entity') if entity: if (entity == 'any'): pass else: try: entity = int(entity) except ValueError: entity = self._lookup_pe(entity) rules['entity'] = entity flag = (lambda s: (bool(s) and (s.lower() in ('1', 'true', 'yes')))) hidden = row_get('hidden') if hidden: extra_param['hidden'] = flag(hidden) system = row_get('system') if system: extra_param['system'] = flag(system) protected = row_get('protected') if protected: extra_param['protected'] = flag(protected) uid = row_get('uid') if uid: extra_param['uid'] = uid if (role in roles): acls[role].append(rules) else: roles[role] = [role, desc] acls[role] = [rules] if ((len(extra_param) > 0) and (role not in args)): args[role] = extra_param for 
rulelist in roles.values(): if (rulelist[0] in args): create_role(rulelist[0], rulelist[1], *acls[rulelist[0]], **args[rulelist[0]]) else: create_role(rulelist[0], rulelist[1], *acls[rulelist[0]])<|docstring|>Import Roles from CSV<|endoftext|>
7b4ecbb1287b53b44321860f77580ce9d4f75133bec471b6a0391478da1fa17c
def import_user(self, filename): '\n Import Users from CSV with an import Prep\n ' current.response.s3.import_prep = current.auth.s3_import_prep current.s3db.add_components('auth_user', auth_masterkey='user_id') user_task = [1, 'auth', 'user', filename, os.path.join(current.request.folder, 'static', 'formats', 's3csv', 'auth', 'user.xsl'), None] self.execute_import_task(user_task)
Import Users from CSV with an import Prep
modules/s3/s3import.py
import_user
annehaley/eden
205
python
def import_user(self, filename): '\n \n ' current.response.s3.import_prep = current.auth.s3_import_prep current.s3db.add_components('auth_user', auth_masterkey='user_id') user_task = [1, 'auth', 'user', filename, os.path.join(current.request.folder, 'static', 'formats', 's3csv', 'auth', 'user.xsl'), None] self.execute_import_task(user_task)
def import_user(self, filename): '\n \n ' current.response.s3.import_prep = current.auth.s3_import_prep current.s3db.add_components('auth_user', auth_masterkey='user_id') user_task = [1, 'auth', 'user', filename, os.path.join(current.request.folder, 'static', 'formats', 's3csv', 'auth', 'user.xsl'), None] self.execute_import_task(user_task)<|docstring|>Import Users from CSV with an import Prep<|endoftext|>
c805b445d55eeee1b5cdfd3f4311325f44a1e723b800d717826169959098238c
def import_feed(self, filename): '\n Import RSS Feeds from CSV with an import Prep\n ' stylesheet = os.path.join(current.request.folder, 'static', 'formats', 's3csv', 'msg', 'rss_channel.xsl') from s3db.pr import pr_import_prep current.response.s3.import_prep = pr_import_prep user_task = [1, 'pr', 'contact', filename, stylesheet, None] self.execute_import_task(user_task) user_task = [1, 'msg', 'rss_channel', filename, stylesheet, None] self.execute_import_task(user_task)
Import RSS Feeds from CSV with an import Prep
modules/s3/s3import.py
import_feed
annehaley/eden
205
python
def import_feed(self, filename): '\n \n ' stylesheet = os.path.join(current.request.folder, 'static', 'formats', 's3csv', 'msg', 'rss_channel.xsl') from s3db.pr import pr_import_prep current.response.s3.import_prep = pr_import_prep user_task = [1, 'pr', 'contact', filename, stylesheet, None] self.execute_import_task(user_task) user_task = [1, 'msg', 'rss_channel', filename, stylesheet, None] self.execute_import_task(user_task)
def import_feed(self, filename): '\n \n ' stylesheet = os.path.join(current.request.folder, 'static', 'formats', 's3csv', 'msg', 'rss_channel.xsl') from s3db.pr import pr_import_prep current.response.s3.import_prep = pr_import_prep user_task = [1, 'pr', 'contact', filename, stylesheet, None] self.execute_import_task(user_task) user_task = [1, 'msg', 'rss_channel', filename, stylesheet, None] self.execute_import_task(user_task)<|docstring|>Import RSS Feeds from CSV with an import Prep<|endoftext|>
556b6ee3965758a5a86f70eee6affdb8b8308885434f0be98b002933de0333c5
def import_image(self, filename, tablename, idfield, imagefield): '\n Import images, such as a logo\n\n Args:\n filename: a CSV list of records and filenames\n tablename: the name of the table\n idfield: the field used to identify the record\n imagefield: the field to where the image will be added\n\n Example:\n bi.import_image ("org_logos.csv", "org_organisation", "name", "logo")\n and the file org_logos.csv may look as follows\n id file\n Sahana Software Foundation sahanalogo.jpg\n American Red Cross icrc.gif\n ' try: openFile = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) (prefix, name) = tablename.split('_', 1) reader = self.csv.DictReader(openFile) db = current.db s3db = current.s3db audit = current.audit table = s3db[tablename] idfield = table[idfield] base_query = (table.deleted == False) fieldnames = [table._id.name, imagefield] for field in table: if ((field.name not in fieldnames) and (field.writable is False) and (field.update is None) and (field.compute is None)): fieldnames.append(field.name) fields = [table[f] for f in fieldnames] get_config = s3db.get_config onvalidation = (get_config(tablename, 'update_onvalidation') or get_config(tablename, 'onvalidation')) onaccept = (get_config(tablename, 'update_onaccept') or get_config(tablename, 'onaccept')) update_realm = get_config(tablename, 'update_realm') if update_realm: set_realm_entity = current.auth.set_realm_entity update_super = s3db.update_super for row in reader: if (row != None): image = row['file'] try: path = os.path.split(filename)[0] imagepath = os.path.join(path, image) openFile = open(imagepath, 'rb') except IOError: current.log.error(('Unable to open image file %s' % image)) continue image_source = BytesIO(openFile.read()) query = (base_query & (idfield == row['id'])) record = db(query).select(*fields, limitby=(0, 1)).first() try: record_id = record.id except AttributeError: current.log.error(('Unable to get record %s of the resource %s 
to attach the image file to' % (row['id'], tablename))) continue form = SQLFORM(table, record, fields=['id', imagefield]) form_vars = Storage() form_vars._formname = ('%s/%s' % (tablename, record_id)) form_vars.id = record_id source = Storage() source.filename = imagepath source.file = image_source form_vars[imagefield] = source if form.accepts(form_vars, onvalidation=onvalidation): audit('update', prefix, name, form=form, record=record_id, representation='csv') update_super(table, form_vars) if update_realm: set_realm_entity(table, form_vars, force_update=True) callback(onaccept, form, tablename=tablename) else: for (key, error) in form.errors.items(): current.log.error(('error importing image %s: %s %s' % (image, key, error)))
Import images, such as a logo Args: filename: a CSV list of records and filenames tablename: the name of the table idfield: the field used to identify the record imagefield: the field to where the image will be added Example: bi.import_image ("org_logos.csv", "org_organisation", "name", "logo") and the file org_logos.csv may look as follows id file Sahana Software Foundation sahanalogo.jpg American Red Cross icrc.gif
modules/s3/s3import.py
import_image
annehaley/eden
205
python
def import_image(self, filename, tablename, idfield, imagefield): '\n Import images, such as a logo\n\n Args:\n filename: a CSV list of records and filenames\n tablename: the name of the table\n idfield: the field used to identify the record\n imagefield: the field to where the image will be added\n\n Example:\n bi.import_image ("org_logos.csv", "org_organisation", "name", "logo")\n and the file org_logos.csv may look as follows\n id file\n Sahana Software Foundation sahanalogo.jpg\n American Red Cross icrc.gif\n ' try: openFile = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) (prefix, name) = tablename.split('_', 1) reader = self.csv.DictReader(openFile) db = current.db s3db = current.s3db audit = current.audit table = s3db[tablename] idfield = table[idfield] base_query = (table.deleted == False) fieldnames = [table._id.name, imagefield] for field in table: if ((field.name not in fieldnames) and (field.writable is False) and (field.update is None) and (field.compute is None)): fieldnames.append(field.name) fields = [table[f] for f in fieldnames] get_config = s3db.get_config onvalidation = (get_config(tablename, 'update_onvalidation') or get_config(tablename, 'onvalidation')) onaccept = (get_config(tablename, 'update_onaccept') or get_config(tablename, 'onaccept')) update_realm = get_config(tablename, 'update_realm') if update_realm: set_realm_entity = current.auth.set_realm_entity update_super = s3db.update_super for row in reader: if (row != None): image = row['file'] try: path = os.path.split(filename)[0] imagepath = os.path.join(path, image) openFile = open(imagepath, 'rb') except IOError: current.log.error(('Unable to open image file %s' % image)) continue image_source = BytesIO(openFile.read()) query = (base_query & (idfield == row['id'])) record = db(query).select(*fields, limitby=(0, 1)).first() try: record_id = record.id except AttributeError: current.log.error(('Unable to get record %s of the resource %s 
to attach the image file to' % (row['id'], tablename))) continue form = SQLFORM(table, record, fields=['id', imagefield]) form_vars = Storage() form_vars._formname = ('%s/%s' % (tablename, record_id)) form_vars.id = record_id source = Storage() source.filename = imagepath source.file = image_source form_vars[imagefield] = source if form.accepts(form_vars, onvalidation=onvalidation): audit('update', prefix, name, form=form, record=record_id, representation='csv') update_super(table, form_vars) if update_realm: set_realm_entity(table, form_vars, force_update=True) callback(onaccept, form, tablename=tablename) else: for (key, error) in form.errors.items(): current.log.error(('error importing image %s: %s %s' % (image, key, error)))
def import_image(self, filename, tablename, idfield, imagefield): '\n Import images, such as a logo\n\n Args:\n filename: a CSV list of records and filenames\n tablename: the name of the table\n idfield: the field used to identify the record\n imagefield: the field to where the image will be added\n\n Example:\n bi.import_image ("org_logos.csv", "org_organisation", "name", "logo")\n and the file org_logos.csv may look as follows\n id file\n Sahana Software Foundation sahanalogo.jpg\n American Red Cross icrc.gif\n ' try: openFile = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) (prefix, name) = tablename.split('_', 1) reader = self.csv.DictReader(openFile) db = current.db s3db = current.s3db audit = current.audit table = s3db[tablename] idfield = table[idfield] base_query = (table.deleted == False) fieldnames = [table._id.name, imagefield] for field in table: if ((field.name not in fieldnames) and (field.writable is False) and (field.update is None) and (field.compute is None)): fieldnames.append(field.name) fields = [table[f] for f in fieldnames] get_config = s3db.get_config onvalidation = (get_config(tablename, 'update_onvalidation') or get_config(tablename, 'onvalidation')) onaccept = (get_config(tablename, 'update_onaccept') or get_config(tablename, 'onaccept')) update_realm = get_config(tablename, 'update_realm') if update_realm: set_realm_entity = current.auth.set_realm_entity update_super = s3db.update_super for row in reader: if (row != None): image = row['file'] try: path = os.path.split(filename)[0] imagepath = os.path.join(path, image) openFile = open(imagepath, 'rb') except IOError: current.log.error(('Unable to open image file %s' % image)) continue image_source = BytesIO(openFile.read()) query = (base_query & (idfield == row['id'])) record = db(query).select(*fields, limitby=(0, 1)).first() try: record_id = record.id except AttributeError: current.log.error(('Unable to get record %s of the resource %s 
to attach the image file to' % (row['id'], tablename))) continue form = SQLFORM(table, record, fields=['id', imagefield]) form_vars = Storage() form_vars._formname = ('%s/%s' % (tablename, record_id)) form_vars.id = record_id source = Storage() source.filename = imagepath source.file = image_source form_vars[imagefield] = source if form.accepts(form_vars, onvalidation=onvalidation): audit('update', prefix, name, form=form, record=record_id, representation='csv') update_super(table, form_vars) if update_realm: set_realm_entity(table, form_vars, force_update=True) callback(onaccept, form, tablename=tablename) else: for (key, error) in form.errors.items(): current.log.error(('error importing image %s: %s %s' % (image, key, error)))<|docstring|>Import images, such as a logo Args: filename: a CSV list of records and filenames tablename: the name of the table idfield: the field used to identify the record imagefield: the field to where the image will be added Example: bi.import_image ("org_logos.csv", "org_organisation", "name", "logo") and the file org_logos.csv may look as follows id file Sahana Software Foundation sahanalogo.jpg American Red Cross icrc.gif<|endoftext|>
a4d4e0dd3db9943e0b211e17bc6a194834f96426ca92052d3da35233ccd1d24b
def import_pr_image(self, filename): '\n Import person images from CSV\n\n Example:\n bi.import_pr_image("pr_image.csv")\n and the file pr_image.csv may look as follows\n First Name,Middle Name,Last Name,Image,Profile,Type\n John,,Doe,jdoe.jpg,Y,\n Type should be an integer If empty then uses default (usually 1 (Photograph))\n ' try: openFile = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) tablename = 'pr_image' reader = self.csv.DictReader(openFile) db = current.db s3db = current.s3db audit = current.audit ptable = s3db.pr_person table = s3db.pr_image table.pe_id.writable = True type_default = table.type.default get_config = s3db.get_config onvalidation = (get_config(tablename, 'create_onvalidation') or get_config(tablename, 'onvalidation')) onaccept = (get_config(tablename, 'create_onaccept') or get_config(tablename, 'onaccept')) for row in reader: if (row != None): image = row['Image'] try: path = os.path.split(filename)[0] imagepath = os.path.join(path, image) openFile = open(imagepath, 'rb') except IOError: current.log.error(('Unable to open image file %s' % image)) continue image_source = BytesIO(openFile.read()) first_name = row.get('First Name') middle_name = row.get('Middle Name') last_name = row.get('Last Name') query = (((ptable.first_name == first_name) & (ptable.middle_name == middle_name)) & (ptable.last_name == last_name)) person = db(query).select(ptable.pe_id, limitby=(0, 1)).first() try: pe_id = person.pe_id except AttributeError: from s3 import s3_fullname person = Storage(first_name=first_name, middle_name=middle_name, last_name=last_name) current.log.error(('Unable to find person %s to attach the image file to' % s3_fullname(person))) continue form = SQLFORM(table, fields=['pe_id', 'image', 'type', 'profile']) form_vars = Storage() form_vars._formname = ('%s/create' % tablename) source = Storage() source.filename = imagepath source.file = image_source form_vars.pe_id = pe_id form_vars.image = 
source form_vars['type'] = row.get('Type', type_default) profile = row.get('Profile') if profile: if (profile.upper() in ('Y', 'YES', 'T', 'TRUE')): form_vars.profile = True elif (profile.upper() in ('N', 'NO', 'F', 'FALSE')): form_vars.profile = False else: form_vars.profile = None else: form_vars.profile = None if form.accepts(form_vars, onvalidation=onvalidation): audit('create', 'pr', 'image', form=form, representation='csv') callback(onaccept, form, tablename=tablename) else: current.log.debug('Not Accepted') for (key, error) in form.errors.items(): current.log.error(('error importing image %s: %s %s' % (image, key, error)))
Import person images from CSV Example: bi.import_pr_image("pr_image.csv") and the file pr_image.csv may look as follows First Name,Middle Name,Last Name,Image,Profile,Type John,,Doe,jdoe.jpg,Y, Type should be an integer If empty then uses default (usually 1 (Photograph))
modules/s3/s3import.py
import_pr_image
annehaley/eden
205
python
def import_pr_image(self, filename): '\n Import person images from CSV\n\n Example:\n bi.import_pr_image("pr_image.csv")\n and the file pr_image.csv may look as follows\n First Name,Middle Name,Last Name,Image,Profile,Type\n John,,Doe,jdoe.jpg,Y,\n Type should be an integer If empty then uses default (usually 1 (Photograph))\n ' try: openFile = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) tablename = 'pr_image' reader = self.csv.DictReader(openFile) db = current.db s3db = current.s3db audit = current.audit ptable = s3db.pr_person table = s3db.pr_image table.pe_id.writable = True type_default = table.type.default get_config = s3db.get_config onvalidation = (get_config(tablename, 'create_onvalidation') or get_config(tablename, 'onvalidation')) onaccept = (get_config(tablename, 'create_onaccept') or get_config(tablename, 'onaccept')) for row in reader: if (row != None): image = row['Image'] try: path = os.path.split(filename)[0] imagepath = os.path.join(path, image) openFile = open(imagepath, 'rb') except IOError: current.log.error(('Unable to open image file %s' % image)) continue image_source = BytesIO(openFile.read()) first_name = row.get('First Name') middle_name = row.get('Middle Name') last_name = row.get('Last Name') query = (((ptable.first_name == first_name) & (ptable.middle_name == middle_name)) & (ptable.last_name == last_name)) person = db(query).select(ptable.pe_id, limitby=(0, 1)).first() try: pe_id = person.pe_id except AttributeError: from s3 import s3_fullname person = Storage(first_name=first_name, middle_name=middle_name, last_name=last_name) current.log.error(('Unable to find person %s to attach the image file to' % s3_fullname(person))) continue form = SQLFORM(table, fields=['pe_id', 'image', 'type', 'profile']) form_vars = Storage() form_vars._formname = ('%s/create' % tablename) source = Storage() source.filename = imagepath source.file = image_source form_vars.pe_id = pe_id form_vars.image = 
source form_vars['type'] = row.get('Type', type_default) profile = row.get('Profile') if profile: if (profile.upper() in ('Y', 'YES', 'T', 'TRUE')): form_vars.profile = True elif (profile.upper() in ('N', 'NO', 'F', 'FALSE')): form_vars.profile = False else: form_vars.profile = None else: form_vars.profile = None if form.accepts(form_vars, onvalidation=onvalidation): audit('create', 'pr', 'image', form=form, representation='csv') callback(onaccept, form, tablename=tablename) else: current.log.debug('Not Accepted') for (key, error) in form.errors.items(): current.log.error(('error importing image %s: %s %s' % (image, key, error)))
def import_pr_image(self, filename): '\n Import person images from CSV\n\n Example:\n bi.import_pr_image("pr_image.csv")\n and the file pr_image.csv may look as follows\n First Name,Middle Name,Last Name,Image,Profile,Type\n John,,Doe,jdoe.jpg,Y,\n Type should be an integer If empty then uses default (usually 1 (Photograph))\n ' try: openFile = open(filename, 'r', encoding='utf-8') except IOError: return ('Unable to open file %s' % filename) tablename = 'pr_image' reader = self.csv.DictReader(openFile) db = current.db s3db = current.s3db audit = current.audit ptable = s3db.pr_person table = s3db.pr_image table.pe_id.writable = True type_default = table.type.default get_config = s3db.get_config onvalidation = (get_config(tablename, 'create_onvalidation') or get_config(tablename, 'onvalidation')) onaccept = (get_config(tablename, 'create_onaccept') or get_config(tablename, 'onaccept')) for row in reader: if (row != None): image = row['Image'] try: path = os.path.split(filename)[0] imagepath = os.path.join(path, image) openFile = open(imagepath, 'rb') except IOError: current.log.error(('Unable to open image file %s' % image)) continue image_source = BytesIO(openFile.read()) first_name = row.get('First Name') middle_name = row.get('Middle Name') last_name = row.get('Last Name') query = (((ptable.first_name == first_name) & (ptable.middle_name == middle_name)) & (ptable.last_name == last_name)) person = db(query).select(ptable.pe_id, limitby=(0, 1)).first() try: pe_id = person.pe_id except AttributeError: from s3 import s3_fullname person = Storage(first_name=first_name, middle_name=middle_name, last_name=last_name) current.log.error(('Unable to find person %s to attach the image file to' % s3_fullname(person))) continue form = SQLFORM(table, fields=['pe_id', 'image', 'type', 'profile']) form_vars = Storage() form_vars._formname = ('%s/create' % tablename) source = Storage() source.filename = imagepath source.file = image_source form_vars.pe_id = pe_id form_vars.image = 
source form_vars['type'] = row.get('Type', type_default) profile = row.get('Profile') if profile: if (profile.upper() in ('Y', 'YES', 'T', 'TRUE')): form_vars.profile = True elif (profile.upper() in ('N', 'NO', 'F', 'FALSE')): form_vars.profile = False else: form_vars.profile = None else: form_vars.profile = None if form.accepts(form_vars, onvalidation=onvalidation): audit('create', 'pr', 'image', form=form, representation='csv') callback(onaccept, form, tablename=tablename) else: current.log.debug('Not Accepted') for (key, error) in form.errors.items(): current.log.error(('error importing image %s: %s %s' % (image, key, error)))<|docstring|>Import person images from CSV Example: bi.import_pr_image("pr_image.csv") and the file pr_image.csv may look as follows First Name,Middle Name,Last Name,Image,Profile,Type John,,Doe,jdoe.jpg,Y, Type should be an integer If empty then uses default (usually 1 (Photograph))<|endoftext|>
ef1d35be5c410ddfed4fc576ee187f6ad6a89aac51a4700f9e43390e1eb13b67
@staticmethod def import_font(url): '\n Install a Font\n ' if (url == 'unifont'): url = 'http://unifoundry.com/pub/unifont/unifont-14.0.01/font-builds/unifont-14.0.01.ttf' filename = 'unifont.ttf' extension = 'ttf' else: filename = url.split('/')[(- 1)] (filename, extension) = filename.rsplit('.', 1) if (extension not in ('ttf', 'gz', 'zip')): current.log.warning(('Unsupported font extension: %s' % extension)) return filename = ('%s.ttf' % filename) font_path = os.path.join(current.request.folder, 'static', 'fonts') if os.path.exists(os.path.join(font_path, filename)): current.log.warning(('Using cached copy of %s' % filename)) return cwd = os.getcwd() os.chdir(font_path) try: _file = fetch(url) except URLError as exception: current.log.error(exception) os.chdir(cwd) return if (extension == 'gz'): import tarfile tf = tarfile.open(fileobj=BytesIO(_file)) tf.extractall() elif (extension == 'zip'): import zipfile zf = zipfile.ZipFile(BytesIO(_file)) zf.extractall() else: f = open(filename, 'wb') f.write(_file) f.close() os.chdir(cwd)
Install a Font
modules/s3/s3import.py
import_font
annehaley/eden
205
python
@staticmethod def import_font(url): '\n \n ' if (url == 'unifont'): url = 'http://unifoundry.com/pub/unifont/unifont-14.0.01/font-builds/unifont-14.0.01.ttf' filename = 'unifont.ttf' extension = 'ttf' else: filename = url.split('/')[(- 1)] (filename, extension) = filename.rsplit('.', 1) if (extension not in ('ttf', 'gz', 'zip')): current.log.warning(('Unsupported font extension: %s' % extension)) return filename = ('%s.ttf' % filename) font_path = os.path.join(current.request.folder, 'static', 'fonts') if os.path.exists(os.path.join(font_path, filename)): current.log.warning(('Using cached copy of %s' % filename)) return cwd = os.getcwd() os.chdir(font_path) try: _file = fetch(url) except URLError as exception: current.log.error(exception) os.chdir(cwd) return if (extension == 'gz'): import tarfile tf = tarfile.open(fileobj=BytesIO(_file)) tf.extractall() elif (extension == 'zip'): import zipfile zf = zipfile.ZipFile(BytesIO(_file)) zf.extractall() else: f = open(filename, 'wb') f.write(_file) f.close() os.chdir(cwd)
@staticmethod def import_font(url): '\n \n ' if (url == 'unifont'): url = 'http://unifoundry.com/pub/unifont/unifont-14.0.01/font-builds/unifont-14.0.01.ttf' filename = 'unifont.ttf' extension = 'ttf' else: filename = url.split('/')[(- 1)] (filename, extension) = filename.rsplit('.', 1) if (extension not in ('ttf', 'gz', 'zip')): current.log.warning(('Unsupported font extension: %s' % extension)) return filename = ('%s.ttf' % filename) font_path = os.path.join(current.request.folder, 'static', 'fonts') if os.path.exists(os.path.join(font_path, filename)): current.log.warning(('Using cached copy of %s' % filename)) return cwd = os.getcwd() os.chdir(font_path) try: _file = fetch(url) except URLError as exception: current.log.error(exception) os.chdir(cwd) return if (extension == 'gz'): import tarfile tf = tarfile.open(fileobj=BytesIO(_file)) tf.extractall() elif (extension == 'zip'): import zipfile zf = zipfile.ZipFile(BytesIO(_file)) zf.extractall() else: f = open(filename, 'wb') f.write(_file) f.close() os.chdir(cwd)<|docstring|>Install a Font<|endoftext|>
27017537cb59cca7c0c1e46e94bf638235b30b0e5f454373f52ebd1b45ce1d39
def import_remote_csv(self, url, prefix, resource, stylesheet): ' Import CSV files from remote servers ' extension = url.split('.')[(- 1)] if (extension not in ('csv', 'zip')): current.log.error(('error importing remote file %s: invalid extension' % url)) return cwd = os.getcwd() os_path = os.path os_path_exists = os_path.exists os_path_join = os_path.join TEMP = os_path_join(cwd, 'temp') if (not os_path_exists(TEMP)): import tempfile TEMP = tempfile.gettempdir() tempPath = os_path_join(TEMP, 'remote_csv') if (not os_path_exists(tempPath)): try: os.mkdir(tempPath) except OSError: current.log.error(('Unable to create temp folder %s!' % tempPath)) return filename = url.split('/')[(- 1)] if (extension == 'zip'): filename = filename.replace('.zip', '.csv') if os_path_exists(os_path_join(tempPath, filename)): current.log.warning(('Using cached copy of %s' % filename)) else: os.chdir(tempPath) try: _file = fetch(url) except URLError as exception: current.log.error(exception) os.chdir(cwd) return if (extension == 'zip'): import zipfile try: myfile = zipfile.ZipFile(BytesIO(_file)) except zipfile.BadZipfile as exception: current.log.error(exception) os.chdir(cwd) return files = myfile.infolist() for f in files: filename = f.filename extension = filename.split('.')[(- 1)] if (extension == 'csv'): _file = myfile.read(filename) _f = open(filename, 'w') _f.write(_file) _f.close() break myfile.close() else: f = open(filename, 'w') f.write(_file) f.close() os.chdir(cwd) task = [1, prefix, resource, os_path_join(tempPath, filename), os_path_join(current.request.folder, 'static', 'formats', 's3csv', prefix, stylesheet), None] self.execute_import_task(task)
Import CSV files from remote servers
modules/s3/s3import.py
import_remote_csv
annehaley/eden
205
python
def import_remote_csv(self, url, prefix, resource, stylesheet): ' ' extension = url.split('.')[(- 1)] if (extension not in ('csv', 'zip')): current.log.error(('error importing remote file %s: invalid extension' % url)) return cwd = os.getcwd() os_path = os.path os_path_exists = os_path.exists os_path_join = os_path.join TEMP = os_path_join(cwd, 'temp') if (not os_path_exists(TEMP)): import tempfile TEMP = tempfile.gettempdir() tempPath = os_path_join(TEMP, 'remote_csv') if (not os_path_exists(tempPath)): try: os.mkdir(tempPath) except OSError: current.log.error(('Unable to create temp folder %s!' % tempPath)) return filename = url.split('/')[(- 1)] if (extension == 'zip'): filename = filename.replace('.zip', '.csv') if os_path_exists(os_path_join(tempPath, filename)): current.log.warning(('Using cached copy of %s' % filename)) else: os.chdir(tempPath) try: _file = fetch(url) except URLError as exception: current.log.error(exception) os.chdir(cwd) return if (extension == 'zip'): import zipfile try: myfile = zipfile.ZipFile(BytesIO(_file)) except zipfile.BadZipfile as exception: current.log.error(exception) os.chdir(cwd) return files = myfile.infolist() for f in files: filename = f.filename extension = filename.split('.')[(- 1)] if (extension == 'csv'): _file = myfile.read(filename) _f = open(filename, 'w') _f.write(_file) _f.close() break myfile.close() else: f = open(filename, 'w') f.write(_file) f.close() os.chdir(cwd) task = [1, prefix, resource, os_path_join(tempPath, filename), os_path_join(current.request.folder, 'static', 'formats', 's3csv', prefix, stylesheet), None] self.execute_import_task(task)
def import_remote_csv(self, url, prefix, resource, stylesheet): ' ' extension = url.split('.')[(- 1)] if (extension not in ('csv', 'zip')): current.log.error(('error importing remote file %s: invalid extension' % url)) return cwd = os.getcwd() os_path = os.path os_path_exists = os_path.exists os_path_join = os_path.join TEMP = os_path_join(cwd, 'temp') if (not os_path_exists(TEMP)): import tempfile TEMP = tempfile.gettempdir() tempPath = os_path_join(TEMP, 'remote_csv') if (not os_path_exists(tempPath)): try: os.mkdir(tempPath) except OSError: current.log.error(('Unable to create temp folder %s!' % tempPath)) return filename = url.split('/')[(- 1)] if (extension == 'zip'): filename = filename.replace('.zip', '.csv') if os_path_exists(os_path_join(tempPath, filename)): current.log.warning(('Using cached copy of %s' % filename)) else: os.chdir(tempPath) try: _file = fetch(url) except URLError as exception: current.log.error(exception) os.chdir(cwd) return if (extension == 'zip'): import zipfile try: myfile = zipfile.ZipFile(BytesIO(_file)) except zipfile.BadZipfile as exception: current.log.error(exception) os.chdir(cwd) return files = myfile.infolist() for f in files: filename = f.filename extension = filename.split('.')[(- 1)] if (extension == 'csv'): _file = myfile.read(filename) _f = open(filename, 'w') _f.write(_file) _f.close() break myfile.close() else: f = open(filename, 'w') f.write(_file) f.close() os.chdir(cwd) task = [1, prefix, resource, os_path_join(tempPath, filename), os_path_join(current.request.folder, 'static', 'formats', 's3csv', prefix, stylesheet), None] self.execute_import_task(task)<|docstring|>Import CSV files from remote servers<|endoftext|>
a453defa407816075310f7de9defd5899adaac86a3c2e4b6404cf0f41a7c1260
@staticmethod def import_script(filename): '\n Run a custom Import Script\n\n TODO:\n Report Errors during Script run to console better\n ' from gluon.cfs import getcfs from gluon.compileapp import build_environment from gluon.restricted import restricted environment = build_environment(current.request, current.response, current.session) environment['current'] = current environment['auth'] = current.auth environment['db'] = current.db environment['gis'] = current.gis environment['s3db'] = current.s3db environment['settings'] = current.deployment_settings code = getcfs(filename, filename, None) restricted(code, environment, layer=filename)
Run a custom Import Script TODO: Report Errors during Script run to console better
modules/s3/s3import.py
import_script
annehaley/eden
205
python
@staticmethod def import_script(filename): '\n Run a custom Import Script\n\n TODO:\n Report Errors during Script run to console better\n ' from gluon.cfs import getcfs from gluon.compileapp import build_environment from gluon.restricted import restricted environment = build_environment(current.request, current.response, current.session) environment['current'] = current environment['auth'] = current.auth environment['db'] = current.db environment['gis'] = current.gis environment['s3db'] = current.s3db environment['settings'] = current.deployment_settings code = getcfs(filename, filename, None) restricted(code, environment, layer=filename)
@staticmethod def import_script(filename): '\n Run a custom Import Script\n\n TODO:\n Report Errors during Script run to console better\n ' from gluon.cfs import getcfs from gluon.compileapp import build_environment from gluon.restricted import restricted environment = build_environment(current.request, current.response, current.session) environment['current'] = current environment['auth'] = current.auth environment['db'] = current.db environment['gis'] = current.gis environment['s3db'] = current.s3db environment['settings'] = current.deployment_settings code = getcfs(filename, filename, None) restricted(code, environment, layer=filename)<|docstring|>Run a custom Import Script TODO: Report Errors during Script run to console better<|endoftext|>
aef3bcec138f73a344b1a6101a6f1ac9b0387768c397617358479afb851798d0
def import_task(self, task_name, args_json=None, vars_json=None): '\n Import a Scheduled Task\n ' bulk = current.response.s3.bulk current.response.s3.bulk = True validator = IS_JSONS3() if args_json: (task_args, error) = validator(args_json) if error: self.errorList.append(error) return else: task_args = [] if vars_json: (all_vars, error) = validator(vars_json) if error: self.errorList.append(error) return else: all_vars = {} current.response.s3.bulk = bulk kwargs = {} task_vars = {} options = ('function_name', 'start_time', 'next_run_time', 'stop_time', 'repeats', 'period', 'timeout', 'enabled', 'group_name', 'ignore_duplicate', 'sync_output') for var in all_vars: if (var in options): kwargs[var] = all_vars[var] else: task_vars[var] = all_vars[var] current.s3task.schedule_task(task_name.split(os.path.sep)[(- 1)], args=task_args, vars=task_vars, **kwargs)
Import a Scheduled Task
modules/s3/s3import.py
import_task
annehaley/eden
205
python
def import_task(self, task_name, args_json=None, vars_json=None): '\n \n ' bulk = current.response.s3.bulk current.response.s3.bulk = True validator = IS_JSONS3() if args_json: (task_args, error) = validator(args_json) if error: self.errorList.append(error) return else: task_args = [] if vars_json: (all_vars, error) = validator(vars_json) if error: self.errorList.append(error) return else: all_vars = {} current.response.s3.bulk = bulk kwargs = {} task_vars = {} options = ('function_name', 'start_time', 'next_run_time', 'stop_time', 'repeats', 'period', 'timeout', 'enabled', 'group_name', 'ignore_duplicate', 'sync_output') for var in all_vars: if (var in options): kwargs[var] = all_vars[var] else: task_vars[var] = all_vars[var] current.s3task.schedule_task(task_name.split(os.path.sep)[(- 1)], args=task_args, vars=task_vars, **kwargs)
def import_task(self, task_name, args_json=None, vars_json=None): '\n \n ' bulk = current.response.s3.bulk current.response.s3.bulk = True validator = IS_JSONS3() if args_json: (task_args, error) = validator(args_json) if error: self.errorList.append(error) return else: task_args = [] if vars_json: (all_vars, error) = validator(vars_json) if error: self.errorList.append(error) return else: all_vars = {} current.response.s3.bulk = bulk kwargs = {} task_vars = {} options = ('function_name', 'start_time', 'next_run_time', 'stop_time', 'repeats', 'period', 'timeout', 'enabled', 'group_name', 'ignore_duplicate', 'sync_output') for var in all_vars: if (var in options): kwargs[var] = all_vars[var] else: task_vars[var] = all_vars[var] current.s3task.schedule_task(task_name.split(os.path.sep)[(- 1)], args=task_args, vars=task_vars, **kwargs)<|docstring|>Import a Scheduled Task<|endoftext|>
b79002a2298b291e470989c4bb46ae47a6991d759a7ef1f9b7f04ca3804dabe4
def import_xml(self, filepath, prefix, resourcename, dataformat, source_type=None): '\n Import XML data using an XSLT: static/formats/<dataformat>/import.xsl\n Setting the source_type is possible\n ' prefix = prefix.strip('" ') resourcename = resourcename.strip('" ') try: source = open(filepath, 'rb') except IOError: error_string = 'prepopulate error: file %s missing' self.errorList.append((error_string % filepath)) return stylesheet = os.path.join(current.request.folder, 'static', 'formats', dataformat, 'import.xsl') try: xslt_file = open(stylesheet, 'r') except IOError: error_string = 'prepopulate error: file %s missing' self.errorList.append((error_string % stylesheet)) return else: xslt_file.close() tablename = ('%s_%s' % (prefix, resourcename)) resource = current.s3db.resource(tablename) if (tablename not in self.customised): customise = current.deployment_settings.customise_resource(tablename) if customise: request = S3Request(prefix, resourcename, current.request) customise(request, tablename) self.customised.append(tablename) auth = current.auth auth.rollback = True try: resource.import_xml(source, stylesheet=stylesheet, source_type=source_type) except SyntaxError as e: self.errorList.append(('WARNING: import error - %s (file: %s, stylesheet: %s/import.xsl)' % (e, filepath, dataformat))) auth.rollback = False return if (not resource.error): current.db.commit() else: error = resource.error self.errorList.append(('%s - %s: %s' % (filepath, tablename, error))) errors = current.xml.collect_errors(resource) if errors: self.errorList.extend(errors) current.db.rollback() auth.rollback = False
Import XML data using an XSLT: static/formats/<dataformat>/import.xsl Setting the source_type is possible
modules/s3/s3import.py
import_xml
annehaley/eden
205
python
def import_xml(self, filepath, prefix, resourcename, dataformat, source_type=None): '\n Import XML data using an XSLT: static/formats/<dataformat>/import.xsl\n Setting the source_type is possible\n ' prefix = prefix.strip('" ') resourcename = resourcename.strip('" ') try: source = open(filepath, 'rb') except IOError: error_string = 'prepopulate error: file %s missing' self.errorList.append((error_string % filepath)) return stylesheet = os.path.join(current.request.folder, 'static', 'formats', dataformat, 'import.xsl') try: xslt_file = open(stylesheet, 'r') except IOError: error_string = 'prepopulate error: file %s missing' self.errorList.append((error_string % stylesheet)) return else: xslt_file.close() tablename = ('%s_%s' % (prefix, resourcename)) resource = current.s3db.resource(tablename) if (tablename not in self.customised): customise = current.deployment_settings.customise_resource(tablename) if customise: request = S3Request(prefix, resourcename, current.request) customise(request, tablename) self.customised.append(tablename) auth = current.auth auth.rollback = True try: resource.import_xml(source, stylesheet=stylesheet, source_type=source_type) except SyntaxError as e: self.errorList.append(('WARNING: import error - %s (file: %s, stylesheet: %s/import.xsl)' % (e, filepath, dataformat))) auth.rollback = False return if (not resource.error): current.db.commit() else: error = resource.error self.errorList.append(('%s - %s: %s' % (filepath, tablename, error))) errors = current.xml.collect_errors(resource) if errors: self.errorList.extend(errors) current.db.rollback() auth.rollback = False
def import_xml(self, filepath, prefix, resourcename, dataformat, source_type=None): '\n Import XML data using an XSLT: static/formats/<dataformat>/import.xsl\n Setting the source_type is possible\n ' prefix = prefix.strip('" ') resourcename = resourcename.strip('" ') try: source = open(filepath, 'rb') except IOError: error_string = 'prepopulate error: file %s missing' self.errorList.append((error_string % filepath)) return stylesheet = os.path.join(current.request.folder, 'static', 'formats', dataformat, 'import.xsl') try: xslt_file = open(stylesheet, 'r') except IOError: error_string = 'prepopulate error: file %s missing' self.errorList.append((error_string % stylesheet)) return else: xslt_file.close() tablename = ('%s_%s' % (prefix, resourcename)) resource = current.s3db.resource(tablename) if (tablename not in self.customised): customise = current.deployment_settings.customise_resource(tablename) if customise: request = S3Request(prefix, resourcename, current.request) customise(request, tablename) self.customised.append(tablename) auth = current.auth auth.rollback = True try: resource.import_xml(source, stylesheet=stylesheet, source_type=source_type) except SyntaxError as e: self.errorList.append(('WARNING: import error - %s (file: %s, stylesheet: %s/import.xsl)' % (e, filepath, dataformat))) auth.rollback = False return if (not resource.error): current.db.commit() else: error = resource.error self.errorList.append(('%s - %s: %s' % (filepath, tablename, error))) errors = current.xml.collect_errors(resource) if errors: self.errorList.extend(errors) current.db.rollback() auth.rollback = False<|docstring|>Import XML data using an XSLT: static/formats/<dataformat>/import.xsl Setting the source_type is possible<|endoftext|>
19730f550d959a188d220c97adfecfdc93d88ceb0c82958367a7fd94cdff6daf
def perform_tasks(self, path): '\n Load and then execute the import jobs that are listed in the\n descriptor file (tasks.cfg)\n ' self.load_descriptor(path) for task in self.tasks: if (task[0] == 1): self.execute_import_task(task) elif (task[0] == 2): self.execute_special_task(task)
Load and then execute the import jobs that are listed in the descriptor file (tasks.cfg)
modules/s3/s3import.py
perform_tasks
annehaley/eden
205
python
def perform_tasks(self, path): '\n Load and then execute the import jobs that are listed in the\n descriptor file (tasks.cfg)\n ' self.load_descriptor(path) for task in self.tasks: if (task[0] == 1): self.execute_import_task(task) elif (task[0] == 2): self.execute_special_task(task)
def perform_tasks(self, path): '\n Load and then execute the import jobs that are listed in the\n descriptor file (tasks.cfg)\n ' self.load_descriptor(path) for task in self.tasks: if (task[0] == 1): self.execute_import_task(task) elif (task[0] == 2): self.execute_special_task(task)<|docstring|>Load and then execute the import jobs that are listed in the descriptor file (tasks.cfg)<|endoftext|>
061276582014b8615d2c9714ae63c7495d359e8d74b570697794bee775743912
def schedule(reference): ' Schedule a referenced item for implicit import ' entry = reference.entry if (entry and (entry.element is not None) and (not entry.item_id)): item_id = add_item(element=entry.element) if item_id: entry.item_id = item_id
Schedule a referenced item for implicit import
modules/s3/s3import.py
schedule
annehaley/eden
205
python
def schedule(reference): ' ' entry = reference.entry if (entry and (entry.element is not None) and (not entry.item_id)): item_id = add_item(element=entry.element) if item_id: entry.item_id = item_id
def schedule(reference): ' ' entry = reference.entry if (entry and (entry.element is not None) and (not entry.item_id)): item_id = add_item(element=entry.element) if item_id: entry.item_id = item_id<|docstring|>Schedule a referenced item for implicit import<|endoftext|>
94fa78dc9383c4688af0b90b35e352aed2c0a93aa2d05c1a6e2b99c90e802220
def convertDataForPlot(self, data): '\n Return the proper units on the x-values to be used for plotting.\n Takes the desired values from the GUI selection.\n :param data:\n :return:\n ' x = data[(:, 0)].copy() xType = [str(i.text()) for i in self.menuSpecX.actions() if i.isChecked()][0] x = converter['eV'][xType](x) return [x, (data[(:, 1)] / self.uisbDivideBy.value())]
Return the proper units on the x-values to be used for plotting. Takes the desired values from the GUI selection. :param data: :return:
hsganalysis/UIAnalysis.py
convertDataForPlot
SherwinGroup/HSG-turbo
1
python
def convertDataForPlot(self, data): '\n Return the proper units on the x-values to be used for plotting.\n Takes the desired values from the GUI selection.\n :param data:\n :return:\n ' x = data[(:, 0)].copy() xType = [str(i.text()) for i in self.menuSpecX.actions() if i.isChecked()][0] x = converter['eV'][xType](x) return [x, (data[(:, 1)] / self.uisbDivideBy.value())]
def convertDataForPlot(self, data): '\n Return the proper units on the x-values to be used for plotting.\n Takes the desired values from the GUI selection.\n :param data:\n :return:\n ' x = data[(:, 0)].copy() xType = [str(i.text()) for i in self.menuSpecX.actions() if i.isChecked()][0] x = converter['eV'][xType](x) return [x, (data[(:, 1)] / self.uisbDivideBy.value())]<|docstring|>Return the proper units on the x-values to be used for plotting. Takes the desired values from the GUI selection. :param data: :return:<|endoftext|>
5518b8a6eb5f36d3c3ecce615df10e2045378fb5fcfd5ea44ea7bc4327e69155
def updateTitle(self, bool=True, path=None): '\n Update the window title, either called from signals\n from the menu (where bool=False is due to the signals\n being sent with a value)\n Can be called direclty\n :param bool:\n :param path: list of heirarchy to set title value to\n :return:\n ' if (path is None): child = self.sender() if (child.parentWidget().title() == 'Set Window Title'): if ((child.text() == 'Filename') and (self.dataObj is not None)): self.setWindowTitle(os.path.basename(self.dataObj.fname)) elif (child.text() == 'Other...'): (text, ok) = QtWidgets.QInputDialog.getText(self, 'Enter Window Name', 'Title:', QtWidgets.QLineEdit.Normal, self.windowTitle()) if ok: try: self.setWindowTitle(text.format(**self.dataObj.parameters)) except: self.setWindowTitle(text) self.path = None return path = [str(child.parentWidget().title()), str(child.text())] self.titlePath = path params = self.specParams try: val = params.child(*path).value() except Exception as e: print('ERROR updating window title') print('\tbool=', bool) print('path=', path) name = path[1] pref = (name[:(name.find('(') - 1)] if ('(' in name) else name) self.setWindowTitle('{}: {}'.format(pref, val))
Update the window title, either called from signals from the menu (where bool=False is due to the signals being sent with a value) Can be called direclty :param bool: :param path: list of heirarchy to set title value to :return:
hsganalysis/UIAnalysis.py
updateTitle
SherwinGroup/HSG-turbo
1
python
def updateTitle(self, bool=True, path=None): '\n Update the window title, either called from signals\n from the menu (where bool=False is due to the signals\n being sent with a value)\n Can be called direclty\n :param bool:\n :param path: list of heirarchy to set title value to\n :return:\n ' if (path is None): child = self.sender() if (child.parentWidget().title() == 'Set Window Title'): if ((child.text() == 'Filename') and (self.dataObj is not None)): self.setWindowTitle(os.path.basename(self.dataObj.fname)) elif (child.text() == 'Other...'): (text, ok) = QtWidgets.QInputDialog.getText(self, 'Enter Window Name', 'Title:', QtWidgets.QLineEdit.Normal, self.windowTitle()) if ok: try: self.setWindowTitle(text.format(**self.dataObj.parameters)) except: self.setWindowTitle(text) self.path = None return path = [str(child.parentWidget().title()), str(child.text())] self.titlePath = path params = self.specParams try: val = params.child(*path).value() except Exception as e: print('ERROR updating window title') print('\tbool=', bool) print('path=', path) name = path[1] pref = (name[:(name.find('(') - 1)] if ('(' in name) else name) self.setWindowTitle('{}: {}'.format(pref, val))
def updateTitle(self, bool=True, path=None): '\n Update the window title, either called from signals\n from the menu (where bool=False is due to the signals\n being sent with a value)\n Can be called direclty\n :param bool:\n :param path: list of heirarchy to set title value to\n :return:\n ' if (path is None): child = self.sender() if (child.parentWidget().title() == 'Set Window Title'): if ((child.text() == 'Filename') and (self.dataObj is not None)): self.setWindowTitle(os.path.basename(self.dataObj.fname)) elif (child.text() == 'Other...'): (text, ok) = QtWidgets.QInputDialog.getText(self, 'Enter Window Name', 'Title:', QtWidgets.QLineEdit.Normal, self.windowTitle()) if ok: try: self.setWindowTitle(text.format(**self.dataObj.parameters)) except: self.setWindowTitle(text) self.path = None return path = [str(child.parentWidget().title()), str(child.text())] self.titlePath = path params = self.specParams try: val = params.child(*path).value() except Exception as e: print('ERROR updating window title') print('\tbool=', bool) print('path=', path) name = path[1] pref = (name[:(name.find('(') - 1)] if ('(' in name) else name) self.setWindowTitle('{}: {}'.format(pref, val))<|docstring|>Update the window title, either called from signals from the menu (where bool=False is due to the signals being sent with a value) Can be called direclty :param bool: :param path: list of heirarchy to set title value to :return:<|endoftext|>
45403e681cac4222477c0112877ed45f38aede1b96622216af8379b3e281cb29
def dragEnterEvent(self, event): '\n\n :param event:\n :type event: QtWidgets.QDragEnterEvent\n :return:\n ' event.accept()
:param event: :type event: QtWidgets.QDragEnterEvent :return:
hsganalysis/UIAnalysis.py
dragEnterEvent
SherwinGroup/HSG-turbo
1
python
def dragEnterEvent(self, event): '\n\n :param event:\n :type event: QtWidgets.QDragEnterEvent\n :return:\n ' event.accept()
def dragEnterEvent(self, event): '\n\n :param event:\n :type event: QtWidgets.QDragEnterEvent\n :return:\n ' event.accept()<|docstring|>:param event: :type event: QtWidgets.QDragEnterEvent :return:<|endoftext|>
a50295c2a93cbff408eb92f922379a2bfa22a776b337f03cda8fbf77123f3637
def drop(self, event): '\n :param event:\n :type event: QtWidgets.QDropEvent\n :return:\n ' global fileList if (not event.mimeData().hasUrls()): event.reject() return event.setDropAction(QtCore.Qt.CopyAction) if (event.keyboardModifiers() & QtCore.Qt.ShiftModifier): print('held shift') if (len(event.mimeData().urls()) == 1): filename = str(event.mimeData().urls()[0].toLocalFile()) if os.path.isdir(filename): self.handleFolderDrop(filename) else: cls = BaseWindow.getWindowClass(filename) try: a = cls(filename, parentWin=self) except Exception as e: raise else: fileList.append(a) else: filelist = [str(i.toLocalFile()) for i in event.mimeData().urls()] filelist = [i for i in filelist if ('seriesed' not in i.lower())] objlist = [BaseWindow.getDataClass(i)(i) for i in filelist] if np.all([('hsg' in ii.fname) for ii in objlist]): series = hsg.proc_n_plotCCD(filelist) else: series = objlist for obj in series: if (obj.parameters['series'] == ''): cls = BaseWindow.getWindowClass(obj.fname) fileList.append(cls(obj, parentWin=self)) if (self.titlePath is None): fileList[(- 1)].setWindowTitle(str(obj.parameters['center_lambda'])) else: cls = BaseWindow.getWindowClass(obj.fname) a = cls(obj, parentWin=self) fileList.append(a) if (self.titlePath is None): a.setWindowTitle('Series: {}'.format(obj.parameters['series'])) if (self.dataObj is None): self.close()
:param event: :type event: QtWidgets.QDropEvent :return:
hsganalysis/UIAnalysis.py
drop
SherwinGroup/HSG-turbo
1
python
def drop(self, event): '\n :param event:\n :type event: QtWidgets.QDropEvent\n :return:\n ' global fileList if (not event.mimeData().hasUrls()): event.reject() return event.setDropAction(QtCore.Qt.CopyAction) if (event.keyboardModifiers() & QtCore.Qt.ShiftModifier): print('held shift') if (len(event.mimeData().urls()) == 1): filename = str(event.mimeData().urls()[0].toLocalFile()) if os.path.isdir(filename): self.handleFolderDrop(filename) else: cls = BaseWindow.getWindowClass(filename) try: a = cls(filename, parentWin=self) except Exception as e: raise else: fileList.append(a) else: filelist = [str(i.toLocalFile()) for i in event.mimeData().urls()] filelist = [i for i in filelist if ('seriesed' not in i.lower())] objlist = [BaseWindow.getDataClass(i)(i) for i in filelist] if np.all([('hsg' in ii.fname) for ii in objlist]): series = hsg.proc_n_plotCCD(filelist) else: series = objlist for obj in series: if (obj.parameters['series'] == ): cls = BaseWindow.getWindowClass(obj.fname) fileList.append(cls(obj, parentWin=self)) if (self.titlePath is None): fileList[(- 1)].setWindowTitle(str(obj.parameters['center_lambda'])) else: cls = BaseWindow.getWindowClass(obj.fname) a = cls(obj, parentWin=self) fileList.append(a) if (self.titlePath is None): a.setWindowTitle('Series: {}'.format(obj.parameters['series'])) if (self.dataObj is None): self.close()
def drop(self, event): '\n :param event:\n :type event: QtWidgets.QDropEvent\n :return:\n ' global fileList if (not event.mimeData().hasUrls()): event.reject() return event.setDropAction(QtCore.Qt.CopyAction) if (event.keyboardModifiers() & QtCore.Qt.ShiftModifier): print('held shift') if (len(event.mimeData().urls()) == 1): filename = str(event.mimeData().urls()[0].toLocalFile()) if os.path.isdir(filename): self.handleFolderDrop(filename) else: cls = BaseWindow.getWindowClass(filename) try: a = cls(filename, parentWin=self) except Exception as e: raise else: fileList.append(a) else: filelist = [str(i.toLocalFile()) for i in event.mimeData().urls()] filelist = [i for i in filelist if ('seriesed' not in i.lower())] objlist = [BaseWindow.getDataClass(i)(i) for i in filelist] if np.all([('hsg' in ii.fname) for ii in objlist]): series = hsg.proc_n_plotCCD(filelist) else: series = objlist for obj in series: if (obj.parameters['series'] == ): cls = BaseWindow.getWindowClass(obj.fname) fileList.append(cls(obj, parentWin=self)) if (self.titlePath is None): fileList[(- 1)].setWindowTitle(str(obj.parameters['center_lambda'])) else: cls = BaseWindow.getWindowClass(obj.fname) a = cls(obj, parentWin=self) fileList.append(a) if (self.titlePath is None): a.setWindowTitle('Series: {}'.format(obj.parameters['series'])) if (self.dataObj is None): self.close()<|docstring|>:param event: :type event: QtWidgets.QDropEvent :return:<|endoftext|>
63d5eada3c55916ca70ce60ea3268abed5782ef1dccd711e4f041a1918be3a9c
def handleFitDragEvent(self, obj, val): '\n called when the plot of fit results is dragged/dropped\n :param obj: The thing dragged\n :param val: the pyqtgraph coordinate of the drop point\n :return:\n ' if (self.dataObj is None): return d = self.fitsPlot.getData() self.createCompWindow(data=d, p=val)
called when the plot of fit results is dragged/dropped :param obj: The thing dragged :param val: the pyqtgraph coordinate of the drop point :return:
hsganalysis/UIAnalysis.py
handleFitDragEvent
SherwinGroup/HSG-turbo
1
python
def handleFitDragEvent(self, obj, val): '\n called when the plot of fit results is dragged/dropped\n :param obj: The thing dragged\n :param val: the pyqtgraph coordinate of the drop point\n :return:\n ' if (self.dataObj is None): return d = self.fitsPlot.getData() self.createCompWindow(data=d, p=val)
def handleFitDragEvent(self, obj, val): '\n called when the plot of fit results is dragged/dropped\n :param obj: The thing dragged\n :param val: the pyqtgraph coordinate of the drop point\n :return:\n ' if (self.dataObj is None): return d = self.fitsPlot.getData() self.createCompWindow(data=d, p=val)<|docstring|>called when the plot of fit results is dragged/dropped :param obj: The thing dragged :param val: the pyqtgraph coordinate of the drop point :return:<|endoftext|>
2e954c353fcede23e5adf68baaf851c581dd952d14a80384ba0d14ba5ebe3d5c
def handleSpecDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.convertDataForPlot(self.dataObj.proc_data), p=val)
See comments on handleFitDragEvent :param obj: :param val: :return:
hsganalysis/UIAnalysis.py
handleSpecDragEvent
SherwinGroup/HSG-turbo
1
python
def handleSpecDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.convertDataForPlot(self.dataObj.proc_data), p=val)
def handleSpecDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.convertDataForPlot(self.dataObj.proc_data), p=val)<|docstring|>See comments on handleFitDragEvent :param obj: :param val: :return:<|endoftext|>
d866316b67db4de080c7bef37a157e8e11d6825a559b24155b09898c090829a5
def genParametersOldFormat(self, **kwargs): '\n Generate parameter tree from old version of the head file. Force/coerce header\n information to match what we currently need.\n :param kwargs:\n :return:\n ' newDict = dict(kwargs) if isinstance(kwargs.get('fieldStrength', {}), list): stats = ['kurtosis', 'mean', 'skew', 'std'] sets = ['fieldStrength', 'fieldInt', 'cdRatios', 'fpTime', 'pyroVoltage'] try: newDict['fel_pulses'] = sum(kwargs['fel_pulses']) except TypeError: newDict['fel_pulses'] = kwargs['fel_pulses'] newDict.update({set: {'mean': np.mean(kwargs.get(set, [(- 1)])), 'std': np.std(kwargs.get(set, [(- 1)]))} for set in sets}) else: newDict.update({set: {stat: np.mean([x.get(stat, '-1') for x in kwargs[set]]) for stat in stats} for set in sets}) elif isinstance(kwargs.get('pyroVoltage', {}), list): stats = ['kurtosis', 'mean', 'skew', 'std'] sets = ['pulseDuration', 'pyroVoltage'] try: newDict['fel_pulses'] = sum(kwargs['fel_pulses']) except TypeError: newDict['fel_pulses'] = kwargs['fel_pulses'] newDict.update({set: {'mean': np.mean(kwargs.get(set, [(- 1)])), 'std': np.std(kwargs.get(set, [(- 1)]))} for set in sets}) else: newDict.update({set: {stat: np.mean([x.get(stat, '-1') for x in kwargs[set]]) for stat in stats} for set in sets}) return self.genParameters(**newDict)
Generate parameter tree from old version of the head file. Force/coerce header information to match what we currently need. :param kwargs: :return:
hsganalysis/UIAnalysis.py
genParametersOldFormat
SherwinGroup/HSG-turbo
1
python
def genParametersOldFormat(self, **kwargs): '\n Generate parameter tree from old version of the head file. Force/coerce header\n information to match what we currently need.\n :param kwargs:\n :return:\n ' newDict = dict(kwargs) if isinstance(kwargs.get('fieldStrength', {}), list): stats = ['kurtosis', 'mean', 'skew', 'std'] sets = ['fieldStrength', 'fieldInt', 'cdRatios', 'fpTime', 'pyroVoltage'] try: newDict['fel_pulses'] = sum(kwargs['fel_pulses']) except TypeError: newDict['fel_pulses'] = kwargs['fel_pulses'] newDict.update({set: {'mean': np.mean(kwargs.get(set, [(- 1)])), 'std': np.std(kwargs.get(set, [(- 1)]))} for set in sets}) else: newDict.update({set: {stat: np.mean([x.get(stat, '-1') for x in kwargs[set]]) for stat in stats} for set in sets}) elif isinstance(kwargs.get('pyroVoltage', {}), list): stats = ['kurtosis', 'mean', 'skew', 'std'] sets = ['pulseDuration', 'pyroVoltage'] try: newDict['fel_pulses'] = sum(kwargs['fel_pulses']) except TypeError: newDict['fel_pulses'] = kwargs['fel_pulses'] newDict.update({set: {'mean': np.mean(kwargs.get(set, [(- 1)])), 'std': np.std(kwargs.get(set, [(- 1)]))} for set in sets}) else: newDict.update({set: {stat: np.mean([x.get(stat, '-1') for x in kwargs[set]]) for stat in stats} for set in sets}) return self.genParameters(**newDict)
def genParametersOldFormat(self, **kwargs): '\n Generate parameter tree from old version of the head file. Force/coerce header\n information to match what we currently need.\n :param kwargs:\n :return:\n ' newDict = dict(kwargs) if isinstance(kwargs.get('fieldStrength', {}), list): stats = ['kurtosis', 'mean', 'skew', 'std'] sets = ['fieldStrength', 'fieldInt', 'cdRatios', 'fpTime', 'pyroVoltage'] try: newDict['fel_pulses'] = sum(kwargs['fel_pulses']) except TypeError: newDict['fel_pulses'] = kwargs['fel_pulses'] newDict.update({set: {'mean': np.mean(kwargs.get(set, [(- 1)])), 'std': np.std(kwargs.get(set, [(- 1)]))} for set in sets}) else: newDict.update({set: {stat: np.mean([x.get(stat, '-1') for x in kwargs[set]]) for stat in stats} for set in sets}) elif isinstance(kwargs.get('pyroVoltage', {}), list): stats = ['kurtosis', 'mean', 'skew', 'std'] sets = ['pulseDuration', 'pyroVoltage'] try: newDict['fel_pulses'] = sum(kwargs['fel_pulses']) except TypeError: newDict['fel_pulses'] = kwargs['fel_pulses'] newDict.update({set: {'mean': np.mean(kwargs.get(set, [(- 1)])), 'std': np.std(kwargs.get(set, [(- 1)]))} for set in sets}) else: newDict.update({set: {stat: np.mean([x.get(stat, '-1') for x in kwargs[set]]) for stat in stats} for set in sets}) return self.genParameters(**newDict)<|docstring|>Generate parameter tree from old version of the head file. Force/coerce header information to match what we currently need. :param kwargs: :return:<|endoftext|>
12a1134f9d609e95526b04eadba075b8e76f260a482588fe1ff9dbfb7ff39f8f
def handleSpecDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.dataObj.proc_data, p=val)
See comments on handleFitDragEvent :param obj: :param val: :return:
hsganalysis/UIAnalysis.py
handleSpecDragEvent
SherwinGroup/HSG-turbo
1
python
def handleSpecDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.dataObj.proc_data, p=val)
def handleSpecDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.dataObj.proc_data, p=val)<|docstring|>See comments on handleFitDragEvent :param obj: :param val: :return:<|endoftext|>
a8d7d18ee5561cc0eb6ff5d2d3d0b9b518b762dc4faf772cb3e4f046b287f49a
def containsPoint(self, p): '\n Calculates whether a specified QPoint (in abs coords) is\n within the bounds of myself\n :param p: the q point\n :return: True if it is within my bounds, else false\n ' return self.frameGeometry().contains(p)
Calculates whether a specified QPoint (in abs coords) is within the bounds of myself :param p: the q point :return: True if it is within my bounds, else false
hsganalysis/UIAnalysis.py
containsPoint
SherwinGroup/HSG-turbo
1
python
def containsPoint(self, p): '\n Calculates whether a specified QPoint (in abs coords) is\n within the bounds of myself\n :param p: the q point\n :return: True if it is within my bounds, else false\n ' return self.frameGeometry().contains(p)
def containsPoint(self, p): '\n Calculates whether a specified QPoint (in abs coords) is\n within the bounds of myself\n :param p: the q point\n :return: True if it is within my bounds, else false\n ' return self.frameGeometry().contains(p)<|docstring|>Calculates whether a specified QPoint (in abs coords) is within the bounds of myself :param p: the q point :return: True if it is within my bounds, else false<|endoftext|>
17771ba3a8a20558a70f83f3d88780b23943669b8507f466e4d4526a0f25d4b4
def handleMouseClick(self, obj, pos=None): "\n Handle highlighting curves when they're clicked\n :param obj: a PlotCurveitem if a line is selected, else\n the ViewBox item of the gPlot\n :param pos: If line selected, None\n Else, the position of the click\n :return:\n " if (pos is None): if (obj in self.selectedList): width = 1 self.selectedList.remove(obj) else: width = 3 self.selectedList.append(obj) pen = obj.opts['pen'] pen.setWidth(width) obj.setPen(pen) else: for obj in list(self.curveList.keys()): pen = obj.opts['pen'] pen.setWidth(1) obj.setPen(pen) self.selectedList = [] self.legend.updateSize()
Handle highlighting curves when they're clicked :param obj: a PlotCurveitem if a line is selected, else the ViewBox item of the gPlot :param pos: If line selected, None Else, the position of the click :return:
hsganalysis/UIAnalysis.py
handleMouseClick
SherwinGroup/HSG-turbo
1
python
def handleMouseClick(self, obj, pos=None): "\n Handle highlighting curves when they're clicked\n :param obj: a PlotCurveitem if a line is selected, else\n the ViewBox item of the gPlot\n :param pos: If line selected, None\n Else, the position of the click\n :return:\n " if (pos is None): if (obj in self.selectedList): width = 1 self.selectedList.remove(obj) else: width = 3 self.selectedList.append(obj) pen = obj.opts['pen'] pen.setWidth(width) obj.setPen(pen) else: for obj in list(self.curveList.keys()): pen = obj.opts['pen'] pen.setWidth(1) obj.setPen(pen) self.selectedList = [] self.legend.updateSize()
def handleMouseClick(self, obj, pos=None): "\n Handle highlighting curves when they're clicked\n :param obj: a PlotCurveitem if a line is selected, else\n the ViewBox item of the gPlot\n :param pos: If line selected, None\n Else, the position of the click\n :return:\n " if (pos is None): if (obj in self.selectedList): width = 1 self.selectedList.remove(obj) else: width = 3 self.selectedList.append(obj) pen = obj.opts['pen'] pen.setWidth(width) obj.setPen(pen) else: for obj in list(self.curveList.keys()): pen = obj.opts['pen'] pen.setWidth(1) obj.setPen(pen) self.selectedList = [] self.legend.updateSize()<|docstring|>Handle highlighting curves when they're clicked :param obj: a PlotCurveitem if a line is selected, else the ViewBox item of the gPlot :param pos: If line selected, None Else, the position of the click :return:<|endoftext|>
081cd4360d084649b49f3a9156380caa784102fb56b06644b086f4b8a1eb98b6
def handleAnglesDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.fitDict, p=val)
See comments on handleFitDragEvent :param obj: :param val: :return:
hsganalysis/UIAnalysis.py
handleAnglesDragEvent
SherwinGroup/HSG-turbo
1
python
def handleAnglesDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.fitDict, p=val)
def handleAnglesDragEvent(self, obj, val): '\n See comments on handleFitDragEvent\n :param obj:\n :param val:\n :return:\n ' if (self.dataObj is None): return self.createCompWindow(data=self.fitDict, p=val)<|docstring|>See comments on handleFitDragEvent :param obj: :param val: :return:<|endoftext|>
1ee8f8f081652e4ee6bfc37973ac474a3fe0744bd63c751bf097d18a4fecb57d
def check_dynamic_sql(this, args, callee): 'Check for the use of non-static strings when creating/exeucting SQL\n statements.' if ((len(args) >= 1) and (not args[0].is_clean_literal)): this.traverser.warning(err_id=('js', 'instanceactions', 'executeSimpleSQL_dynamic'), warning='SQL statements should be static strings', description='Dynamic SQL statement should be constucted via static strings, in combination with dynamic parameter binding via Sqlite.jsm wrappers (http://mzl.la/sqlite-jsm) or `createAsyncStatement` (https://developer.mozilla.org/en-US/docs/Storage#Binding_parameters)')
Check for the use of non-static strings when creating/exeucting SQL statements.
validator/testcases/javascript/performance.py
check_dynamic_sql
kmaglione/amo-validator
1
python
def check_dynamic_sql(this, args, callee): 'Check for the use of non-static strings when creating/exeucting SQL\n statements.' if ((len(args) >= 1) and (not args[0].is_clean_literal)): this.traverser.warning(err_id=('js', 'instanceactions', 'executeSimpleSQL_dynamic'), warning='SQL statements should be static strings', description='Dynamic SQL statement should be constucted via static strings, in combination with dynamic parameter binding via Sqlite.jsm wrappers (http://mzl.la/sqlite-jsm) or `createAsyncStatement` (https://developer.mozilla.org/en-US/docs/Storage#Binding_parameters)')
def check_dynamic_sql(this, args, callee): 'Check for the use of non-static strings when creating/exeucting SQL\n statements.' if ((len(args) >= 1) and (not args[0].is_clean_literal)): this.traverser.warning(err_id=('js', 'instanceactions', 'executeSimpleSQL_dynamic'), warning='SQL statements should be static strings', description='Dynamic SQL statement should be constucted via static strings, in combination with dynamic parameter binding via Sqlite.jsm wrappers (http://mzl.la/sqlite-jsm) or `createAsyncStatement` (https://developer.mozilla.org/en-US/docs/Storage#Binding_parameters)')<|docstring|>Check for the use of non-static strings when creating/exeucting SQL statements.<|endoftext|>
369bd781510350e56d1439c10e7c76464b485ebf83ba8c5cba0a3edb8427c909
def createStatement(this, args, callee): 'Handle calls to `createStatement`, returning an object which emits\n warnings upon calls to `execute` and `executeStep` rather than\n `executeAsync`.' check_dynamic_sql(this, args, callee) return this.traverser.wrap().query_interface('mozIStorageBaseStatement')
Handle calls to `createStatement`, returning an object which emits warnings upon calls to `execute` and `executeStep` rather than `executeAsync`.
validator/testcases/javascript/performance.py
createStatement
kmaglione/amo-validator
1
python
def createStatement(this, args, callee): 'Handle calls to `createStatement`, returning an object which emits\n warnings upon calls to `execute` and `executeStep` rather than\n `executeAsync`.' check_dynamic_sql(this, args, callee) return this.traverser.wrap().query_interface('mozIStorageBaseStatement')
def createStatement(this, args, callee): 'Handle calls to `createStatement`, returning an object which emits\n warnings upon calls to `execute` and `executeStep` rather than\n `executeAsync`.' check_dynamic_sql(this, args, callee) return this.traverser.wrap().query_interface('mozIStorageBaseStatement')<|docstring|>Handle calls to `createStatement`, returning an object which emits warnings upon calls to `execute` and `executeStep` rather than `executeAsync`.<|endoftext|>
bd1f41c121c2bfc7f0320b6aa81aa8097886b55f618b8ed16127116fc20fd404
@Hook.on_call def executeSimpleSQL(this, args, callee): 'Handle calls to `executeSimpleSQL`, warning that asynchronous\n methods should be used instead. ' check_dynamic_sql(this, args, callee) return {'err_id': ('js', 'instanceactions', 'executeSimpleSQL'), 'warning': 'Synchronous SQL should not be used', 'description': SYNCHRONOUS_SQL_DESCRIPTION}
Handle calls to `executeSimpleSQL`, warning that asynchronous methods should be used instead.
validator/testcases/javascript/performance.py
executeSimpleSQL
kmaglione/amo-validator
1
python
@Hook.on_call def executeSimpleSQL(this, args, callee): 'Handle calls to `executeSimpleSQL`, warning that asynchronous\n methods should be used instead. ' check_dynamic_sql(this, args, callee) return {'err_id': ('js', 'instanceactions', 'executeSimpleSQL'), 'warning': 'Synchronous SQL should not be used', 'description': SYNCHRONOUS_SQL_DESCRIPTION}
@Hook.on_call def executeSimpleSQL(this, args, callee): 'Handle calls to `executeSimpleSQL`, warning that asynchronous\n methods should be used instead. ' check_dynamic_sql(this, args, callee) return {'err_id': ('js', 'instanceactions', 'executeSimpleSQL'), 'warning': 'Synchronous SQL should not be used', 'description': SYNCHRONOUS_SQL_DESCRIPTION}<|docstring|>Handle calls to `executeSimpleSQL`, warning that asynchronous methods should be used instead.<|endoftext|>
1386871f4017fea0624df8571e9f44eba476c66e6bfa81b6a89e97f9613be27e
@Hook.on_call def open(this, args, callee): 'Check that XMLHttpRequest.open is not called synchronously.' if ((len(args) >= 3) and (not args[2].as_bool())): return 'Synchronous HTTP requests can cause serious UI performance problems, especially for users with slow network connections.'
Check that XMLHttpRequest.open is not called synchronously.
validator/testcases/javascript/performance.py
open
kmaglione/amo-validator
1
python
@Hook.on_call def open(this, args, callee): if ((len(args) >= 3) and (not args[2].as_bool())): return 'Synchronous HTTP requests can cause serious UI performance problems, especially for users with slow network connections.'
@Hook.on_call def open(this, args, callee): if ((len(args) >= 3) and (not args[2].as_bool())): return 'Synchronous HTTP requests can cause serious UI performance problems, especially for users with slow network connections.'<|docstring|>Check that XMLHttpRequest.open is not called synchronously.<|endoftext|>
2c9cbc6755873cbcd7343b5274f758239a9e144af8b76fd8a97d370f14526d4c
def load_embeddings(emb_path, emb_dims): 'Load the embeddings from a text file\n \n :param emb_path: Path of the text file\n :param emb_dims: Embedding dimensions\n \n :return emb_tensor: tensor containing all word embeedings\n :return word_to_indx: dictionary with word:index.' pickle_emb_path = (emb_path[:(- 3)] + 'p') if os.path.exists(pickle_emb_path): return p.load(open(pickle_emb_path, 'rb')) lines = codecs.open(emb_path, 'r', 'utf-8').readlines() emb_tensor = [np.zeros(emb_dims)] word_to_indx = {'PADDING_WORD': 0} for (indx, l) in enumerate(lines): (word, emb) = (l.split()[0], l.split()[1:]) if (not (len(emb) == emb_dims)): continue emb_tensor.append(list(np.float_(emb))) word_to_indx[word] = (indx + 1) emb_tensor = np.array(emb_tensor, dtype=np.float32) p.dump((emb_tensor, word_to_indx), open(pickle_emb_path, 'wb')) return (emb_tensor, word_to_indx)
Load the embeddings from a text file :param emb_path: Path of the text file :param emb_dims: Embedding dimensions :return emb_tensor: tensor containing all word embeedings :return word_to_indx: dictionary with word:index.
evalution/embeddings.py
load_embeddings
esantus/evalution2
1
python
def load_embeddings(emb_path, emb_dims): 'Load the embeddings from a text file\n \n :param emb_path: Path of the text file\n :param emb_dims: Embedding dimensions\n \n :return emb_tensor: tensor containing all word embeedings\n :return word_to_indx: dictionary with word:index.' pickle_emb_path = (emb_path[:(- 3)] + 'p') if os.path.exists(pickle_emb_path): return p.load(open(pickle_emb_path, 'rb')) lines = codecs.open(emb_path, 'r', 'utf-8').readlines() emb_tensor = [np.zeros(emb_dims)] word_to_indx = {'PADDING_WORD': 0} for (indx, l) in enumerate(lines): (word, emb) = (l.split()[0], l.split()[1:]) if (not (len(emb) == emb_dims)): continue emb_tensor.append(list(np.float_(emb))) word_to_indx[word] = (indx + 1) emb_tensor = np.array(emb_tensor, dtype=np.float32) p.dump((emb_tensor, word_to_indx), open(pickle_emb_path, 'wb')) return (emb_tensor, word_to_indx)
def load_embeddings(emb_path, emb_dims): 'Load the embeddings from a text file\n \n :param emb_path: Path of the text file\n :param emb_dims: Embedding dimensions\n \n :return emb_tensor: tensor containing all word embeedings\n :return word_to_indx: dictionary with word:index.' pickle_emb_path = (emb_path[:(- 3)] + 'p') if os.path.exists(pickle_emb_path): return p.load(open(pickle_emb_path, 'rb')) lines = codecs.open(emb_path, 'r', 'utf-8').readlines() emb_tensor = [np.zeros(emb_dims)] word_to_indx = {'PADDING_WORD': 0} for (indx, l) in enumerate(lines): (word, emb) = (l.split()[0], l.split()[1:]) if (not (len(emb) == emb_dims)): continue emb_tensor.append(list(np.float_(emb))) word_to_indx[word] = (indx + 1) emb_tensor = np.array(emb_tensor, dtype=np.float32) p.dump((emb_tensor, word_to_indx), open(pickle_emb_path, 'wb')) return (emb_tensor, word_to_indx)<|docstring|>Load the embeddings from a text file :param emb_path: Path of the text file :param emb_dims: Embedding dimensions :return emb_tensor: tensor containing all word embeedings :return word_to_indx: dictionary with word:index.<|endoftext|>
61773fbbc54015d0b43d3da42f933f4e219fd65b6803f67cc284763c82e20d50
def get_direction(self, direction: int): ' returns the GossmapHalfchannel if known by channel_update ' if (not (0 <= direction <= 1)): raise ValueError('direction can only be 0 or 1') return self.half_channels[direction]
returns the GossmapHalfchannel if known by channel_update
contrib/pyln-client/pyln/client/gossmap.py
get_direction
lightning-developer/lightning
2,288
python
def get_direction(self, direction: int): ' ' if (not (0 <= direction <= 1)): raise ValueError('direction can only be 0 or 1') return self.half_channels[direction]
def get_direction(self, direction: int): ' ' if (not (0 <= direction <= 1)): raise ValueError('direction can only be 0 or 1') return self.half_channels[direction]<|docstring|>returns the GossmapHalfchannel if known by channel_update<|endoftext|>
bbf70aab77966cbccabd38c5e2c4aa59416d225a3323bec17301c274b2fada32
def _set_channel_amount(self, rec: bytes): ' Sets channel capacity of last added channel ' (sats,) = struct.unpack('>Q', rec[2:]) self.channels[self._last_scid].satoshis = sats
Sets channel capacity of last added channel
contrib/pyln-client/pyln/client/gossmap.py
_set_channel_amount
lightning-developer/lightning
2,288
python
def _set_channel_amount(self, rec: bytes): ' ' (sats,) = struct.unpack('>Q', rec[2:]) self.channels[self._last_scid].satoshis = sats
def _set_channel_amount(self, rec: bytes): ' ' (sats,) = struct.unpack('>Q', rec[2:]) self.channels[self._last_scid].satoshis = sats<|docstring|>Sets channel capacity of last added channel<|endoftext|>
900dda1a49fc72820a11460deac07d612bd7c96bdd614df39281c4f75c931d72
def get_channel(self, short_channel_id: ShortChannelId): ' Resolves a channel by its short channel id ' if isinstance(short_channel_id, str): short_channel_id = ShortChannelId.from_str(short_channel_id) return self.channels.get(short_channel_id)
Resolves a channel by its short channel id
contrib/pyln-client/pyln/client/gossmap.py
get_channel
lightning-developer/lightning
2,288
python
def get_channel(self, short_channel_id: ShortChannelId): ' ' if isinstance(short_channel_id, str): short_channel_id = ShortChannelId.from_str(short_channel_id) return self.channels.get(short_channel_id)
def get_channel(self, short_channel_id: ShortChannelId): ' ' if isinstance(short_channel_id, str): short_channel_id = ShortChannelId.from_str(short_channel_id) return self.channels.get(short_channel_id)<|docstring|>Resolves a channel by its short channel id<|endoftext|>
15222249ae043fbfb8c35458ae99dfbe4e9ac7ed5380b78d750c832e9d867039
def get_node(self, node_id: Union[(GossmapNodeId, str)]): ' Resolves a node by its public key node_id ' if isinstance(node_id, str): node_id = GossmapNodeId.from_str(node_id) return self.nodes.get(cast(GossmapNodeId, node_id))
Resolves a node by its public key node_id
contrib/pyln-client/pyln/client/gossmap.py
get_node
lightning-developer/lightning
2,288
python
def get_node(self, node_id: Union[(GossmapNodeId, str)]): ' ' if isinstance(node_id, str): node_id = GossmapNodeId.from_str(node_id) return self.nodes.get(cast(GossmapNodeId, node_id))
def get_node(self, node_id: Union[(GossmapNodeId, str)]): ' ' if isinstance(node_id, str): node_id = GossmapNodeId.from_str(node_id) return self.nodes.get(cast(GossmapNodeId, node_id))<|docstring|>Resolves a node by its public key node_id<|endoftext|>
d7339f8f18e35d4aff1b488a4a9b0afd7b246e641c7c9fece06f4cce2c0ef90c
def reopen_store(self): 'FIXME: Implement!' assert False
FIXME: Implement!
contrib/pyln-client/pyln/client/gossmap.py
reopen_store
lightning-developer/lightning
2,288
python
def reopen_store(self): assert False
def reopen_store(self): assert False<|docstring|>FIXME: Implement!<|endoftext|>
1aebab2b28668523a38ce4a4b8321100f6232f54de9ee83432d589e055fb8db3
def _pull_bytes(self, length: int) -> bool: 'Pull bytes from file into our internal buffer' if (len(self.store_buf) < length): self.store_buf += self.store_file.read((length - len(self.store_buf))) return (len(self.store_buf) >= length)
Pull bytes from file into our internal buffer
contrib/pyln-client/pyln/client/gossmap.py
_pull_bytes
lightning-developer/lightning
2,288
python
def _pull_bytes(self, length: int) -> bool: if (len(self.store_buf) < length): self.store_buf += self.store_file.read((length - len(self.store_buf))) return (len(self.store_buf) >= length)
def _pull_bytes(self, length: int) -> bool: if (len(self.store_buf) < length): self.store_buf += self.store_file.read((length - len(self.store_buf))) return (len(self.store_buf) >= length)<|docstring|>Pull bytes from file into our internal buffer<|endoftext|>
43b9813dfcf0d49f778877a8296b1d20c5fc4b2cdb54c23d98e7253669f6604f
def _read_record(self) -> Optional[bytes]: 'If a whole record is not in the file, returns None.\n If deleted, returns empty.' if (not self._pull_bytes(12)): return None hdr = GossipStoreHeader(self.store_buf[:12]) if (not self._pull_bytes((12 + hdr.length))): return None self.bytes_read += len(self.store_buf) ret = self.store_buf[12:] self.store_buf = bytes() if hdr.deleted: ret = bytes() return ret
If a whole record is not in the file, returns None. If deleted, returns empty.
contrib/pyln-client/pyln/client/gossmap.py
_read_record
lightning-developer/lightning
2,288
python
def _read_record(self) -> Optional[bytes]: 'If a whole record is not in the file, returns None.\n If deleted, returns empty.' if (not self._pull_bytes(12)): return None hdr = GossipStoreHeader(self.store_buf[:12]) if (not self._pull_bytes((12 + hdr.length))): return None self.bytes_read += len(self.store_buf) ret = self.store_buf[12:] self.store_buf = bytes() if hdr.deleted: ret = bytes() return ret
def _read_record(self) -> Optional[bytes]: 'If a whole record is not in the file, returns None.\n If deleted, returns empty.' if (not self._pull_bytes(12)): return None hdr = GossipStoreHeader(self.store_buf[:12]) if (not self._pull_bytes((12 + hdr.length))): return None self.bytes_read += len(self.store_buf) ret = self.store_buf[12:] self.store_buf = bytes() if hdr.deleted: ret = bytes() return ret<|docstring|>If a whole record is not in the file, returns None. If deleted, returns empty.<|endoftext|>
01788f1659ac38ebae40e25b598f4770609f0172458a95bdf0feb4d2c07f283d
def refresh(self): 'Catch up with any changes to the gossip store' while True: off = self.bytes_read rec = self._read_record() if (rec is None): break if (len(rec) == 0): continue (rectype,) = struct.unpack('>H', rec[:2]) if (rectype == channel_announcement.number): self._add_channel(rec, off, False) elif (rectype == WIRE_GOSSIP_STORE_PRIVATE_CHANNEL): self._add_channel(rec[((2 + 8) + 2):], (((off + 2) + 8) + 2), True) elif (rectype == WIRE_GOSSIP_STORE_CHANNEL_AMOUNT): self._set_channel_amount(rec) elif (rectype == channel_update.number): self._update_channel(rec, off) elif (rectype == WIRE_GOSSIP_STORE_PRIVATE_UPDATE): self._update_channel(rec[(2 + 2):], ((off + 2) + 2)) elif (rectype == WIRE_GOSSIP_STORE_DELETE_CHAN): self._remove_channel_by_deletemsg(rec) elif (rectype == node_announcement.number): self._add_node_announcement(rec, off) elif (rectype == WIRE_GOSSIP_STORE_ENDED): self.reopen_store() else: continue
Catch up with any changes to the gossip store
contrib/pyln-client/pyln/client/gossmap.py
refresh
lightning-developer/lightning
2,288
python
def refresh(self): while True: off = self.bytes_read rec = self._read_record() if (rec is None): break if (len(rec) == 0): continue (rectype,) = struct.unpack('>H', rec[:2]) if (rectype == channel_announcement.number): self._add_channel(rec, off, False) elif (rectype == WIRE_GOSSIP_STORE_PRIVATE_CHANNEL): self._add_channel(rec[((2 + 8) + 2):], (((off + 2) + 8) + 2), True) elif (rectype == WIRE_GOSSIP_STORE_CHANNEL_AMOUNT): self._set_channel_amount(rec) elif (rectype == channel_update.number): self._update_channel(rec, off) elif (rectype == WIRE_GOSSIP_STORE_PRIVATE_UPDATE): self._update_channel(rec[(2 + 2):], ((off + 2) + 2)) elif (rectype == WIRE_GOSSIP_STORE_DELETE_CHAN): self._remove_channel_by_deletemsg(rec) elif (rectype == node_announcement.number): self._add_node_announcement(rec, off) elif (rectype == WIRE_GOSSIP_STORE_ENDED): self.reopen_store() else: continue
def refresh(self): while True: off = self.bytes_read rec = self._read_record() if (rec is None): break if (len(rec) == 0): continue (rectype,) = struct.unpack('>H', rec[:2]) if (rectype == channel_announcement.number): self._add_channel(rec, off, False) elif (rectype == WIRE_GOSSIP_STORE_PRIVATE_CHANNEL): self._add_channel(rec[((2 + 8) + 2):], (((off + 2) + 8) + 2), True) elif (rectype == WIRE_GOSSIP_STORE_CHANNEL_AMOUNT): self._set_channel_amount(rec) elif (rectype == channel_update.number): self._update_channel(rec, off) elif (rectype == WIRE_GOSSIP_STORE_PRIVATE_UPDATE): self._update_channel(rec[(2 + 2):], ((off + 2) + 2)) elif (rectype == WIRE_GOSSIP_STORE_DELETE_CHAN): self._remove_channel_by_deletemsg(rec) elif (rectype == node_announcement.number): self._add_node_announcement(rec, off) elif (rectype == WIRE_GOSSIP_STORE_ENDED): self.reopen_store() else: continue<|docstring|>Catch up with any changes to the gossip store<|endoftext|>
b8292bf00e41de9b78a19f1129a03ab0fb97a9dc5f2a18bb971a73ee618f8527
def __init__(self, token: str=''): " This is the Base Class for the AniApi wrapper.\n This class will only contain the resources given at the docs,\n oauth will be extended by the other classes.\n\n In this class you will find other than the standard requests the `auth me` requests,\n when you want them oAuth stuff please use the :class:`AniApiOAuth` class,\n it's a subclass of this class.\n\n Attributes:\n -----------\n token : [:class:`str`]\n The API Token you get from https://aniapi.com/profile.\n If your application is inside the read-only scope then you don't need to provide a token.\n\n timeout : [:class:`int`]\n The timeout for the connection.\n " super().__init__() self.headers = default_header(token)
This is the Base Class for the AniApi wrapper. This class will only contain the resources given at the docs, oauth will be extended by the other classes. In this class you will find other than the standard requests the `auth me` requests, when you want them oAuth stuff please use the :class:`AniApiOAuth` class, it's a subclass of this class. Attributes: ----------- token : [:class:`str`] The API Token you get from https://aniapi.com/profile. If your application is inside the read-only scope then you don't need to provide a token. timeout : [:class:`int`] The timeout for the connection.
wrapper.py
__init__
exersalza/AniAPI-pywrapper
0
python
def __init__(self, token: str=): " This is the Base Class for the AniApi wrapper.\n This class will only contain the resources given at the docs,\n oauth will be extended by the other classes.\n\n In this class you will find other than the standard requests the `auth me` requests,\n when you want them oAuth stuff please use the :class:`AniApiOAuth` class,\n it's a subclass of this class.\n\n Attributes:\n -----------\n token : [:class:`str`]\n The API Token you get from https://aniapi.com/profile.\n If your application is inside the read-only scope then you don't need to provide a token.\n\n timeout : [:class:`int`]\n The timeout for the connection.\n " super().__init__() self.headers = default_header(token)
def __init__(self, token: str=): " This is the Base Class for the AniApi wrapper.\n This class will only contain the resources given at the docs,\n oauth will be extended by the other classes.\n\n In this class you will find other than the standard requests the `auth me` requests,\n when you want them oAuth stuff please use the :class:`AniApiOAuth` class,\n it's a subclass of this class.\n\n Attributes:\n -----------\n token : [:class:`str`]\n The API Token you get from https://aniapi.com/profile.\n If your application is inside the read-only scope then you don't need to provide a token.\n\n timeout : [:class:`int`]\n The timeout for the connection.\n " super().__init__() self.headers = default_header(token)<|docstring|>This is the Base Class for the AniApi wrapper. This class will only contain the resources given at the docs, oauth will be extended by the other classes. In this class you will find other than the standard requests the `auth me` requests, when you want them oAuth stuff please use the :class:`AniApiOAuth` class, it's a subclass of this class. Attributes: ----------- token : [:class:`str`] The API Token you get from https://aniapi.com/profile. If your application is inside the read-only scope then you don't need to provide a token. timeout : [:class:`int`] The timeout for the connection.<|endoftext|>
321ade4dff21bac1f5a47df285eaa7117d253b0be5071cf4d03449ff1b269de3
def get_requests(self, _id, url, params, obj) -> dict: ' For development method. this method will be used later to make it easier\n to implement new endpoints.\n\n Parameters\n ----------\n _id : [:class:`int`]\n The id for the url for a specific endpoint e.s. `/anime/{id}`.\n url : [:class:`str`]\n The url identifier for the endpoint e.s. `anime`.\n params : [:class:`dict`]\n The extra filter arguments to deliver\n obj : [:class:`object`]\n The object for the conversion\n\n Returns\n -------\n :class:`dict`\n The converted response\n ' (res, headers) = self.get(f'/{API_VERSION}/{url}/{_id}?{urlencode(params)}', headers=self.headers) data = create_data_dict(res, headers) if _id: data['data'] = obj(**data.get('data')) return data if data.get('data', False): data['data']['documents'] = [obj(**i) for i in data['data']['documents']] data['data'] = DataObj(**data['data']) return data
For development method. this method will be used later to make it easier to implement new endpoints. Parameters ---------- _id : [:class:`int`] The id for the url for a specific endpoint e.s. `/anime/{id}`. url : [:class:`str`] The url identifier for the endpoint e.s. `anime`. params : [:class:`dict`] The extra filter arguments to deliver obj : [:class:`object`] The object for the conversion Returns ------- :class:`dict` The converted response
wrapper.py
get_requests
exersalza/AniAPI-pywrapper
0
python
def get_requests(self, _id, url, params, obj) -> dict: ' For development method. this method will be used later to make it easier\n to implement new endpoints.\n\n Parameters\n ----------\n _id : [:class:`int`]\n The id for the url for a specific endpoint e.s. `/anime/{id}`.\n url : [:class:`str`]\n The url identifier for the endpoint e.s. `anime`.\n params : [:class:`dict`]\n The extra filter arguments to deliver\n obj : [:class:`object`]\n The object for the conversion\n\n Returns\n -------\n :class:`dict`\n The converted response\n ' (res, headers) = self.get(f'/{API_VERSION}/{url}/{_id}?{urlencode(params)}', headers=self.headers) data = create_data_dict(res, headers) if _id: data['data'] = obj(**data.get('data')) return data if data.get('data', False): data['data']['documents'] = [obj(**i) for i in data['data']['documents']] data['data'] = DataObj(**data['data']) return data
def get_requests(self, _id, url, params, obj) -> dict: ' For development method. this method will be used later to make it easier\n to implement new endpoints.\n\n Parameters\n ----------\n _id : [:class:`int`]\n The id for the url for a specific endpoint e.s. `/anime/{id}`.\n url : [:class:`str`]\n The url identifier for the endpoint e.s. `anime`.\n params : [:class:`dict`]\n The extra filter arguments to deliver\n obj : [:class:`object`]\n The object for the conversion\n\n Returns\n -------\n :class:`dict`\n The converted response\n ' (res, headers) = self.get(f'/{API_VERSION}/{url}/{_id}?{urlencode(params)}', headers=self.headers) data = create_data_dict(res, headers) if _id: data['data'] = obj(**data.get('data')) return data if data.get('data', False): data['data']['documents'] = [obj(**i) for i in data['data']['documents']] data['data'] = DataObj(**data['data']) return data<|docstring|>For development method. this method will be used later to make it easier to implement new endpoints. Parameters ---------- _id : [:class:`int`] The id for the url for a specific endpoint e.s. `/anime/{id}`. url : [:class:`str`] The url identifier for the endpoint e.s. `anime`. params : [:class:`dict`] The extra filter arguments to deliver obj : [:class:`object`] The object for the conversion Returns ------- :class:`dict` The converted response<|endoftext|>
c5ef3f595c750c7a522e28ed69794ae5af58b50de124bbcc5e1f8de109eec9ae
def get_anime(self, anime_id: int='', **kwargs) -> Ctx: " Get an Anime object list from the API.\n You can provide an ID or query parameters to get a single AnimeObject (:class:`Anime`) or an :class:`list`\n of objects.\n\n Parameters:\n ----------\n anime_id : Optional[:class:`int`]\n The ID for the Anime you want to get. Beware it's **not** the mal_id,\n tmdb_id or the anilist_id they\n can be different and getting handeld by the `**kwargs` parameter.\n When you provide an ID, you can't use the\n `**kwargs` parameter.\n\n **kwargs : Optional[:class:`dict`]\n The parameters that you want to use to spice up your request.\n Supported Parameters can be found inside the `utils.flags` file.\n\n Returns\n -------\n :class:`Ctx`\n A Context object with the query returns and the rate limit information.\n\n Raises\n -------\n InvalidFlagsException\n When you try to use any flags that are not supported.\n\n Examples\n ---------\n >>> from wrapper import AniApi\n >>> api = AniApi(token='your_token')\n >>> api.get_anime(1, status=0) # Get Anime with ID 1 and status 0 (finished)\n <status_code=200 message='Anime found' data=<id=1 title='Cowboy Bebop' episodes=26 status=0> version='1'>\n " invalid = (set(kwargs) - set(ANIME_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(anime_id, 'anime', kwargs, AnimeObj) return Ctx(**data)
Get an Anime object list from the API. You can provide an ID or query parameters to get a single AnimeObject (:class:`Anime`) or an :class:`list` of objects. Parameters: ---------- anime_id : Optional[:class:`int`] The ID for the Anime you want to get. Beware it's **not** the mal_id, tmdb_id or the anilist_id they can be different and getting handeld by the `**kwargs` parameter. When you provide an ID, you can't use the `**kwargs` parameter. **kwargs : Optional[:class:`dict`] The parameters that you want to use to spice up your request. Supported Parameters can be found inside the `utils.flags` file. Returns ------- :class:`Ctx` A Context object with the query returns and the rate limit information. Raises ------- InvalidFlagsException When you try to use any flags that are not supported. Examples --------- >>> from wrapper import AniApi >>> api = AniApi(token='your_token') >>> api.get_anime(1, status=0) # Get Anime with ID 1 and status 0 (finished) <status_code=200 message='Anime found' data=<id=1 title='Cowboy Bebop' episodes=26 status=0> version='1'>
wrapper.py
get_anime
exersalza/AniAPI-pywrapper
0
python
def get_anime(self, anime_id: int=, **kwargs) -> Ctx: " Get an Anime object list from the API.\n You can provide an ID or query parameters to get a single AnimeObject (:class:`Anime`) or an :class:`list`\n of objects.\n\n Parameters:\n ----------\n anime_id : Optional[:class:`int`]\n The ID for the Anime you want to get. Beware it's **not** the mal_id,\n tmdb_id or the anilist_id they\n can be different and getting handeld by the `**kwargs` parameter.\n When you provide an ID, you can't use the\n `**kwargs` parameter.\n\n **kwargs : Optional[:class:`dict`]\n The parameters that you want to use to spice up your request.\n Supported Parameters can be found inside the `utils.flags` file.\n\n Returns\n -------\n :class:`Ctx`\n A Context object with the query returns and the rate limit information.\n\n Raises\n -------\n InvalidFlagsException\n When you try to use any flags that are not supported.\n\n Examples\n ---------\n >>> from wrapper import AniApi\n >>> api = AniApi(token='your_token')\n >>> api.get_anime(1, status=0) # Get Anime with ID 1 and status 0 (finished)\n <status_code=200 message='Anime found' data=<id=1 title='Cowboy Bebop' episodes=26 status=0> version='1'>\n " invalid = (set(kwargs) - set(ANIME_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(anime_id, 'anime', kwargs, AnimeObj) return Ctx(**data)
def get_anime(self, anime_id: int=, **kwargs) -> Ctx: " Get an Anime object list from the API.\n You can provide an ID or query parameters to get a single AnimeObject (:class:`Anime`) or an :class:`list`\n of objects.\n\n Parameters:\n ----------\n anime_id : Optional[:class:`int`]\n The ID for the Anime you want to get. Beware it's **not** the mal_id,\n tmdb_id or the anilist_id they\n can be different and getting handeld by the `**kwargs` parameter.\n When you provide an ID, you can't use the\n `**kwargs` parameter.\n\n **kwargs : Optional[:class:`dict`]\n The parameters that you want to use to spice up your request.\n Supported Parameters can be found inside the `utils.flags` file.\n\n Returns\n -------\n :class:`Ctx`\n A Context object with the query returns and the rate limit information.\n\n Raises\n -------\n InvalidFlagsException\n When you try to use any flags that are not supported.\n\n Examples\n ---------\n >>> from wrapper import AniApi\n >>> api = AniApi(token='your_token')\n >>> api.get_anime(1, status=0) # Get Anime with ID 1 and status 0 (finished)\n <status_code=200 message='Anime found' data=<id=1 title='Cowboy Bebop' episodes=26 status=0> version='1'>\n " invalid = (set(kwargs) - set(ANIME_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(anime_id, 'anime', kwargs, AnimeObj) return Ctx(**data)<|docstring|>Get an Anime object list from the API. You can provide an ID or query parameters to get a single AnimeObject (:class:`Anime`) or an :class:`list` of objects. Parameters: ---------- anime_id : Optional[:class:`int`] The ID for the Anime you want to get. Beware it's **not** the mal_id, tmdb_id or the anilist_id they can be different and getting handeld by the `**kwargs` parameter. When you provide an ID, you can't use the `**kwargs` parameter. **kwargs : Optional[:class:`dict`] The parameters that you want to use to spice up your request. 
Supported Parameters can be found inside the `utils.flags` file. Returns ------- :class:`Ctx` A Context object with the query returns and the rate limit information. Raises ------- InvalidFlagsException When you try to use any flags that are not supported. Examples --------- >>> from wrapper import AniApi >>> api = AniApi(token='your_token') >>> api.get_anime(1, status=0) # Get Anime with ID 1 and status 0 (finished) <status_code=200 message='Anime found' data=<id=1 title='Cowboy Bebop' episodes=26 status=0> version='1'><|endoftext|>
643a0bd0c2dd5d6b042b437f4e4cca14a62fe6933f6f978f38e80338d35533fc
def get_random_anime(self, count: int=1, nsfw: bool=False) -> Ctx: " Get one or more random Animes from the API.\n\n Parameters\n ----------\n count : :class:`int`\n The amount of Animes you want to get. Value should be between 1 and 50.\n\n nsfw : :class:`bool`\n If you want to get NSFW Animes. Default is False.\n\n Returns\n -------\n :class:`Ctx`\n Context object with the query returns and the rate limit information.\n\n Raises\n -------\n ValueError\n The count can't be less than 1 or more than 50. The api return 50 at max.\n " if ((count > 50) or (count < 1)): raise ValueError('Count must be less than 50 and more or equal to 1') (res, header) = self.get(f'/{API_VERSION}/random/anime/{count}/{nsfw}', headers=self.headers) data = create_data_dict(res, header) data['data'] = [AnimeObj(**anime) for anime in data['data']] return Ctx(**data)
Get one or more random Animes from the API. Parameters ---------- count : :class:`int` The amount of Animes you want to get. Value should be between 1 and 50. nsfw : :class:`bool` If you want to get NSFW Animes. Default is False. Returns ------- :class:`Ctx` Context object with the query returns and the rate limit information. Raises ------- ValueError The count can't be less than 1 or more than 50. The api return 50 at max.
wrapper.py
get_random_anime
exersalza/AniAPI-pywrapper
0
python
def get_random_anime(self, count: int=1, nsfw: bool=False) -> Ctx: " Get one or more random Animes from the API.\n\n Parameters\n ----------\n count : :class:`int`\n The amount of Animes you want to get. Value should be between 1 and 50.\n\n nsfw : :class:`bool`\n If you want to get NSFW Animes. Default is False.\n\n Returns\n -------\n :class:`Ctx`\n Context object with the query returns and the rate limit information.\n\n Raises\n -------\n ValueError\n The count can't be less than 1 or more than 50. The api return 50 at max.\n " if ((count > 50) or (count < 1)): raise ValueError('Count must be less than 50 and more or equal to 1') (res, header) = self.get(f'/{API_VERSION}/random/anime/{count}/{nsfw}', headers=self.headers) data = create_data_dict(res, header) data['data'] = [AnimeObj(**anime) for anime in data['data']] return Ctx(**data)
def get_random_anime(self, count: int=1, nsfw: bool=False) -> Ctx: " Get one or more random Animes from the API.\n\n Parameters\n ----------\n count : :class:`int`\n The amount of Animes you want to get. Value should be between 1 and 50.\n\n nsfw : :class:`bool`\n If you want to get NSFW Animes. Default is False.\n\n Returns\n -------\n :class:`Ctx`\n Context object with the query returns and the rate limit information.\n\n Raises\n -------\n ValueError\n The count can't be less than 1 or more than 50. The api return 50 at max.\n " if ((count > 50) or (count < 1)): raise ValueError('Count must be less than 50 and more or equal to 1') (res, header) = self.get(f'/{API_VERSION}/random/anime/{count}/{nsfw}', headers=self.headers) data = create_data_dict(res, header) data['data'] = [AnimeObj(**anime) for anime in data['data']] return Ctx(**data)<|docstring|>Get one or more random Animes from the API. Parameters ---------- count : :class:`int` The amount of Animes you want to get. Value should be between 1 and 50. nsfw : :class:`bool` If you want to get NSFW Animes. Default is False. Returns ------- :class:`Ctx` Context object with the query returns and the rate limit information. Raises ------- ValueError The count can't be less than 1 or more than 50. The api return 50 at max.<|endoftext|>
77eacf38bd777dde7e12484b83c38ae65ce7edd2baae2d6e42ac3caa1f18cc08
def get_episode(self, episode_id: int='', **kwargs) -> Ctx: " Get an Episode from the API.\n\n Parameters\n ----------\n episode_id : Optional[:class:`int`]\n Give an ID to get a Specific Episode, note that all other\n parameters get dumped when you provide an ID.\n\n **kwargs :\n Apply filter like `anime_id` or enter a `pagination` valid filter\n can be found inside the `utils.flags` file.\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n Raises\n -------\n InvalidFlagsException\n\n Examples\n ---------\n >>> from wrapper import AniApi\n >>> api = AniApi(token='your_token')\n >>> api.get_episode(1) # Get Episode with ID 1\n <status_code=200 message='Episode found' data=<id=1 anime_id=1 number=1 locale=en> version='1'>\n " invalid = (set(kwargs) - set(EPISODE_REQ)) if invalid: raise InvalidParamsValueException(f'Invalid parameters: {invalid}') data = self.get_requests(episode_id, 'episode', kwargs, EpisodeObj) return Ctx(**data)
Get an Episode from the API. Parameters ---------- episode_id : Optional[:class:`int`] Give an ID to get a Specific Episode, note that all other parameters get dumped when you provide an ID. **kwargs : Apply filter like `anime_id` or enter a `pagination` valid filter can be found inside the `utils.flags` file. Returns ------- :class:`Ctx` A context object with the query returns and the rate limit information. Raises ------- InvalidFlagsException Examples --------- >>> from wrapper import AniApi >>> api = AniApi(token='your_token') >>> api.get_episode(1) # Get Episode with ID 1 <status_code=200 message='Episode found' data=<id=1 anime_id=1 number=1 locale=en> version='1'>
wrapper.py
get_episode
exersalza/AniAPI-pywrapper
0
python
def get_episode(self, episode_id: int=, **kwargs) -> Ctx: " Get an Episode from the API.\n\n Parameters\n ----------\n episode_id : Optional[:class:`int`]\n Give an ID to get a Specific Episode, note that all other\n parameters get dumped when you provide an ID.\n\n **kwargs :\n Apply filter like `anime_id` or enter a `pagination` valid filter\n can be found inside the `utils.flags` file.\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n Raises\n -------\n InvalidFlagsException\n\n Examples\n ---------\n >>> from wrapper import AniApi\n >>> api = AniApi(token='your_token')\n >>> api.get_episode(1) # Get Episode with ID 1\n <status_code=200 message='Episode found' data=<id=1 anime_id=1 number=1 locale=en> version='1'>\n " invalid = (set(kwargs) - set(EPISODE_REQ)) if invalid: raise InvalidParamsValueException(f'Invalid parameters: {invalid}') data = self.get_requests(episode_id, 'episode', kwargs, EpisodeObj) return Ctx(**data)
def get_episode(self, episode_id: int=, **kwargs) -> Ctx: " Get an Episode from the API.\n\n Parameters\n ----------\n episode_id : Optional[:class:`int`]\n Give an ID to get a Specific Episode, note that all other\n parameters get dumped when you provide an ID.\n\n **kwargs :\n Apply filter like `anime_id` or enter a `pagination` valid filter\n can be found inside the `utils.flags` file.\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n Raises\n -------\n InvalidFlagsException\n\n Examples\n ---------\n >>> from wrapper import AniApi\n >>> api = AniApi(token='your_token')\n >>> api.get_episode(1) # Get Episode with ID 1\n <status_code=200 message='Episode found' data=<id=1 anime_id=1 number=1 locale=en> version='1'>\n " invalid = (set(kwargs) - set(EPISODE_REQ)) if invalid: raise InvalidParamsValueException(f'Invalid parameters: {invalid}') data = self.get_requests(episode_id, 'episode', kwargs, EpisodeObj) return Ctx(**data)<|docstring|>Get an Episode from the API. Parameters ---------- episode_id : Optional[:class:`int`] Give an ID to get a Specific Episode, note that all other parameters get dumped when you provide an ID. **kwargs : Apply filter like `anime_id` or enter a `pagination` valid filter can be found inside the `utils.flags` file. Returns ------- :class:`Ctx` A context object with the query returns and the rate limit information. Raises ------- InvalidFlagsException Examples --------- >>> from wrapper import AniApi >>> api = AniApi(token='your_token') >>> api.get_episode(1) # Get Episode with ID 1 <status_code=200 message='Episode found' data=<id=1 anime_id=1 number=1 locale=en> version='1'><|endoftext|>
388197ce62b639d06585c7fed7f8126f1e1c817e9303984cf8fa7f926e02e508
def get_song(self, song_id: int='', **kwargs) -> Ctx: ' Get from 1 up to 100 songs at the time from the Api\n\n Parameters\n ----------\n song_id : Optional[:class:`int`]\n Give an ID to get a Specific Song, note that all other parameters\n get dumped when you provide an ID.\n\n kwargs : Optional[:class:`dict`]\n Apply filter like `anime_id` or enter a `pagination` valid filter can\n be found inside the `utils.flags` file\n or at the docs: https://aniapi.com/docs/resources/song#parameters-1\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n ' invalid = (set(kwargs) - set(SONG_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(song_id, 'song', kwargs, SongObj) return Ctx(**data)
Get from 1 up to 100 songs at the time from the Api Parameters ---------- song_id : Optional[:class:`int`] Give an ID to get a Specific Song, note that all other parameters get dumped when you provide an ID. kwargs : Optional[:class:`dict`] Apply filter like `anime_id` or enter a `pagination` valid filter can be found inside the `utils.flags` file or at the docs: https://aniapi.com/docs/resources/song#parameters-1 Returns ------- :class:`Ctx` A context object with the query returns and the rate limit information.
wrapper.py
get_song
exersalza/AniAPI-pywrapper
0
python
def get_song(self, song_id: int=, **kwargs) -> Ctx: ' Get from 1 up to 100 songs at the time from the Api\n\n Parameters\n ----------\n song_id : Optional[:class:`int`]\n Give an ID to get a Specific Song, note that all other parameters\n get dumped when you provide an ID.\n\n kwargs : Optional[:class:`dict`]\n Apply filter like `anime_id` or enter a `pagination` valid filter can\n be found inside the `utils.flags` file\n or at the docs: https://aniapi.com/docs/resources/song#parameters-1\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n ' invalid = (set(kwargs) - set(SONG_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(song_id, 'song', kwargs, SongObj) return Ctx(**data)
def get_song(self, song_id: int=, **kwargs) -> Ctx: ' Get from 1 up to 100 songs at the time from the Api\n\n Parameters\n ----------\n song_id : Optional[:class:`int`]\n Give an ID to get a Specific Song, note that all other parameters\n get dumped when you provide an ID.\n\n kwargs : Optional[:class:`dict`]\n Apply filter like `anime_id` or enter a `pagination` valid filter can\n be found inside the `utils.flags` file\n or at the docs: https://aniapi.com/docs/resources/song#parameters-1\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n ' invalid = (set(kwargs) - set(SONG_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(song_id, 'song', kwargs, SongObj) return Ctx(**data)<|docstring|>Get from 1 up to 100 songs at the time from the Api Parameters ---------- song_id : Optional[:class:`int`] Give an ID to get a Specific Song, note that all other parameters get dumped when you provide an ID. kwargs : Optional[:class:`dict`] Apply filter like `anime_id` or enter a `pagination` valid filter can be found inside the `utils.flags` file or at the docs: https://aniapi.com/docs/resources/song#parameters-1 Returns ------- :class:`Ctx` A context object with the query returns and the rate limit information.<|endoftext|>
8fe5c882d27f8ee04443c9c95da05fb1b751687a1fb1c6f3df1d2b974ac57ac1
def get_random_song(self, count: int=1) -> Ctx: "\n It's the same as get_random_anime but for another endpoint and without nsfw tag.\n\n Parameters\n ----------\n count : :class:`int`\n The amount of songs you want to get. Value should be between 1 and 50.\n When you go over the value you get 50 at max. so I set a cap at 50.\n\n Returns\n -------\n :class:`Ctx`\n Context object with the query returns and the rate limit information.\n " if ((count > 50) or (count < 1)): raise ValueError('Count must be less than 50 and more or equal to 1') (res, header) = self.get(f'/{API_VERSION}/random/song/{count}', headers=self.headers) data = create_data_dict(res, header) data['data'] = [SongObj(**song) for song in data['data']] return Ctx(**data)
It's the same as get_random_anime but for another endpoint and without nsfw tag. Parameters ---------- count : :class:`int` The amount of songs you want to get. Value should be between 1 and 50. When you go over the value you get 50 at max. so I set a cap at 50. Returns ------- :class:`Ctx` Context object with the query returns and the rate limit information.
wrapper.py
get_random_song
exersalza/AniAPI-pywrapper
0
python
def get_random_song(self, count: int=1) -> Ctx: "\n It's the same as get_random_anime but for another endpoint and without nsfw tag.\n\n Parameters\n ----------\n count : :class:`int`\n The amount of songs you want to get. Value should be between 1 and 50.\n When you go over the value you get 50 at max. so I set a cap at 50.\n\n Returns\n -------\n :class:`Ctx`\n Context object with the query returns and the rate limit information.\n " if ((count > 50) or (count < 1)): raise ValueError('Count must be less than 50 and more or equal to 1') (res, header) = self.get(f'/{API_VERSION}/random/song/{count}', headers=self.headers) data = create_data_dict(res, header) data['data'] = [SongObj(**song) for song in data['data']] return Ctx(**data)
def get_random_song(self, count: int=1) -> Ctx: "\n It's the same as get_random_anime but for another endpoint and without nsfw tag.\n\n Parameters\n ----------\n count : :class:`int`\n The amount of songs you want to get. Value should be between 1 and 50.\n When you go over the value you get 50 at max. so I set a cap at 50.\n\n Returns\n -------\n :class:`Ctx`\n Context object with the query returns and the rate limit information.\n " if ((count > 50) or (count < 1)): raise ValueError('Count must be less than 50 and more or equal to 1') (res, header) = self.get(f'/{API_VERSION}/random/song/{count}', headers=self.headers) data = create_data_dict(res, header) data['data'] = [SongObj(**song) for song in data['data']] return Ctx(**data)<|docstring|>It's the same as get_random_anime but for another endpoint and without nsfw tag. Parameters ---------- count : :class:`int` The amount of songs you want to get. Value should be between 1 and 50. When you go over the value you get 50 at max. so I set a cap at 50. Returns ------- :class:`Ctx` Context object with the query returns and the rate limit information.<|endoftext|>
70c044e33815bc269c52d2fe2b66eb1fb348ae09307822682095032f5f8e138f
def get_resources(self, version: float, _type: int) -> Ctx: ' Get the resources of the AniApi\n\n Parameters\n ----------\n version : :class:`float`\n The version from the resource.\n\n _type : :class:`int`\n The type of resource you want to get.\n 0 = Anime Genres,\n 1 = Locales\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n ' (res, header) = self.get(f'/{API_VERSION}/resources/{version}/{_type}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)
Get the resources of the AniApi Parameters ---------- version : :class:`float` The version from the resource. _type : :class:`int` The type of resource you want to get. 0 = Anime Genres, 1 = Locales Returns ------- :class:`Ctx` A context object with the query returns and the rate limit information.
wrapper.py
get_resources
exersalza/AniAPI-pywrapper
0
python
def get_resources(self, version: float, _type: int) -> Ctx: ' Get the resources of the AniApi\n\n Parameters\n ----------\n version : :class:`float`\n The version from the resource.\n\n _type : :class:`int`\n The type of resource you want to get.\n 0 = Anime Genres,\n 1 = Locales\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n ' (res, header) = self.get(f'/{API_VERSION}/resources/{version}/{_type}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)
def get_resources(self, version: float, _type: int) -> Ctx: ' Get the resources of the AniApi\n\n Parameters\n ----------\n version : :class:`float`\n The version from the resource.\n\n _type : :class:`int`\n The type of resource you want to get.\n 0 = Anime Genres,\n 1 = Locales\n\n Returns\n -------\n :class:`Ctx`\n A context object with the query returns and the rate limit information.\n ' (res, header) = self.get(f'/{API_VERSION}/resources/{version}/{_type}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>Get the resources of the AniApi Parameters ---------- version : :class:`float` The version from the resource. _type : :class:`int` The type of resource you want to get. 0 = Anime Genres, 1 = Locales Returns ------- :class:`Ctx` A context object with the query returns and the rate limit information.<|endoftext|>
04cd67c73b4fd06da37f5950a5141c7eee661641b1792ae70dc34ad33501ea43
def get_user_story(self, story_id: int='', **kwargs) -> Ctx: ' Get a list or specific UserStory from the API\n\n Parameters\n ----------\n story_id : [:class:`int`]\n The UserStory id to get, note: when you provide an id.\n\n kwargs\n Include filter for the List request\n\n Returns\n -------\n :class:`Ctx`\n Ctx object with the response from the get request\n\n ' invalid = (set(kwargs) - set(USER_STORY_REQ)) if invalid: raise InvalidParamsException(f'Invalid arguments: {invalid}') (res, header) = self.get(f'/{API_VERSION}/user_story/{story_id}?{urlencode(kwargs)}', self.headers) data = create_data_dict(res, header) return Ctx(**data)
Get a list or specific UserStory from the API Parameters ---------- story_id : [:class:`int`] The UserStory id to get, note: when you provide an id. kwargs Include filter for the List request Returns ------- :class:`Ctx` Ctx object with the response from the get request
wrapper.py
get_user_story
exersalza/AniAPI-pywrapper
0
python
def get_user_story(self, story_id: int=, **kwargs) -> Ctx: ' Get a list or specific UserStory from the API\n\n Parameters\n ----------\n story_id : [:class:`int`]\n The UserStory id to get, note: when you provide an id.\n\n kwargs\n Include filter for the List request\n\n Returns\n -------\n :class:`Ctx`\n Ctx object with the response from the get request\n\n ' invalid = (set(kwargs) - set(USER_STORY_REQ)) if invalid: raise InvalidParamsException(f'Invalid arguments: {invalid}') (res, header) = self.get(f'/{API_VERSION}/user_story/{story_id}?{urlencode(kwargs)}', self.headers) data = create_data_dict(res, header) return Ctx(**data)
def get_user_story(self, story_id: int=, **kwargs) -> Ctx: ' Get a list or specific UserStory from the API\n\n Parameters\n ----------\n story_id : [:class:`int`]\n The UserStory id to get, note: when you provide an id.\n\n kwargs\n Include filter for the List request\n\n Returns\n -------\n :class:`Ctx`\n Ctx object with the response from the get request\n\n ' invalid = (set(kwargs) - set(USER_STORY_REQ)) if invalid: raise InvalidParamsException(f'Invalid arguments: {invalid}') (res, header) = self.get(f'/{API_VERSION}/user_story/{story_id}?{urlencode(kwargs)}', self.headers) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>Get a list or specific UserStory from the API Parameters ---------- story_id : [:class:`int`] The UserStory id to get, note: when you provide an id. kwargs Include filter for the List request Returns ------- :class:`Ctx` Ctx object with the response from the get request<|endoftext|>
4deed6d3623bfb06808d49936a724e26239a5ed9877b0604d4157671a9b58e4e
def create_user_story(self, user_id: int, anime_id: int, status: int, **kwargs) -> Ctx: " This will create a UserStory based on the given parameters.\n\n Parameters\n ----------\n user_id : :class:`int`\n The User ID for the UserStory's bind.\n\n anime_id : :class:`int`\n The UserStory's Anime ID.\n\n status : :class:`int`\n The UserStory's watching status.\n\n kwargs : Optional\n These are the optional parameters.\n\n current_episode (int) -> The current watching progress. It must be less than\n the Anime's episode_count value, when you provide a status equal to 1 or 2 this field is auto-calculated.\n\n current_episode_ticks (int) -> The current episode watching time in milliseconds.\n\n Returns\n -------\n :class:`Ctx`\n The response as Ctx object\n " invalid = (set(kwargs) - {'current_episode', 'current_episode_ticks'}) if invalid: raise InvalidParamsException(f'Invalid parameters given: {invalid}') udata = {'user_id': user_id, 'anime_id': anime_id, 'status': status} udata.update(kwargs) (res, header) = self.post(url=f'/{API_VERSION}/user_story/', headers=self.headers, data=udata) data = create_data_dict(res, header) return Ctx(**data)
This will create a UserStory based on the given parameters. Parameters ---------- user_id : :class:`int` The User ID for the UserStory's bind. anime_id : :class:`int` The UserStory's Anime ID. status : :class:`int` The UserStory's watching status. kwargs : Optional These are the optional parameters. current_episode (int) -> The current watching progress. It must be less than the Anime's episode_count value, when you provide a status equal to 1 or 2 this field is auto-calculated. current_episode_ticks (int) -> The current episode watching time in milliseconds. Returns ------- :class:`Ctx` The response as Ctx object
wrapper.py
create_user_story
exersalza/AniAPI-pywrapper
0
python
def create_user_story(self, user_id: int, anime_id: int, status: int, **kwargs) -> Ctx: " This will create a UserStory based on the given parameters.\n\n Parameters\n ----------\n user_id : :class:`int`\n The User ID for the UserStory's bind.\n\n anime_id : :class:`int`\n The UserStory's Anime ID.\n\n status : :class:`int`\n The UserStory's watching status.\n\n kwargs : Optional\n These are the optional parameters.\n\n current_episode (int) -> The current watching progress. It must be less than\n the Anime's episode_count value, when you provide a status equal to 1 or 2 this field is auto-calculated.\n\n current_episode_ticks (int) -> The current episode watching time in milliseconds.\n\n Returns\n -------\n :class:`Ctx`\n The response as Ctx object\n " invalid = (set(kwargs) - {'current_episode', 'current_episode_ticks'}) if invalid: raise InvalidParamsException(f'Invalid parameters given: {invalid}') udata = {'user_id': user_id, 'anime_id': anime_id, 'status': status} udata.update(kwargs) (res, header) = self.post(url=f'/{API_VERSION}/user_story/', headers=self.headers, data=udata) data = create_data_dict(res, header) return Ctx(**data)
def create_user_story(self, user_id: int, anime_id: int, status: int, **kwargs) -> Ctx: " This will create a UserStory based on the given parameters.\n\n Parameters\n ----------\n user_id : :class:`int`\n The User ID for the UserStory's bind.\n\n anime_id : :class:`int`\n The UserStory's Anime ID.\n\n status : :class:`int`\n The UserStory's watching status.\n\n kwargs : Optional\n These are the optional parameters.\n\n current_episode (int) -> The current watching progress. It must be less than\n the Anime's episode_count value, when you provide a status equal to 1 or 2 this field is auto-calculated.\n\n current_episode_ticks (int) -> The current episode watching time in milliseconds.\n\n Returns\n -------\n :class:`Ctx`\n The response as Ctx object\n " invalid = (set(kwargs) - {'current_episode', 'current_episode_ticks'}) if invalid: raise InvalidParamsException(f'Invalid parameters given: {invalid}') udata = {'user_id': user_id, 'anime_id': anime_id, 'status': status} udata.update(kwargs) (res, header) = self.post(url=f'/{API_VERSION}/user_story/', headers=self.headers, data=udata) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>This will create a UserStory based on the given parameters. Parameters ---------- user_id : :class:`int` The User ID for the UserStory's bind. anime_id : :class:`int` The UserStory's Anime ID. status : :class:`int` The UserStory's watching status. kwargs : Optional These are the optional parameters. current_episode (int) -> The current watching progress. It must be less than the Anime's episode_count value, when you provide a status equal to 1 or 2 this field is auto-calculated. current_episode_ticks (int) -> The current episode watching time in milliseconds. Returns ------- :class:`Ctx` The response as Ctx object<|endoftext|>
2adfbe9ef9fde523064db90aa217331a4ef41a9234c5017d5fe3139dacab331d
def update_user_story(self, story_id: int, user_id: int, anime_id: int, status: int, ce: int, cet: int) -> Ctx: "\n Update a UserStory\n\n Parameters\n ----------\n story_id : [:class:`int`]\n -> id, on the docs.\n The UserStory's unique identifier.\n\n user_id : [:class:`int`]\n -> user_id, on the docs.\n The userid that is related to the UserStory.\n\n anime_id : [:class:`int`]\n -> anime_id, on the docs\n The UserStory's anime id.\n\n status : [:class:`int`]\n -> status, on the docs\n The watching status of the UserStory.\n\n ce : [:class:`int`]\n -> current_episode, on the docs\n The current watching progress on the UserStory, note: the watching progress must be less or equal to the\n anime's `episode_count`.\n\n cet : [:class:`int`]\n -> current_episode_ticks, on the docs\n The UserStory's `current_episode` watching time in milliseconds.\n\n Returns\n -------\n :class:`Ctx`\n A response from the API to prove if it works or not.\n " udata = {'id': story_id, 'user_id': user_id, 'anime_id': anime_id, 'status': status, 'current_episode': ce, 'current_episode_ticks': cet} (res, header) = self.post(url=f'/{API_VERSION}/user_story', headers=self.headers, data=udata) data = create_data_dict(res, header) return Ctx(**data)
Update a UserStory Parameters ---------- story_id : [:class:`int`] -> id, on the docs. The UserStory's unique identifier. user_id : [:class:`int`] -> user_id, on the docs. The userid that is related to the UserStory. anime_id : [:class:`int`] -> anime_id, on the docs The UserStory's anime id. status : [:class:`int`] -> status, on the docs The watching status of the UserStory. ce : [:class:`int`] -> current_episode, on the docs The current watching progress on the UserStory, note: the watching progress must be less or equal to the anime's `episode_count`. cet : [:class:`int`] -> current_episode_ticks, on the docs The UserStory's `current_episode` watching time in milliseconds. Returns ------- :class:`Ctx` A response from the API to prove if it works or not.
wrapper.py
update_user_story
exersalza/AniAPI-pywrapper
0
python
def update_user_story(self, story_id: int, user_id: int, anime_id: int, status: int, ce: int, cet: int) -> Ctx: "\n Update a UserStory\n\n Parameters\n ----------\n story_id : [:class:`int`]\n -> id, on the docs.\n The UserStory's unique identifier.\n\n user_id : [:class:`int`]\n -> user_id, on the docs.\n The userid that is related to the UserStory.\n\n anime_id : [:class:`int`]\n -> anime_id, on the docs\n The UserStory's anime id.\n\n status : [:class:`int`]\n -> status, on the docs\n The watching status of the UserStory.\n\n ce : [:class:`int`]\n -> current_episode, on the docs\n The current watching progress on the UserStory, note: the watching progress must be less or equal to the\n anime's `episode_count`.\n\n cet : [:class:`int`]\n -> current_episode_ticks, on the docs\n The UserStory's `current_episode` watching time in milliseconds.\n\n Returns\n -------\n :class:`Ctx`\n A response from the API to prove if it works or not.\n " udata = {'id': story_id, 'user_id': user_id, 'anime_id': anime_id, 'status': status, 'current_episode': ce, 'current_episode_ticks': cet} (res, header) = self.post(url=f'/{API_VERSION}/user_story', headers=self.headers, data=udata) data = create_data_dict(res, header) return Ctx(**data)
def update_user_story(self, story_id: int, user_id: int, anime_id: int, status: int, ce: int, cet: int) -> Ctx: "\n Update a UserStory\n\n Parameters\n ----------\n story_id : [:class:`int`]\n -> id, on the docs.\n The UserStory's unique identifier.\n\n user_id : [:class:`int`]\n -> user_id, on the docs.\n The userid that is related to the UserStory.\n\n anime_id : [:class:`int`]\n -> anime_id, on the docs\n The UserStory's anime id.\n\n status : [:class:`int`]\n -> status, on the docs\n The watching status of the UserStory.\n\n ce : [:class:`int`]\n -> current_episode, on the docs\n The current watching progress on the UserStory, note: the watching progress must be less or equal to the\n anime's `episode_count`.\n\n cet : [:class:`int`]\n -> current_episode_ticks, on the docs\n The UserStory's `current_episode` watching time in milliseconds.\n\n Returns\n -------\n :class:`Ctx`\n A response from the API to prove if it works or not.\n " udata = {'id': story_id, 'user_id': user_id, 'anime_id': anime_id, 'status': status, 'current_episode': ce, 'current_episode_ticks': cet} (res, header) = self.post(url=f'/{API_VERSION}/user_story', headers=self.headers, data=udata) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>Update a UserStory Parameters ---------- story_id : [:class:`int`] -> id, on the docs. The UserStory's unique identifier. user_id : [:class:`int`] -> user_id, on the docs. The userid that is related to the UserStory. anime_id : [:class:`int`] -> anime_id, on the docs The UserStory's anime id. status : [:class:`int`] -> status, on the docs The watching status of the UserStory. ce : [:class:`int`] -> current_episode, on the docs The current watching progress on the UserStory, note: the watching progress must be less or equal to the anime's `episode_count`. cet : [:class:`int`] -> current_episode_ticks, on the docs The UserStory's `current_episode` watching time in milliseconds. 
Returns ------- :class:`Ctx` A response from the API to prove if it works or not.<|endoftext|>
adf9581a6937a73e6fc09d8f44574612029b573d44d57c6343e86a8773170f07
def delete_user_story(self, _id: int) -> Ctx: '\n Deletes a UserStory on the provided unique identifier.\n\n Parameters\n ----------\n _id : [:class:`int`]\n The id from the UserStory that wanted to be deleted.\n\n Returns\n -------\n :class:`Ctx`\n Context obj with the response inside it\n\n Notes\n -----\n You should only use the endpoint when the User has 0 linked trackers, otherwise it will get re-imported.\n\n ' (res, header) = self.delete(url=f'/{API_VERSION}/user_story/{_id}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)
Deletes a UserStory on the provided unique identifier. Parameters ---------- _id : [:class:`int`] The id from the UserStory that wanted to be deleted. Returns ------- :class:`Ctx` Context obj with the response inside it Notes ----- You should only use the endpoint when the User has 0 linked trackers, otherwise it will get re-imported.
wrapper.py
delete_user_story
exersalza/AniAPI-pywrapper
0
python
def delete_user_story(self, _id: int) -> Ctx: '\n Deletes a UserStory on the provided unique identifier.\n\n Parameters\n ----------\n _id : [:class:`int`]\n The id from the UserStory that wanted to be deleted.\n\n Returns\n -------\n :class:`Ctx`\n Context obj with the response inside it\n\n Notes\n -----\n You should only use the endpoint when the User has 0 linked trackers, otherwise it will get re-imported.\n\n ' (res, header) = self.delete(url=f'/{API_VERSION}/user_story/{_id}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)
def delete_user_story(self, _id: int) -> Ctx: '\n Deletes a UserStory on the provided unique identifier.\n\n Parameters\n ----------\n _id : [:class:`int`]\n The id from the UserStory that wanted to be deleted.\n\n Returns\n -------\n :class:`Ctx`\n Context obj with the response inside it\n\n Notes\n -----\n You should only use the endpoint when the User has 0 linked trackers, otherwise it will get re-imported.\n\n ' (res, header) = self.delete(url=f'/{API_VERSION}/user_story/{_id}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>Deletes a UserStory on the provided unique identifier. Parameters ---------- _id : [:class:`int`] The id from the UserStory that wanted to be deleted. Returns ------- :class:`Ctx` Context obj with the response inside it Notes ----- You should only use the endpoint when the User has 0 linked trackers, otherwise it will get re-imported.<|endoftext|>
49630a6f8923094c7a2ffa89db0ce92de32a075c9bbff5b3916b219bb794bfcf
def get_user(self, user_id: int='', **kwargs) -> Ctx: "\n Get user list of users or when you provide a user_id to get a specific user\n\n Parameters\n ----------\n user_id : [:class:`int`]\n A UserID for specified search of user.\n\n kwargs\n Bring up pagination or currently two arguments for filtering:\n\n username: is not case-sensitive, it searches for substrings in the username.\n email: it's the same as username.\n\n Returns\n -------\n :class:`ctx`\n Context object with the query results\n " invalid = (set(kwargs) - set(USER_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(user_id, 'user', kwargs, UserSObj) return Ctx(**data)
Get user list of users or when you provide a user_id to get a specific user Parameters ---------- user_id : [:class:`int`] A UserID for specified search of user. kwargs Bring up pagination or currently two arguments for filtering: username: is not case-sensitive, it searches for substrings in the username. email: it's the same as username. Returns ------- :class:`ctx` Context object with the query results
wrapper.py
get_user
exersalza/AniAPI-pywrapper
0
python
def get_user(self, user_id: int=, **kwargs) -> Ctx: "\n Get user list of users or when you provide a user_id to get a specific user\n\n Parameters\n ----------\n user_id : [:class:`int`]\n A UserID for specified search of user.\n\n kwargs\n Bring up pagination or currently two arguments for filtering:\n\n username: is not case-sensitive, it searches for substrings in the username.\n email: it's the same as username.\n\n Returns\n -------\n :class:`ctx`\n Context object with the query results\n " invalid = (set(kwargs) - set(USER_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(user_id, 'user', kwargs, UserSObj) return Ctx(**data)
def get_user(self, user_id: int=, **kwargs) -> Ctx: "\n Get user list of users or when you provide a user_id to get a specific user\n\n Parameters\n ----------\n user_id : [:class:`int`]\n A UserID for specified search of user.\n\n kwargs\n Bring up pagination or currently two arguments for filtering:\n\n username: is not case-sensitive, it searches for substrings in the username.\n email: it's the same as username.\n\n Returns\n -------\n :class:`ctx`\n Context object with the query results\n " invalid = (set(kwargs) - set(USER_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') data = self.get_requests(user_id, 'user', kwargs, UserSObj) return Ctx(**data)<|docstring|>Get user list of users or when you provide a user_id to get a specific user Parameters ---------- user_id : [:class:`int`] A UserID for specified search of user. kwargs Bring up pagination or currently two arguments for filtering: username: is not case-sensitive, it searches for substrings in the username. email: it's the same as username. Returns ------- :class:`ctx` Context object with the query results<|endoftext|>
eb69310d0f2d492c859e1094690988cfcb8947c5c5325087ebc3fea0bb3682af
def update_user(self, user_id: int, gender: int, **kwargs) -> Ctx: " This method will update user information, please read the notes.\n\n Parameters\n ----------\n user_id : [:class:`int`]\n The unique identifier for the user that you want to edit.\n\n gender : [:class:`int`]\n The gender of the user that will be changed or not.\n\n kwargs\n Other settings to change on the user's acc, lists can be found at `utils.flags.UPDATE_USER_REQ` or\n at the docs at: https://aniapi.com/docs/resources/user#parameters-2\n\n Returns\n -------\n :class:`Ctx`\n A Ctx object with the return object\n\n Notes\n -----\n **It is NOT Recommended that you implement such function or form, when you want to do so, please redirect the\n User to the website. More information about it on the docs:\n https://aniapi.com/docs/resource/user#update-an-user**\n " invalid = (set(kwargs) - set(UPDATE_USER_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') (res, header) = self.post(f'/{API_VERSION}/user', headers=self.headers, data={'id': user_id, 'gender': gender, **kwargs}) data = create_data_dict(res, header) return Ctx(**data)
This method will update user information, please read the notes. Parameters ---------- user_id : [:class:`int`] The unique identifier for the user that you want to edit. gender : [:class:`int`] The gender of the user that will be changed or not. kwargs Other settings to change on the user's acc, lists can be found at `utils.flags.UPDATE_USER_REQ` or at the docs at: https://aniapi.com/docs/resources/user#parameters-2 Returns ------- :class:`Ctx` A Ctx object with the return object Notes ----- **It is NOT Recommended that you implement such function or form, when you want to do so, please redirect the User to the website. More information about it on the docs: https://aniapi.com/docs/resource/user#update-an-user**
wrapper.py
update_user
exersalza/AniAPI-pywrapper
0
python
def update_user(self, user_id: int, gender: int, **kwargs) -> Ctx: " This method will update user information, please read the notes.\n\n Parameters\n ----------\n user_id : [:class:`int`]\n The unique identifier for the user that you want to edit.\n\n gender : [:class:`int`]\n The gender of the user that will be changed or not.\n\n kwargs\n Other settings to change on the user's acc, lists can be found at `utils.flags.UPDATE_USER_REQ` or\n at the docs at: https://aniapi.com/docs/resources/user#parameters-2\n\n Returns\n -------\n :class:`Ctx`\n A Ctx object with the return object\n\n Notes\n -----\n **It is NOT Recommended that you implement such function or form, when you want to do so, please redirect the\n User to the website. More information about it on the docs:\n https://aniapi.com/docs/resource/user#update-an-user**\n " invalid = (set(kwargs) - set(UPDATE_USER_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') (res, header) = self.post(f'/{API_VERSION}/user', headers=self.headers, data={'id': user_id, 'gender': gender, **kwargs}) data = create_data_dict(res, header) return Ctx(**data)
def update_user(self, user_id: int, gender: int, **kwargs) -> Ctx: " This method will update user information, please read the notes.\n\n Parameters\n ----------\n user_id : [:class:`int`]\n The unique identifier for the user that you want to edit.\n\n gender : [:class:`int`]\n The gender of the user that will be changed or not.\n\n kwargs\n Other settings to change on the user's acc, lists can be found at `utils.flags.UPDATE_USER_REQ` or\n at the docs at: https://aniapi.com/docs/resources/user#parameters-2\n\n Returns\n -------\n :class:`Ctx`\n A Ctx object with the return object\n\n Notes\n -----\n **It is NOT Recommended that you implement such function or form, when you want to do so, please redirect the\n User to the website. More information about it on the docs:\n https://aniapi.com/docs/resource/user#update-an-user**\n " invalid = (set(kwargs) - set(UPDATE_USER_REQ)) if invalid: raise InvalidParamsException(f'Invalid parameters: {invalid}') (res, header) = self.post(f'/{API_VERSION}/user', headers=self.headers, data={'id': user_id, 'gender': gender, **kwargs}) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>This method will update user information, please read the notes. Parameters ---------- user_id : [:class:`int`] The unique identifier for the user that you want to edit. gender : [:class:`int`] The gender of the user that will be changed or not. kwargs Other settings to change on the user's acc, lists can be found at `utils.flags.UPDATE_USER_REQ` or at the docs at: https://aniapi.com/docs/resources/user#parameters-2 Returns ------- :class:`Ctx` A Ctx object with the return object Notes ----- **It is NOT Recommended that you implement such function or form, when you want to do so, please redirect the User to the website. More information about it on the docs: https://aniapi.com/docs/resource/user#update-an-user**<|endoftext|>
14d0e6894658f6aa8a82d81cd101c3a4a83707ff6e392a31042787e8eccf8a32
def delete_user(self, _id: int) -> Ctx: '\n This method will delete the user with the given id.\n Parameters\n ----------\n _id : [:class:`int`]\n The unique identifier for the user that you want to delete.\n\n Returns\n -------\n :class:`Ctx`\n A Ctx object with the return object\n ' (res, header) = self.delete(f'/{API_VERSION}/user/{_id}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)
This method will delete the user with the given id. Parameters ---------- _id : [:class:`int`] The unique identifier for the user that you want to delete. Returns ------- :class:`Ctx` A Ctx object with the return object
wrapper.py
delete_user
exersalza/AniAPI-pywrapper
0
python
def delete_user(self, _id: int) -> Ctx: '\n This method will delete the user with the given id.\n Parameters\n ----------\n _id : [:class:`int`]\n The unique identifier for the user that you want to delete.\n\n Returns\n -------\n :class:`Ctx`\n A Ctx object with the return object\n ' (res, header) = self.delete(f'/{API_VERSION}/user/{_id}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)
def delete_user(self, _id: int) -> Ctx: '\n This method will delete the user with the given id.\n Parameters\n ----------\n _id : [:class:`int`]\n The unique identifier for the user that you want to delete.\n\n Returns\n -------\n :class:`Ctx`\n A Ctx object with the return object\n ' (res, header) = self.delete(f'/{API_VERSION}/user/{_id}', headers=self.headers) data = create_data_dict(res, header) return Ctx(**data)<|docstring|>This method will delete the user with the given id. Parameters ---------- _id : [:class:`int`] The unique identifier for the user that you want to delete. Returns ------- :class:`Ctx` A Ctx object with the return object<|endoftext|>
ae74379eacedc79671e3b0ff44cf5d8316d7da3d05120a2171b651c7c3729341
def auth_me(self, jwt: str) -> Ctx: '\n This method will test the given token and return the user\n information (if it exists and its valid).\n\n Parameters\n ----------\n jwt : :class:`str`\n The JWT token to test.\n\n Returns\n -------\n :class:`Ctx`\n A context object with the response. If the token is invalid you\n will get a status code of 401.\n ' (res, header) = self.get(f'/{API_VERSION}/auth/me', headers=default_header(jwt)) data = create_data_dict(res, header) if (data.get('status_code', 404) != 200): return Ctx(**data) data['data'] = UserBObj(**data.get('data')) return Ctx(**data)
This method will test the given token and return the user information (if it exists and its valid). Parameters ---------- jwt : :class:`str` The JWT token to test. Returns ------- :class:`Ctx` A context object with the response. If the token is invalid you will get a status code of 401.
wrapper.py
auth_me
exersalza/AniAPI-pywrapper
0
python
def auth_me(self, jwt: str) -> Ctx: '\n This method will test the given token and return the user\n information (if it exists and its valid).\n\n Parameters\n ----------\n jwt : :class:`str`\n The JWT token to test.\n\n Returns\n -------\n :class:`Ctx`\n A context object with the response. If the token is invalid you\n will get a status code of 401.\n ' (res, header) = self.get(f'/{API_VERSION}/auth/me', headers=default_header(jwt)) data = create_data_dict(res, header) if (data.get('status_code', 404) != 200): return Ctx(**data) data['data'] = UserBObj(**data.get('data')) return Ctx(**data)
def auth_me(self, jwt: str) -> Ctx: '\n This method will test the given token and return the user\n information (if it exists and its valid).\n\n Parameters\n ----------\n jwt : :class:`str`\n The JWT token to test.\n\n Returns\n -------\n :class:`Ctx`\n A context object with the response. If the token is invalid you\n will get a status code of 401.\n ' (res, header) = self.get(f'/{API_VERSION}/auth/me', headers=default_header(jwt)) data = create_data_dict(res, header) if (data.get('status_code', 404) != 200): return Ctx(**data) data['data'] = UserBObj(**data.get('data')) return Ctx(**data)<|docstring|>This method will test the given token and return the user information (if it exists and its valid). Parameters ---------- jwt : :class:`str` The JWT token to test. Returns ------- :class:`Ctx` A context object with the response. If the token is invalid you will get a status code of 401.<|endoftext|>
868aa9cdf2291379750e9354134860ec4462ff278adb73bf5d5df38ad7406d61
@property @decorators.Cache def misc_web_contents_backend(self): 'Access to chrome://oobe/login page.' return misc_web_contents_backend.MiscWebContentsBackend(self)
Access to chrome://oobe/login page.
telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
misc_web_contents_backend
Murka96/catapult
0
python
@property @decorators.Cache def misc_web_contents_backend(self): return misc_web_contents_backend.MiscWebContentsBackend(self)
@property @decorators.Cache def misc_web_contents_backend(self): return misc_web_contents_backend.MiscWebContentsBackend(self)<|docstring|>Access to chrome://oobe/login page.<|endoftext|>
cf2f64ddc8220571815eab43db085c5d7472570d4837380b0a8f459a4c3c1769
def _GetLoginStatus(self): 'Returns login status. If logged in, empty string is returned.' status = '' if (not self._IsCryptohomeMounted()): status += 'Cryptohome not mounted. ' if (not self.HasDevToolsConnection()): status += "Browser didn't launch. " if self.oobe_exists: status += 'OOBE not dismissed.' return status
Returns login status. If logged in, empty string is returned.
telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
_GetLoginStatus
Murka96/catapult
0
python
def _GetLoginStatus(self): status = if (not self._IsCryptohomeMounted()): status += 'Cryptohome not mounted. ' if (not self.HasDevToolsConnection()): status += "Browser didn't launch. " if self.oobe_exists: status += 'OOBE not dismissed.' return status
def _GetLoginStatus(self): status = if (not self._IsCryptohomeMounted()): status += 'Cryptohome not mounted. ' if (not self.HasDevToolsConnection()): status += "Browser didn't launch. " if self.oobe_exists: status += 'OOBE not dismissed.' return status<|docstring|>Returns login status. If logged in, empty string is returned.<|endoftext|>
7532b615df20506e1dd8bc4b344fd36808fa39077207960c97800e51bb5e45bc
def _IsLoggedIn(self): 'Returns True if cryptohome has mounted, the browser is\n responsive to devtools requests, and the oobe has been dismissed.' return (not self._GetLoginStatus())
Returns True if cryptohome has mounted, the browser is responsive to devtools requests, and the oobe has been dismissed.
telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
_IsLoggedIn
Murka96/catapult
0
python
def _IsLoggedIn(self): 'Returns True if cryptohome has mounted, the browser is\n responsive to devtools requests, and the oobe has been dismissed.' return (not self._GetLoginStatus())
def _IsLoggedIn(self): 'Returns True if cryptohome has mounted, the browser is\n responsive to devtools requests, and the oobe has been dismissed.' return (not self._GetLoginStatus())<|docstring|>Returns True if cryptohome has mounted, the browser is responsive to devtools requests, and the oobe has been dismissed.<|endoftext|>
af367f9a508893455ff4410f6af4ce3644f30f911067b2e25e7a428a1f8b7828
def _SymbolizeMinidump(self, minidump_path): 'Symbolizes the given minidump.\n\n Args:\n minidump_path: the path to the minidump to symbolize\n\n Return:\n A tuple (valid, output). |valid| is True if the minidump was symbolized,\n otherwise False. |output| contains an error message if |valid| is False,\n otherwise it contains the symbolized minidump.\n ' stack = self._GetStackFromMinidump(minidump_path) if (not stack): error_message = 'Failed to symbolize minidump.' return (False, error_message) self._symbolized_minidump_paths.add(minidump_path) return (True, stack)
Symbolizes the given minidump. Args: minidump_path: the path to the minidump to symbolize Return: A tuple (valid, output). |valid| is True if the minidump was symbolized, otherwise False. |output| contains an error message if |valid| is False, otherwise it contains the symbolized minidump.
telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
_SymbolizeMinidump
Murka96/catapult
0
python
def _SymbolizeMinidump(self, minidump_path): 'Symbolizes the given minidump.\n\n Args:\n minidump_path: the path to the minidump to symbolize\n\n Return:\n A tuple (valid, output). |valid| is True if the minidump was symbolized,\n otherwise False. |output| contains an error message if |valid| is False,\n otherwise it contains the symbolized minidump.\n ' stack = self._GetStackFromMinidump(minidump_path) if (not stack): error_message = 'Failed to symbolize minidump.' return (False, error_message) self._symbolized_minidump_paths.add(minidump_path) return (True, stack)
def _SymbolizeMinidump(self, minidump_path): 'Symbolizes the given minidump.\n\n Args:\n minidump_path: the path to the minidump to symbolize\n\n Return:\n A tuple (valid, output). |valid| is True if the minidump was symbolized,\n otherwise False. |output| contains an error message if |valid| is False,\n otherwise it contains the symbolized minidump.\n ' stack = self._GetStackFromMinidump(minidump_path) if (not stack): error_message = 'Failed to symbolize minidump.' return (False, error_message) self._symbolized_minidump_paths.add(minidump_path) return (True, stack)<|docstring|>Symbolizes the given minidump. Args: minidump_path: the path to the minidump to symbolize Return: A tuple (valid, output). |valid| is True if the minidump was symbolized, otherwise False. |output| contains an error message if |valid| is False, otherwise it contains the symbolized minidump.<|endoftext|>
7b99cd64d7964ade87464b70d7470cec955e012e2aaedf5ceb9fd15997b9036a
def _GetStackFromMinidump(self, minidump): 'Gets the stack trace from the given minidump.\n\n Args:\n minidump: the path to the minidump on disk\n\n Returns:\n None if the stack could not be retrieved for some reason, otherwise a\n string containing the stack trace.\n ' dump_symbolizer = cros_minidump_symbolizer.CrOSMinidumpSymbolizer(self._dump_finder, self._build_dir) return dump_symbolizer.SymbolizeMinidump(minidump)
Gets the stack trace from the given minidump. Args: minidump: the path to the minidump on disk Returns: None if the stack could not be retrieved for some reason, otherwise a string containing the stack trace.
telemetry/telemetry/internal/backends/chrome/cros_browser_backend.py
_GetStackFromMinidump
Murka96/catapult
0
python
def _GetStackFromMinidump(self, minidump): 'Gets the stack trace from the given minidump.\n\n Args:\n minidump: the path to the minidump on disk\n\n Returns:\n None if the stack could not be retrieved for some reason, otherwise a\n string containing the stack trace.\n ' dump_symbolizer = cros_minidump_symbolizer.CrOSMinidumpSymbolizer(self._dump_finder, self._build_dir) return dump_symbolizer.SymbolizeMinidump(minidump)
def _GetStackFromMinidump(self, minidump): 'Gets the stack trace from the given minidump.\n\n Args:\n minidump: the path to the minidump on disk\n\n Returns:\n None if the stack could not be retrieved for some reason, otherwise a\n string containing the stack trace.\n ' dump_symbolizer = cros_minidump_symbolizer.CrOSMinidumpSymbolizer(self._dump_finder, self._build_dir) return dump_symbolizer.SymbolizeMinidump(minidump)<|docstring|>Gets the stack trace from the given minidump. Args: minidump: the path to the minidump on disk Returns: None if the stack could not be retrieved for some reason, otherwise a string containing the stack trace.<|endoftext|>
678b11c2e62962269a3fc028c75b78be8eb56a16b744129ff5798ee201ff0bba
def __init__(self): '\n Newton-Raphson algorithm.\n ' OptSolver.__init__(self) self.parameters = OptSolverNR.parameters.copy() self.linsolver = None self.problem = None
Newton-Raphson algorithm.
optalg/opt_solver/nr.py
__init__
ttinoco/OPTALG
10
python
def __init__(self): '\n \n ' OptSolver.__init__(self) self.parameters = OptSolverNR.parameters.copy() self.linsolver = None self.problem = None
def __init__(self): '\n \n ' OptSolver.__init__(self) self.parameters = OptSolverNR.parameters.copy() self.linsolver = None self.problem = None<|docstring|>Newton-Raphson algorithm.<|endoftext|>
edc5e4325e9303e4b8f56e46b3aa5d3cabf8fc1b20d1a124035edca833791f9b
def query(client, wid, query_contents, dialect='postgresql', limit=None): 'Query metadata.' if (limit is None): kwargs = dict(per_page=100) limit = sys.maxsize else: kwargs = dict(per_page=min(limit, 100)) query_obj = {'dialect': dialect, 'query': query_contents} if (wid is None): query_details = client.public_query_create(query_obj, **kwargs) details_func = client.public_query_details else: query_details = client.workspace_query_create(wid, query_obj, **kwargs) details_func = functools.partial(client.workspace_query_details, wid) results = query_details.results if (not results): return ([], 0) if (len(results) > limit): results = results[:limit] while ((len(results) < limit) and (len(results) < query_details.total)): kwargs['page'] = (kwargs.get('page', 1) + 1) query_details = details_func(query_details.id, **kwargs) results.extend(query_details.results) return (results, query_details.total)
Query metadata.
quetzal/client/helpers/query.py
query
dojeda/quetzal-client
2
python
def query(client, wid, query_contents, dialect='postgresql', limit=None): if (limit is None): kwargs = dict(per_page=100) limit = sys.maxsize else: kwargs = dict(per_page=min(limit, 100)) query_obj = {'dialect': dialect, 'query': query_contents} if (wid is None): query_details = client.public_query_create(query_obj, **kwargs) details_func = client.public_query_details else: query_details = client.workspace_query_create(wid, query_obj, **kwargs) details_func = functools.partial(client.workspace_query_details, wid) results = query_details.results if (not results): return ([], 0) if (len(results) > limit): results = results[:limit] while ((len(results) < limit) and (len(results) < query_details.total)): kwargs['page'] = (kwargs.get('page', 1) + 1) query_details = details_func(query_details.id, **kwargs) results.extend(query_details.results) return (results, query_details.total)
def query(client, wid, query_contents, dialect='postgresql', limit=None): if (limit is None): kwargs = dict(per_page=100) limit = sys.maxsize else: kwargs = dict(per_page=min(limit, 100)) query_obj = {'dialect': dialect, 'query': query_contents} if (wid is None): query_details = client.public_query_create(query_obj, **kwargs) details_func = client.public_query_details else: query_details = client.workspace_query_create(wid, query_obj, **kwargs) details_func = functools.partial(client.workspace_query_details, wid) results = query_details.results if (not results): return ([], 0) if (len(results) > limit): results = results[:limit] while ((len(results) < limit) and (len(results) < query_details.total)): kwargs['page'] = (kwargs.get('page', 1) + 1) query_details = details_func(query_details.id, **kwargs) results.extend(query_details.results) return (results, query_details.total)<|docstring|>Query metadata.<|endoftext|>
23e6a7fecb3c9ea6de257d5e051ad3fd14f1ea527e2ab229e41e5619fb602721
def __init__(self): '\n Creates the himesis graph representing the AToM3 model HNeg_CountryCity_CompleteLHS.\n ' self.is_compiled = True super(HNeg_CountryCity_CompleteLHS, self).__init__(name='HNeg_CountryCity_CompleteLHS', num_nodes=0, edges=[]) self['mm__'] = [] self['MT_constraint__'] = "#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode('n').\n# To access attribute x of node n, use: PreNode('n')['x'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n" self['name'] = '' self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'Neg_CountryCity') self.add_node() self.vs[0]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[0]['MT_label__'] = '1' self.vs[0]['MT_dirty__'] = False self.vs[0]['mm__'] = 'MT_pre__Country' self.vs[0]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, '') self.add_node() self.vs[1]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: 
attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[1]['MT_label__'] = '2' self.vs[1]['MT_dirty__'] = False self.vs[1]['mm__'] = 'MT_pre__City' self.vs[1]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, '') self.add_node() self.vs[2]['MT_subtypeMatching__'] = False self.vs[2]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[2]['MT_label__'] = '3' self.vs[2]['MT_subtypes__'] = [] self.vs[2]['MT_dirty__'] = False self.vs[2]['mm__'] = 'MT_pre__Association' self.vs[2]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, '') self.add_node() self.vs[3]['MT_subtypeMatching__'] = False self.vs[3]['MT_pre__attr1'] = '\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean 
expression.\n#===============================================================================\n\nreturn attr_value == "cities"\n' self.vs[3]['MT_label__'] = '4' self.vs[3]['MT_subtypes__'] = [] self.vs[3]['MT_dirty__'] = False self.vs[3]['mm__'] = 'MT_pre__directLink_S' self.vs[3]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'assoc3') self.add_edges([(0, 3), (3, 1)]) self['equations'] = []
Creates the himesis graph representing the AToM3 model HNeg_CountryCity_CompleteLHS.
ExFamToPerson/contracts/HNeg_CountryCity_CompleteLHS.py
__init__
levilucio/SyVOLT
3
python
def __init__(self): '\n \n ' self.is_compiled = True super(HNeg_CountryCity_CompleteLHS, self).__init__(name='HNeg_CountryCity_CompleteLHS', num_nodes=0, edges=[]) self['mm__'] = [] self['MT_constraint__'] = "#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode('n').\n# To access attribute x of node n, use: PreNode('n')['x'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n" self['name'] = self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'Neg_CountryCity') self.add_node() self.vs[0]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[0]['MT_label__'] = '1' self.vs[0]['MT_dirty__'] = False self.vs[0]['mm__'] = 'MT_pre__Country' self.vs[0]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, ) self.add_node() self.vs[1]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint 
relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[1]['MT_label__'] = '2' self.vs[1]['MT_dirty__'] = False self.vs[1]['mm__'] = 'MT_pre__City' self.vs[1]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, ) self.add_node() self.vs[2]['MT_subtypeMatching__'] = False self.vs[2]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[2]['MT_label__'] = '3' self.vs[2]['MT_subtypes__'] = [] self.vs[2]['MT_dirty__'] = False self.vs[2]['mm__'] = 'MT_pre__Association' self.vs[2]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, ) self.add_node() self.vs[3]['MT_subtypeMatching__'] = False self.vs[3]['MT_pre__attr1'] = '\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn attr_value == "cities"\n' self.vs[3]['MT_label__'] = '4' 
self.vs[3]['MT_subtypes__'] = [] self.vs[3]['MT_dirty__'] = False self.vs[3]['mm__'] = 'MT_pre__directLink_S' self.vs[3]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'assoc3') self.add_edges([(0, 3), (3, 1)]) self['equations'] = []
def __init__(self): '\n \n ' self.is_compiled = True super(HNeg_CountryCity_CompleteLHS, self).__init__(name='HNeg_CountryCity_CompleteLHS', num_nodes=0, edges=[]) self['mm__'] = [] self['MT_constraint__'] = "#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode('n').\n# To access attribute x of node n, use: PreNode('n')['x'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n" self['name'] = self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'Neg_CountryCity') self.add_node() self.vs[0]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[0]['MT_label__'] = '1' self.vs[0]['MT_dirty__'] = False self.vs[0]['mm__'] = 'MT_pre__Country' self.vs[0]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, ) self.add_node() self.vs[1]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint 
relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[1]['MT_label__'] = '2' self.vs[1]['MT_dirty__'] = False self.vs[1]['mm__'] = 'MT_pre__City' self.vs[1]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, ) self.add_node() self.vs[2]['MT_subtypeMatching__'] = False self.vs[2]['MT_pre__attr1'] = "\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n" self.vs[2]['MT_label__'] = '3' self.vs[2]['MT_subtypes__'] = [] self.vs[2]['MT_dirty__'] = False self.vs[2]['mm__'] = 'MT_pre__Association' self.vs[2]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, ) self.add_node() self.vs[3]['MT_subtypeMatching__'] = False self.vs[3]['MT_pre__attr1'] = '\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn attr_value == "cities"\n' self.vs[3]['MT_label__'] = '4' 
self.vs[3]['MT_subtypes__'] = [] self.vs[3]['MT_dirty__'] = False self.vs[3]['mm__'] = 'MT_pre__directLink_S' self.vs[3]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'assoc3') self.add_edges([(0, 3), (3, 1)]) self['equations'] = []<|docstring|>Creates the himesis graph representing the AToM3 model HNeg_CountryCity_CompleteLHS.<|endoftext|>
674f48a1f359291fd94c92ea8faa51ce88d36074b686f9ecbd5575e1ce620260
def constraint(self, PreNode, graph): '\n Executable constraint code.\n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n ' return True
Executable constraint code. @param PreNode: Function taking an integer as parameter and returns the node corresponding to that label.
ExFamToPerson/contracts/HNeg_CountryCity_CompleteLHS.py
constraint
levilucio/SyVOLT
3
python
def constraint(self, PreNode, graph): '\n Executable constraint code.\n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n ' return True
def constraint(self, PreNode, graph): '\n Executable constraint code.\n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n ' return True<|docstring|>Executable constraint code. @param PreNode: Function taking an integer as parameter and returns the node corresponding to that label.<|endoftext|>
23c035ea440c6463b748a9ee1d29f6652b94abd416d541e0efe66301bc56b3e2
def __init__(self, mha_node: BaseNode): '\n Extract MHA params from layer attributes\n Args:\n mha_node: MHA node\n ' if (BATCH_FIRST in mha_node.framework_attr.keys()): if (mha_node.framework_attr[BATCH_FIRST] is not True): raise Exception('Only batch first network is supported') else: raise Exception('Only batch first network is supported') if (ADD_ZERO_ATTN in mha_node.framework_attr.keys()): if (mha_node.framework_attr[ADD_ZERO_ATTN] is not False): raise Exception('Add Zero Attn feature is Not Implemented') if (BIAS_K and (BIAS_V in mha_node.weights.keys())): if (mha_node.weights[BIAS_K] and (mha_node.weights[BIAS_V] is not None)): raise Exception('Add BIAS_KV feature is Not Implemented') self.embed_dim = mha_node.framework_attr[EMBED_DIM] self.num_heads = mha_node.framework_attr[NUM_HEADS] if (KEY_DIM in mha_node.framework_attr): self.kdim = mha_node.framework_attr[KEY_DIM] else: self.kdim = False if (VALUE_DIM in mha_node.framework_attr): self.vdim = mha_node.framework_attr[VALUE_DIM] else: self.vdim = False self.qdim = int((self.embed_dim / self.num_heads)) (self.q_input, self.k_input, self.v_input) = mha_node.input_shape assert (self.q_input[0] == self.k_input[0] == self.v_input[0]), 'Batch size must be equal to all inputs' assert (self.k_input[1] == self.v_input[1]), "key's sequence dim do not match value's" assert (self.kdim == self.k_input[2]), "key's embed dim do not match excepted" assert (self.vdim == self.v_input[2]), "value's embed dim do not match excepted" self.kv_seq = self.k_input[1] self.q_seq = self.q_input[1] self.q_input_shape = tuple(self.q_input) self.q_transpose_in_shape = tuple([self.q_input[0], self.embed_dim, self.q_seq]) self.q_reshape_in_shape = tuple([self.q_input[0], self.num_heads, self.qdim, self.q_seq]) self.q_transpose_shape = tuple([self.q_input[0], self.num_heads, self.q_seq, self.qdim]) self.q_split_shape = tuple([self.q_input[0], 1, self.q_seq, self.qdim]) self.q_head_shape = tuple([self.q_input[0], self.q_seq, self.qdim]) 
self.k_input_shape = tuple(self.k_input) self.k_transpose_in_shape = tuple([self.k_input[0], self.kdim, self.kv_seq]) self.k_proj_in_shape = tuple([self.k_input[0], self.embed_dim, self.kv_seq]) self.k_reshape_in_shape = tuple([self.k_input[0], self.num_heads, self.qdim, self.kv_seq]) self.k_split_shape = tuple([self.k_input[0], 1, self.qdim, self.kv_seq]) self.k_head_shape = tuple([self.k_input[0], self.qdim, self.kv_seq]) self.v_input_shape = tuple(self.v_input) self.v_transpose_in_shape = tuple([self.v_input[0], self.vdim, self.kv_seq]) self.v_proj_in_shape = tuple([self.v_input[0], self.embed_dim, self.kv_seq]) self.v_reshape_in_shape = tuple([self.v_input[0], self.num_heads, self.qdim, self.kv_seq]) self.v_transpose_shape = tuple([self.v_input[0], self.num_heads, self.kv_seq, self.qdim]) self.v_split_shape = tuple([self.v_input[0], 1, self.kv_seq, self.qdim]) self.v_head_shape = tuple([self.v_input[0], self.kv_seq, self.qdim]) self.attn_mat_shape = tuple([self.q_input[0], 1, self.q_seq, self.kv_seq]) self.attn_shape = self.q_split_shape self.attn_cat_shape = tuple([self.q_input[0], self.num_heads, self.q_seq, self.qdim]) self.attn_transpose_shape = tuple([self.q_input[0], self.q_seq, self.num_heads, self.qdim]) self.attn_reshape_shape = self.q_input_shape self.transpose_out_proj_shape = self.q_transpose_in_shape self.output_shape = self.q_input_shape
Extract MHA params from layer attributes Args: mha_node: MHA node
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
__init__
reuvenperetz/model_optimization
42
python
def __init__(self, mha_node: BaseNode): '\n Extract MHA params from layer attributes\n Args:\n mha_node: MHA node\n ' if (BATCH_FIRST in mha_node.framework_attr.keys()): if (mha_node.framework_attr[BATCH_FIRST] is not True): raise Exception('Only batch first network is supported') else: raise Exception('Only batch first network is supported') if (ADD_ZERO_ATTN in mha_node.framework_attr.keys()): if (mha_node.framework_attr[ADD_ZERO_ATTN] is not False): raise Exception('Add Zero Attn feature is Not Implemented') if (BIAS_K and (BIAS_V in mha_node.weights.keys())): if (mha_node.weights[BIAS_K] and (mha_node.weights[BIAS_V] is not None)): raise Exception('Add BIAS_KV feature is Not Implemented') self.embed_dim = mha_node.framework_attr[EMBED_DIM] self.num_heads = mha_node.framework_attr[NUM_HEADS] if (KEY_DIM in mha_node.framework_attr): self.kdim = mha_node.framework_attr[KEY_DIM] else: self.kdim = False if (VALUE_DIM in mha_node.framework_attr): self.vdim = mha_node.framework_attr[VALUE_DIM] else: self.vdim = False self.qdim = int((self.embed_dim / self.num_heads)) (self.q_input, self.k_input, self.v_input) = mha_node.input_shape assert (self.q_input[0] == self.k_input[0] == self.v_input[0]), 'Batch size must be equal to all inputs' assert (self.k_input[1] == self.v_input[1]), "key's sequence dim do not match value's" assert (self.kdim == self.k_input[2]), "key's embed dim do not match excepted" assert (self.vdim == self.v_input[2]), "value's embed dim do not match excepted" self.kv_seq = self.k_input[1] self.q_seq = self.q_input[1] self.q_input_shape = tuple(self.q_input) self.q_transpose_in_shape = tuple([self.q_input[0], self.embed_dim, self.q_seq]) self.q_reshape_in_shape = tuple([self.q_input[0], self.num_heads, self.qdim, self.q_seq]) self.q_transpose_shape = tuple([self.q_input[0], self.num_heads, self.q_seq, self.qdim]) self.q_split_shape = tuple([self.q_input[0], 1, self.q_seq, self.qdim]) self.q_head_shape = tuple([self.q_input[0], self.q_seq, self.qdim]) 
self.k_input_shape = tuple(self.k_input) self.k_transpose_in_shape = tuple([self.k_input[0], self.kdim, self.kv_seq]) self.k_proj_in_shape = tuple([self.k_input[0], self.embed_dim, self.kv_seq]) self.k_reshape_in_shape = tuple([self.k_input[0], self.num_heads, self.qdim, self.kv_seq]) self.k_split_shape = tuple([self.k_input[0], 1, self.qdim, self.kv_seq]) self.k_head_shape = tuple([self.k_input[0], self.qdim, self.kv_seq]) self.v_input_shape = tuple(self.v_input) self.v_transpose_in_shape = tuple([self.v_input[0], self.vdim, self.kv_seq]) self.v_proj_in_shape = tuple([self.v_input[0], self.embed_dim, self.kv_seq]) self.v_reshape_in_shape = tuple([self.v_input[0], self.num_heads, self.qdim, self.kv_seq]) self.v_transpose_shape = tuple([self.v_input[0], self.num_heads, self.kv_seq, self.qdim]) self.v_split_shape = tuple([self.v_input[0], 1, self.kv_seq, self.qdim]) self.v_head_shape = tuple([self.v_input[0], self.kv_seq, self.qdim]) self.attn_mat_shape = tuple([self.q_input[0], 1, self.q_seq, self.kv_seq]) self.attn_shape = self.q_split_shape self.attn_cat_shape = tuple([self.q_input[0], self.num_heads, self.q_seq, self.qdim]) self.attn_transpose_shape = tuple([self.q_input[0], self.q_seq, self.num_heads, self.qdim]) self.attn_reshape_shape = self.q_input_shape self.transpose_out_proj_shape = self.q_transpose_in_shape self.output_shape = self.q_input_shape
def __init__(self, mha_node: BaseNode): '\n Extract MHA params from layer attributes\n Args:\n mha_node: MHA node\n ' if (BATCH_FIRST in mha_node.framework_attr.keys()): if (mha_node.framework_attr[BATCH_FIRST] is not True): raise Exception('Only batch first network is supported') else: raise Exception('Only batch first network is supported') if (ADD_ZERO_ATTN in mha_node.framework_attr.keys()): if (mha_node.framework_attr[ADD_ZERO_ATTN] is not False): raise Exception('Add Zero Attn feature is Not Implemented') if (BIAS_K and (BIAS_V in mha_node.weights.keys())): if (mha_node.weights[BIAS_K] and (mha_node.weights[BIAS_V] is not None)): raise Exception('Add BIAS_KV feature is Not Implemented') self.embed_dim = mha_node.framework_attr[EMBED_DIM] self.num_heads = mha_node.framework_attr[NUM_HEADS] if (KEY_DIM in mha_node.framework_attr): self.kdim = mha_node.framework_attr[KEY_DIM] else: self.kdim = False if (VALUE_DIM in mha_node.framework_attr): self.vdim = mha_node.framework_attr[VALUE_DIM] else: self.vdim = False self.qdim = int((self.embed_dim / self.num_heads)) (self.q_input, self.k_input, self.v_input) = mha_node.input_shape assert (self.q_input[0] == self.k_input[0] == self.v_input[0]), 'Batch size must be equal to all inputs' assert (self.k_input[1] == self.v_input[1]), "key's sequence dim do not match value's" assert (self.kdim == self.k_input[2]), "key's embed dim do not match excepted" assert (self.vdim == self.v_input[2]), "value's embed dim do not match excepted" self.kv_seq = self.k_input[1] self.q_seq = self.q_input[1] self.q_input_shape = tuple(self.q_input) self.q_transpose_in_shape = tuple([self.q_input[0], self.embed_dim, self.q_seq]) self.q_reshape_in_shape = tuple([self.q_input[0], self.num_heads, self.qdim, self.q_seq]) self.q_transpose_shape = tuple([self.q_input[0], self.num_heads, self.q_seq, self.qdim]) self.q_split_shape = tuple([self.q_input[0], 1, self.q_seq, self.qdim]) self.q_head_shape = tuple([self.q_input[0], self.q_seq, self.qdim]) 
self.k_input_shape = tuple(self.k_input) self.k_transpose_in_shape = tuple([self.k_input[0], self.kdim, self.kv_seq]) self.k_proj_in_shape = tuple([self.k_input[0], self.embed_dim, self.kv_seq]) self.k_reshape_in_shape = tuple([self.k_input[0], self.num_heads, self.qdim, self.kv_seq]) self.k_split_shape = tuple([self.k_input[0], 1, self.qdim, self.kv_seq]) self.k_head_shape = tuple([self.k_input[0], self.qdim, self.kv_seq]) self.v_input_shape = tuple(self.v_input) self.v_transpose_in_shape = tuple([self.v_input[0], self.vdim, self.kv_seq]) self.v_proj_in_shape = tuple([self.v_input[0], self.embed_dim, self.kv_seq]) self.v_reshape_in_shape = tuple([self.v_input[0], self.num_heads, self.qdim, self.kv_seq]) self.v_transpose_shape = tuple([self.v_input[0], self.num_heads, self.kv_seq, self.qdim]) self.v_split_shape = tuple([self.v_input[0], 1, self.kv_seq, self.qdim]) self.v_head_shape = tuple([self.v_input[0], self.kv_seq, self.qdim]) self.attn_mat_shape = tuple([self.q_input[0], 1, self.q_seq, self.kv_seq]) self.attn_shape = self.q_split_shape self.attn_cat_shape = tuple([self.q_input[0], self.num_heads, self.q_seq, self.qdim]) self.attn_transpose_shape = tuple([self.q_input[0], self.q_seq, self.num_heads, self.qdim]) self.attn_reshape_shape = self.q_input_shape self.transpose_out_proj_shape = self.q_transpose_in_shape self.output_shape = self.q_input_shape<|docstring|>Extract MHA params from layer attributes Args: mha_node: MHA node<|endoftext|>
c8b426896275625242138a3d5870702de77eb01dd07242d65957b58a79ffcd08
def __init__(self): '\n Matches MultiHeadAttention node.\n ' super().__init__(matcher_instance=NodeOperationMatcher(nn.MultiheadAttention))
Matches MultiHeadAttention node.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
__init__
reuvenperetz/model_optimization
42
python
def __init__(self): '\n \n ' super().__init__(matcher_instance=NodeOperationMatcher(nn.MultiheadAttention))
def __init__(self): '\n \n ' super().__init__(matcher_instance=NodeOperationMatcher(nn.MultiheadAttention))<|docstring|>Matches MultiHeadAttention node.<|endoftext|>
b464395b7aaaee3661c9e578f75f871609784e1ab69dd2c6f15694a5fd4ed625
def _project_input(self, graph: Graph, mha_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required to project q, k, v\n We implement the projection as Conv1d\n Due to the above we add transpose node to each of the inputs in order to swap the channel axis according\n to Conv1d expected input shape\n We describe below the shape transformation of each input (q, k, v) from the input shape,\n to transposing and projecting\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n params: MHAnode params.\n\n Returns:\n List of nodes transposing the inputs and nodes after input projection.\n ' factor = (params.qdim ** (- 0.5)) if (IN_PROJ_WEIGHT in mha_node.weights.keys()): in_proj_w = np.expand_dims(mha_node.get_weights_by_keys(IN_PROJ_WEIGHT).copy(), (- 1)) (qk, kk, vk) = np.split(in_proj_w, 3) qk = (qk * factor) else: qk = np.expand_dims((mha_node.get_weights_by_keys(Q_PROJ_WEIGHT).copy() * factor), (- 1)) kk = np.expand_dims(mha_node.get_weights_by_keys(K_PROJ_WEIGHT).copy(), (- 1)) vk = np.expand_dims(mha_node.get_weights_by_keys(V_PROJ_WEIGHT).copy(), (- 1)) if (IN_PROJ_BIAS in mha_node.weights.keys()): in_proj_bias = mha_node.get_weights_by_keys(IN_PROJ_BIAS).copy() (qb, kb, vb) = np.split(in_proj_bias, 3) qb = (qb * factor) in_bias = True q_node_weights = {KERNEL: qk, BIAS: qb} k_node_weights = {KERNEL: kk, BIAS: kb} v_node_weights = {KERNEL: vk, BIAS: vb} else: in_bias = False q_node_weights = {KERNEL: qk} k_node_weights = {KERNEL: kk} v_node_weights = {KERNEL: vk} query_name = f'{mha_node.name}_query_in_proj' q_transpose_node = FunctionalNode(name=(query_name + '_transpose'), framework_attr={}, input_shape=params.q_input_shape, output_shape=params.q_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(q_transpose_node) q_node = BaseNode(name=query_name, framework_attr={IN_CHANNELS: params.embed_dim, 
OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.q_transpose_in_shape, output_shape=params.q_transpose_in_shape, weights=q_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(q_node, [q_transpose_node]) key_name = f'{mha_node.name}_key_in_proj' k_transpose_node = FunctionalNode(name=(key_name + '_transpose'), framework_attr={}, input_shape=params.k_input_shape, output_shape=params.k_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(k_transpose_node) k_node = BaseNode(name=key_name, framework_attr={IN_CHANNELS: params.kdim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.k_transpose_in_shape, output_shape=params.k_proj_in_shape, weights=k_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(k_node, [k_transpose_node]) value_name = f'{mha_node.name}_value_in_proj' v_transpose_node = FunctionalNode(name=(value_name + '_transpose'), framework_attr={}, input_shape=params.v_input_shape, output_shape=params.v_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(v_transpose_node) v_node = BaseNode(name=value_name, framework_attr={IN_CHANNELS: params.vdim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.v_transpose_in_shape, output_shape=params.v_proj_in_shape, weights=v_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(v_node, [v_transpose_node]) return (q_transpose_node, k_transpose_node, v_transpose_node, q_node, k_node, v_node)
This method creates the nodes required to project q, k, v We implement the projection as Conv1d Due to the above we add transpose node to each of the inputs in order to swap the channel axis according to Conv1d expected input shape We describe below the shape transformation of each input (q, k, v) from the input shape, to transposing and projecting Args: graph: Graph to apply the substitution on. mha_node: MHA node. params: MHAnode params. Returns: List of nodes transposing the inputs and nodes after input projection.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_project_input
reuvenperetz/model_optimization
42
python
def _project_input(self, graph: Graph, mha_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required to project q, k, v\n We implement the projection as Conv1d\n Due to the above we add transpose node to each of the inputs in order to swap the channel axis according\n to Conv1d expected input shape\n We describe below the shape transformation of each input (q, k, v) from the input shape,\n to transposing and projecting\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n params: MHAnode params.\n\n Returns:\n List of nodes transposing the inputs and nodes after input projection.\n ' factor = (params.qdim ** (- 0.5)) if (IN_PROJ_WEIGHT in mha_node.weights.keys()): in_proj_w = np.expand_dims(mha_node.get_weights_by_keys(IN_PROJ_WEIGHT).copy(), (- 1)) (qk, kk, vk) = np.split(in_proj_w, 3) qk = (qk * factor) else: qk = np.expand_dims((mha_node.get_weights_by_keys(Q_PROJ_WEIGHT).copy() * factor), (- 1)) kk = np.expand_dims(mha_node.get_weights_by_keys(K_PROJ_WEIGHT).copy(), (- 1)) vk = np.expand_dims(mha_node.get_weights_by_keys(V_PROJ_WEIGHT).copy(), (- 1)) if (IN_PROJ_BIAS in mha_node.weights.keys()): in_proj_bias = mha_node.get_weights_by_keys(IN_PROJ_BIAS).copy() (qb, kb, vb) = np.split(in_proj_bias, 3) qb = (qb * factor) in_bias = True q_node_weights = {KERNEL: qk, BIAS: qb} k_node_weights = {KERNEL: kk, BIAS: kb} v_node_weights = {KERNEL: vk, BIAS: vb} else: in_bias = False q_node_weights = {KERNEL: qk} k_node_weights = {KERNEL: kk} v_node_weights = {KERNEL: vk} query_name = f'{mha_node.name}_query_in_proj' q_transpose_node = FunctionalNode(name=(query_name + '_transpose'), framework_attr={}, input_shape=params.q_input_shape, output_shape=params.q_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(q_transpose_node) q_node = BaseNode(name=query_name, framework_attr={IN_CHANNELS: params.embed_dim, 
OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.q_transpose_in_shape, output_shape=params.q_transpose_in_shape, weights=q_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(q_node, [q_transpose_node]) key_name = f'{mha_node.name}_key_in_proj' k_transpose_node = FunctionalNode(name=(key_name + '_transpose'), framework_attr={}, input_shape=params.k_input_shape, output_shape=params.k_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(k_transpose_node) k_node = BaseNode(name=key_name, framework_attr={IN_CHANNELS: params.kdim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.k_transpose_in_shape, output_shape=params.k_proj_in_shape, weights=k_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(k_node, [k_transpose_node]) value_name = f'{mha_node.name}_value_in_proj' v_transpose_node = FunctionalNode(name=(value_name + '_transpose'), framework_attr={}, input_shape=params.v_input_shape, output_shape=params.v_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(v_transpose_node) v_node = BaseNode(name=value_name, framework_attr={IN_CHANNELS: params.vdim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.v_transpose_in_shape, output_shape=params.v_proj_in_shape, weights=v_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(v_node, [v_transpose_node]) return (q_transpose_node, k_transpose_node, v_transpose_node, q_node, k_node, v_node)
def _project_input(self, graph: Graph, mha_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required to project q, k, v\n We implement the projection as Conv1d\n Due to the above we add transpose node to each of the inputs in order to swap the channel axis according\n to Conv1d expected input shape\n We describe below the shape transformation of each input (q, k, v) from the input shape,\n to transposing and projecting\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n params: MHAnode params.\n\n Returns:\n List of nodes transposing the inputs and nodes after input projection.\n ' factor = (params.qdim ** (- 0.5)) if (IN_PROJ_WEIGHT in mha_node.weights.keys()): in_proj_w = np.expand_dims(mha_node.get_weights_by_keys(IN_PROJ_WEIGHT).copy(), (- 1)) (qk, kk, vk) = np.split(in_proj_w, 3) qk = (qk * factor) else: qk = np.expand_dims((mha_node.get_weights_by_keys(Q_PROJ_WEIGHT).copy() * factor), (- 1)) kk = np.expand_dims(mha_node.get_weights_by_keys(K_PROJ_WEIGHT).copy(), (- 1)) vk = np.expand_dims(mha_node.get_weights_by_keys(V_PROJ_WEIGHT).copy(), (- 1)) if (IN_PROJ_BIAS in mha_node.weights.keys()): in_proj_bias = mha_node.get_weights_by_keys(IN_PROJ_BIAS).copy() (qb, kb, vb) = np.split(in_proj_bias, 3) qb = (qb * factor) in_bias = True q_node_weights = {KERNEL: qk, BIAS: qb} k_node_weights = {KERNEL: kk, BIAS: kb} v_node_weights = {KERNEL: vk, BIAS: vb} else: in_bias = False q_node_weights = {KERNEL: qk} k_node_weights = {KERNEL: kk} v_node_weights = {KERNEL: vk} query_name = f'{mha_node.name}_query_in_proj' q_transpose_node = FunctionalNode(name=(query_name + '_transpose'), framework_attr={}, input_shape=params.q_input_shape, output_shape=params.q_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(q_transpose_node) q_node = BaseNode(name=query_name, framework_attr={IN_CHANNELS: params.embed_dim, 
OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.q_transpose_in_shape, output_shape=params.q_transpose_in_shape, weights=q_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(q_node, [q_transpose_node]) key_name = f'{mha_node.name}_key_in_proj' k_transpose_node = FunctionalNode(name=(key_name + '_transpose'), framework_attr={}, input_shape=params.k_input_shape, output_shape=params.k_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(k_transpose_node) k_node = BaseNode(name=key_name, framework_attr={IN_CHANNELS: params.kdim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.k_transpose_in_shape, output_shape=params.k_proj_in_shape, weights=k_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(k_node, [k_transpose_node]) value_name = f'{mha_node.name}_value_in_proj' v_transpose_node = FunctionalNode(name=(value_name + '_transpose'), framework_attr={}, input_shape=params.v_input_shape, output_shape=params.v_transpose_in_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node(v_transpose_node) v_node = BaseNode(name=value_name, framework_attr={IN_CHANNELS: params.vdim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: in_bias}, input_shape=params.v_transpose_in_shape, output_shape=params.v_proj_in_shape, weights=v_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(v_node, [v_transpose_node]) return (q_transpose_node, k_transpose_node, v_transpose_node, q_node, k_node, v_node)<|docstring|>This method creates the nodes required to project q, k, v We implement the projection as Conv1d Due to the above we add transpose node to each of the inputs in order to swap the channel axis according to Conv1d expected input shape We describe below the shape transformation of each input (q, k, v) from 
the input shape, to transposing and projecting Args: graph: Graph to apply the substitution on. mha_node: MHA node. params: MHAnode params. Returns: List of nodes transposing the inputs and nodes after input projection.<|endoftext|>
d5949f2e21c4f32df122799a06a6722acbc51b281f8fc68e227901beaa3813c0
@staticmethod def _arrange_before_split(graph: Graph, mha_node: BaseNode, q_node: BaseNode, k_node: BaseNode, v_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required for arranging the shapes of q, k, v, after\n the input projection, before the split by head operation.\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n q_node: query node after input projection.\n k_node: key node after input projection.\n v_node: value node after input projection.\n params: MHAnode params.\n\n Returns:\n List of nodes after shape arranging.\n ' query_name = f'{mha_node.name}_query' q_reshape_node = FunctionalNode(name=(query_name + '_reshape'), framework_attr={}, input_shape=params.q_transpose_in_shape, output_shape=params.q_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.q_reshape_in_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(q_reshape_node, [q_node]) q_transpose_node = FunctionalNode(name=(query_name + '_transpose'), framework_attr={}, input_shape=params.q_reshape_in_shape, output_shape=params.q_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[2, 3], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(q_transpose_node, [q_reshape_node]) key_name = f'{mha_node.name}_key' k_reshape_node = FunctionalNode(name=(key_name + '_reshape'), framework_attr={}, input_shape=params.k_proj_in_shape, output_shape=params.k_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.k_reshape_in_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(k_reshape_node, [k_node]) value_name = f'{mha_node.name}_value' v_reshape_node = FunctionalNode(name=(value_name + '_reshape'), framework_attr={}, input_shape=params.v_proj_in_shape, output_shape=params.v_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.v_reshape_in_shape], 
op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(v_reshape_node, [v_node]) v_transpose_node = FunctionalNode(name=(value_name + '_transpose'), framework_attr={}, input_shape=params.v_reshape_in_shape, output_shape=params.v_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[2, 3], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(v_transpose_node, [v_reshape_node]) return (q_transpose_node, k_reshape_node, v_transpose_node)
This method creates the nodes required for arranging the shapes of q, k, v, after the input projection, before the split by head operation. Args: graph: Graph to apply the substitution on. mha_node: MHA node. q_node: query node after input projection. k_node: key node after input projection. v_node: value node after input projection. params: MHAnode params. Returns: List of nodes after shape arranging.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_arrange_before_split
reuvenperetz/model_optimization
42
python
@staticmethod def _arrange_before_split(graph: Graph, mha_node: BaseNode, q_node: BaseNode, k_node: BaseNode, v_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required for arranging the shapes of q, k, v, after\n the input projection, before the split by head operation.\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n q_node: query node after input projection.\n k_node: key node after input projection.\n v_node: value node after input projection.\n params: MHAnode params.\n\n Returns:\n List of nodes after shape arranging.\n ' query_name = f'{mha_node.name}_query' q_reshape_node = FunctionalNode(name=(query_name + '_reshape'), framework_attr={}, input_shape=params.q_transpose_in_shape, output_shape=params.q_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.q_reshape_in_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(q_reshape_node, [q_node]) q_transpose_node = FunctionalNode(name=(query_name + '_transpose'), framework_attr={}, input_shape=params.q_reshape_in_shape, output_shape=params.q_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[2, 3], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(q_transpose_node, [q_reshape_node]) key_name = f'{mha_node.name}_key' k_reshape_node = FunctionalNode(name=(key_name + '_reshape'), framework_attr={}, input_shape=params.k_proj_in_shape, output_shape=params.k_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.k_reshape_in_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(k_reshape_node, [k_node]) value_name = f'{mha_node.name}_value' v_reshape_node = FunctionalNode(name=(value_name + '_reshape'), framework_attr={}, input_shape=params.v_proj_in_shape, output_shape=params.v_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.v_reshape_in_shape], 
op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(v_reshape_node, [v_node]) v_transpose_node = FunctionalNode(name=(value_name + '_transpose'), framework_attr={}, input_shape=params.v_reshape_in_shape, output_shape=params.v_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[2, 3], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(v_transpose_node, [v_reshape_node]) return (q_transpose_node, k_reshape_node, v_transpose_node)
@staticmethod def _arrange_before_split(graph: Graph, mha_node: BaseNode, q_node: BaseNode, k_node: BaseNode, v_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required for arranging the shapes of q, k, v, after\n the input projection, before the split by head operation.\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n q_node: query node after input projection.\n k_node: key node after input projection.\n v_node: value node after input projection.\n params: MHAnode params.\n\n Returns:\n List of nodes after shape arranging.\n ' query_name = f'{mha_node.name}_query' q_reshape_node = FunctionalNode(name=(query_name + '_reshape'), framework_attr={}, input_shape=params.q_transpose_in_shape, output_shape=params.q_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.q_reshape_in_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(q_reshape_node, [q_node]) q_transpose_node = FunctionalNode(name=(query_name + '_transpose'), framework_attr={}, input_shape=params.q_reshape_in_shape, output_shape=params.q_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[2, 3], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(q_transpose_node, [q_reshape_node]) key_name = f'{mha_node.name}_key' k_reshape_node = FunctionalNode(name=(key_name + '_reshape'), framework_attr={}, input_shape=params.k_proj_in_shape, output_shape=params.k_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.k_reshape_in_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(k_reshape_node, [k_node]) value_name = f'{mha_node.name}_value' v_reshape_node = FunctionalNode(name=(value_name + '_reshape'), framework_attr={}, input_shape=params.v_proj_in_shape, output_shape=params.v_reshape_in_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.v_reshape_in_shape], 
op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(v_reshape_node, [v_node]) v_transpose_node = FunctionalNode(name=(value_name + '_transpose'), framework_attr={}, input_shape=params.v_reshape_in_shape, output_shape=params.v_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[2, 3], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(v_transpose_node, [v_reshape_node]) return (q_transpose_node, k_reshape_node, v_transpose_node)<|docstring|>This method creates the nodes required for arranging the shapes of q, k, v, after the input projection, before the split by head operation. Args: graph: Graph to apply the substitution on. mha_node: MHA node. q_node: query node after input projection. k_node: key node after input projection. v_node: value node after input projection. params: MHAnode params. Returns: List of nodes after shape arranging.<|endoftext|>
72e3b99dab22a2699dcf2a3f66fe4116b98c9ac62812a7282c029273e0b1d0c3
@staticmethod def _split_projected(graph: Graph, name: str, q_transpose_node: BaseNode, k_reshape_node: BaseNode, v_transpose_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required for splitting q, k, v to query, key and value per head\n (total of num_heads q, k and v).\n\n Args:\n graph: Graph to apply the substitution on.\n name: MHA node name.\n q_transpose_node: query node after shape arranging.\n k_reshape_node: key node after shape arranging.\n v_transpose_node: value node after shape arranging.\n params: MHAnode params.\n\n Returns:\n List of nodes after splitting.\n ' query_name = f'{name}_query' q_split_node = FunctionalNode(name=(query_name + '_split'), framework_attr={DIM: 1}, input_shape=params.q_transpose_shape, output_shape=([params.q_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(q_split_node, [q_transpose_node]) key_name = f'{name}_key' k_split_node = FunctionalNode(name=(key_name + '_split'), framework_attr={DIM: 1}, input_shape=params.k_reshape_in_shape, output_shape=([params.k_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(k_split_node, [k_reshape_node]) value_name = f'{name}_value' v_split_node = FunctionalNode(name=(value_name + '_split'), framework_attr={DIM: 1}, input_shape=params.v_reshape_in_shape, output_shape=([params.v_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(v_split_node, [v_transpose_node]) return (q_split_node, k_split_node, v_split_node)
This method creates the nodes required for splitting q, k, v to query, key and value per head (total of num_heads q, k and v). Args: graph: Graph to apply the substitution on. name: MHA node name. q_transpose_node: query node after shape arranging. k_reshape_node: key node after shape arranging. v_transpose_node: value node after shape arranging. params: MHAnode params. Returns: List of nodes after splitting.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_split_projected
reuvenperetz/model_optimization
42
python
@staticmethod def _split_projected(graph: Graph, name: str, q_transpose_node: BaseNode, k_reshape_node: BaseNode, v_transpose_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required for splitting q, k, v to query, key and value per head\n (total of num_heads q, k and v).\n\n Args:\n graph: Graph to apply the substitution on.\n name: MHA node name.\n q_transpose_node: query node after shape arranging.\n k_reshape_node: key node after shape arranging.\n v_transpose_node: value node after shape arranging.\n params: MHAnode params.\n\n Returns:\n List of nodes after splitting.\n ' query_name = f'{name}_query' q_split_node = FunctionalNode(name=(query_name + '_split'), framework_attr={DIM: 1}, input_shape=params.q_transpose_shape, output_shape=([params.q_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(q_split_node, [q_transpose_node]) key_name = f'{name}_key' k_split_node = FunctionalNode(name=(key_name + '_split'), framework_attr={DIM: 1}, input_shape=params.k_reshape_in_shape, output_shape=([params.k_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(k_split_node, [k_reshape_node]) value_name = f'{name}_value' v_split_node = FunctionalNode(name=(value_name + '_split'), framework_attr={DIM: 1}, input_shape=params.v_reshape_in_shape, output_shape=([params.v_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(v_split_node, [v_transpose_node]) return (q_split_node, k_split_node, v_split_node)
@staticmethod def _split_projected(graph: Graph, name: str, q_transpose_node: BaseNode, k_reshape_node: BaseNode, v_transpose_node: BaseNode, params: MHAParams) -> List[BaseNode]: '\n This method creates the nodes required for splitting q, k, v to query, key and value per head\n (total of num_heads q, k and v).\n\n Args:\n graph: Graph to apply the substitution on.\n name: MHA node name.\n q_transpose_node: query node after shape arranging.\n k_reshape_node: key node after shape arranging.\n v_transpose_node: value node after shape arranging.\n params: MHAnode params.\n\n Returns:\n List of nodes after splitting.\n ' query_name = f'{name}_query' q_split_node = FunctionalNode(name=(query_name + '_split'), framework_attr={DIM: 1}, input_shape=params.q_transpose_shape, output_shape=([params.q_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(q_split_node, [q_transpose_node]) key_name = f'{name}_key' k_split_node = FunctionalNode(name=(key_name + '_split'), framework_attr={DIM: 1}, input_shape=params.k_reshape_in_shape, output_shape=([params.k_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(k_split_node, [k_reshape_node]) value_name = f'{name}_value' v_split_node = FunctionalNode(name=(value_name + '_split'), framework_attr={DIM: 1}, input_shape=params.v_reshape_in_shape, output_shape=([params.v_split_shape] * params.num_heads), weights={}, layer_class=torch.split, op_call_args=[1], op_call_kwargs={DIM: 1}, functional_op=torch.split) graph.add_node_with_in_edges(v_split_node, [v_transpose_node]) return (q_split_node, k_split_node, v_split_node)<|docstring|>This method creates the nodes required for splitting q, k, v to query, key and value per head (total of num_heads q, k and v). Args: graph: Graph to apply the substitution on. 
name: MHA node name. q_transpose_node: query node after shape arranging. k_reshape_node: key node after shape arranging. v_transpose_node: value node after shape arranging. params: MHAnode params. Returns: List of nodes after splitting.<|endoftext|>
ca0c4fd983a6c7ea40b4f3a5cdff6d58c17effb88c4a29c9556ea4564b7f19b8
@staticmethod def _calc_attention_head(graph: Graph, q_in_node: BaseNode, k_in_node: BaseNode, v_in_node: BaseNode, mha_node: BaseNode, head_index: int, params: MHAParams) -> BaseNode: '\n This method creates the nodes required for attention calc by head\n\n Args:\n graph: Graph to apply the substitution on.\n q_in_node: query node after shape arranging.\n k_in_node: key node after shape arranging.\n v_in_node: value node after shape arranging.\n mha_node: MHA node.\n head_index: index of the head.\n params: MHAnode params.\n\n Returns:\n Node after attention calc.\n ' get_q_name = f'{mha_node.name}_get_q_{head_index}' get_q_node = FunctionalNode(name=get_q_name, framework_attr={}, input_shape=((params.q_split_shape,) * params.num_heads), output_shape=params.q_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_q_node, [q_in_node], [head_index]) get_k_name = f'{mha_node.name}_get_k_{head_index}' get_k_node = FunctionalNode(name=get_k_name, framework_attr={}, input_shape=((params.k_split_shape,) * params.num_heads), output_shape=params.k_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_k_node, [k_in_node], [head_index]) matmul_name = f'{mha_node.name}_matmul_{head_index}' matmul_node = FunctionalNode(name=matmul_name, framework_attr={}, input_shape=(params.q_split_shape, params.k_split_shape), output_shape=params.attn_mat_shape, weights={}, layer_class=torch.matmul, op_call_args=[], op_call_kwargs={}, functional_op=torch.matmul) graph.add_node_with_in_edges(matmul_node, [get_q_node, get_k_node]) softmax_name = f'{mha_node.name}_softmax_{head_index}' softmax_node = BaseNode(name=softmax_name, framework_attr={DIM: (- 1)}, input_shape=params.attn_mat_shape, output_shape=params.attn_mat_shape, weights={}, layer_class=nn.Softmax) 
graph.add_node_with_in_edges(softmax_node, [matmul_node]) get_v_name = f'{mha_node.name}_get_v_{head_index}' get_v_node = FunctionalNode(name=get_v_name, framework_attr={}, input_shape=((params.v_split_shape,) * params.num_heads), output_shape=params.v_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_v_node, [v_in_node], [head_index]) matmulv_name = f'{mha_node.name}_dotv_{head_index}' matmulv_node = FunctionalNode(name=matmulv_name, framework_attr={}, input_shape=(params.attn_mat_shape, params.v_split_shape), output_shape=params.attn_shape, weights={}, layer_class=torch.matmul, op_call_args=[], op_call_kwargs={}, functional_op=torch.matmul) graph.add_node_with_in_edges(matmulv_node, [softmax_node, get_v_node]) return matmulv_node
This method creates the nodes required for attention calc by head Args: graph: Graph to apply the substitution on. q_in_node: query node after shape arranging. k_in_node: key node after shape arranging. v_in_node: value node after shape arranging. mha_node: MHA node. head_index: index of the head. params: MHAnode params. Returns: Node after attention calc.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_calc_attention_head
reuvenperetz/model_optimization
42
python
@staticmethod def _calc_attention_head(graph: Graph, q_in_node: BaseNode, k_in_node: BaseNode, v_in_node: BaseNode, mha_node: BaseNode, head_index: int, params: MHAParams) -> BaseNode: '\n This method creates the nodes required for attention calc by head\n\n Args:\n graph: Graph to apply the substitution on.\n q_in_node: query node after shape arranging.\n k_in_node: key node after shape arranging.\n v_in_node: value node after shape arranging.\n mha_node: MHA node.\n head_index: index of the head.\n params: MHAnode params.\n\n Returns:\n Node after attention calc.\n ' get_q_name = f'{mha_node.name}_get_q_{head_index}' get_q_node = FunctionalNode(name=get_q_name, framework_attr={}, input_shape=((params.q_split_shape,) * params.num_heads), output_shape=params.q_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_q_node, [q_in_node], [head_index]) get_k_name = f'{mha_node.name}_get_k_{head_index}' get_k_node = FunctionalNode(name=get_k_name, framework_attr={}, input_shape=((params.k_split_shape,) * params.num_heads), output_shape=params.k_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_k_node, [k_in_node], [head_index]) matmul_name = f'{mha_node.name}_matmul_{head_index}' matmul_node = FunctionalNode(name=matmul_name, framework_attr={}, input_shape=(params.q_split_shape, params.k_split_shape), output_shape=params.attn_mat_shape, weights={}, layer_class=torch.matmul, op_call_args=[], op_call_kwargs={}, functional_op=torch.matmul) graph.add_node_with_in_edges(matmul_node, [get_q_node, get_k_node]) softmax_name = f'{mha_node.name}_softmax_{head_index}' softmax_node = BaseNode(name=softmax_name, framework_attr={DIM: (- 1)}, input_shape=params.attn_mat_shape, output_shape=params.attn_mat_shape, weights={}, layer_class=nn.Softmax) 
graph.add_node_with_in_edges(softmax_node, [matmul_node]) get_v_name = f'{mha_node.name}_get_v_{head_index}' get_v_node = FunctionalNode(name=get_v_name, framework_attr={}, input_shape=((params.v_split_shape,) * params.num_heads), output_shape=params.v_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_v_node, [v_in_node], [head_index]) matmulv_name = f'{mha_node.name}_dotv_{head_index}' matmulv_node = FunctionalNode(name=matmulv_name, framework_attr={}, input_shape=(params.attn_mat_shape, params.v_split_shape), output_shape=params.attn_shape, weights={}, layer_class=torch.matmul, op_call_args=[], op_call_kwargs={}, functional_op=torch.matmul) graph.add_node_with_in_edges(matmulv_node, [softmax_node, get_v_node]) return matmulv_node
@staticmethod def _calc_attention_head(graph: Graph, q_in_node: BaseNode, k_in_node: BaseNode, v_in_node: BaseNode, mha_node: BaseNode, head_index: int, params: MHAParams) -> BaseNode: '\n This method creates the nodes required for attention calc by head\n\n Args:\n graph: Graph to apply the substitution on.\n q_in_node: query node after shape arranging.\n k_in_node: key node after shape arranging.\n v_in_node: value node after shape arranging.\n mha_node: MHA node.\n head_index: index of the head.\n params: MHAnode params.\n\n Returns:\n Node after attention calc.\n ' get_q_name = f'{mha_node.name}_get_q_{head_index}' get_q_node = FunctionalNode(name=get_q_name, framework_attr={}, input_shape=((params.q_split_shape,) * params.num_heads), output_shape=params.q_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_q_node, [q_in_node], [head_index]) get_k_name = f'{mha_node.name}_get_k_{head_index}' get_k_node = FunctionalNode(name=get_k_name, framework_attr={}, input_shape=((params.k_split_shape,) * params.num_heads), output_shape=params.k_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_k_node, [k_in_node], [head_index]) matmul_name = f'{mha_node.name}_matmul_{head_index}' matmul_node = FunctionalNode(name=matmul_name, framework_attr={}, input_shape=(params.q_split_shape, params.k_split_shape), output_shape=params.attn_mat_shape, weights={}, layer_class=torch.matmul, op_call_args=[], op_call_kwargs={}, functional_op=torch.matmul) graph.add_node_with_in_edges(matmul_node, [get_q_node, get_k_node]) softmax_name = f'{mha_node.name}_softmax_{head_index}' softmax_node = BaseNode(name=softmax_name, framework_attr={DIM: (- 1)}, input_shape=params.attn_mat_shape, output_shape=params.attn_mat_shape, weights={}, layer_class=nn.Softmax) 
graph.add_node_with_in_edges(softmax_node, [matmul_node]) get_v_name = f'{mha_node.name}_get_v_{head_index}' get_v_node = FunctionalNode(name=get_v_name, framework_attr={}, input_shape=((params.v_split_shape,) * params.num_heads), output_shape=params.v_split_shape, weights={}, layer_class=operator.getitem, op_call_args=[head_index], op_call_kwargs={}, functional_op=operator.getitem) graph.add_node_with_in_edges(get_v_node, [v_in_node], [head_index]) matmulv_name = f'{mha_node.name}_dotv_{head_index}' matmulv_node = FunctionalNode(name=matmulv_name, framework_attr={}, input_shape=(params.attn_mat_shape, params.v_split_shape), output_shape=params.attn_shape, weights={}, layer_class=torch.matmul, op_call_args=[], op_call_kwargs={}, functional_op=torch.matmul) graph.add_node_with_in_edges(matmulv_node, [softmax_node, get_v_node]) return matmulv_node<|docstring|>This method creates the nodes required for attention calc by head Args: graph: Graph to apply the substitution on. q_in_node: query node after shape arranging. k_in_node: key node after shape arranging. v_in_node: value node after shape arranging. mha_node: MHA node. head_index: index of the head. params: MHAnode params. Returns: Node after attention calc.<|endoftext|>
c02649fa1b9038429bf099b42679d80739e1b598b2a1213d5471cc6a04638a24
@staticmethod def _cat_heads_reshape(graph: Graph, name: str, att_head_output_nodes: List[BaseNode], params: MHAParams) -> BaseNode: '\n This method creates the nodes required for concatenating all heads after attention\n\n Args:\n graph: Graph to apply the substitution on.\n name: MHA node name.\n att_head_output_nodes: list of nodes after attention.\n params: MHAnode params.\n\n Returns:\n Node after cat and reshape.\n ' cat_node = FunctionalNode(name=f'{name}_cat', framework_attr={DIM: 1}, input_shape=((params.attn_shape,) * params.num_heads), output_shape=params.attn_cat_shape, weights={}, layer_class=torch.cat, op_call_args=[], op_call_kwargs={DIM: 1}, functional_op=torch.cat, inputs_as_list=True) graph.add_node_with_in_edges(cat_node, att_head_output_nodes) transpose_node = FunctionalNode(name=f'{name}_transpose', framework_attr={}, input_shape=params.attn_cat_shape, output_shape=params.attn_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_node, [cat_node]) attn_reshape_node = FunctionalNode(name=f'{name}_attn_reshape', framework_attr={}, input_shape=params.attn_transpose_shape, output_shape=params.attn_reshape_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.attn_reshape_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(attn_reshape_node, [transpose_node]) return attn_reshape_node
This method creates the nodes required for concatenating all heads after attention Args: graph: Graph to apply the substitution on. name: MHA node name. att_head_output_nodes: list of nodes after attention. params: MHAnode params. Returns: Node after cat and reshape.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_cat_heads_reshape
reuvenperetz/model_optimization
42
python
@staticmethod def _cat_heads_reshape(graph: Graph, name: str, att_head_output_nodes: List[BaseNode], params: MHAParams) -> BaseNode: '\n This method creates the nodes required for concatenating all heads after attention\n\n Args:\n graph: Graph to apply the substitution on.\n name: MHA node name.\n att_head_output_nodes: list of nodes after attention.\n params: MHAnode params.\n\n Returns:\n Node after cat and reshape.\n ' cat_node = FunctionalNode(name=f'{name}_cat', framework_attr={DIM: 1}, input_shape=((params.attn_shape,) * params.num_heads), output_shape=params.attn_cat_shape, weights={}, layer_class=torch.cat, op_call_args=[], op_call_kwargs={DIM: 1}, functional_op=torch.cat, inputs_as_list=True) graph.add_node_with_in_edges(cat_node, att_head_output_nodes) transpose_node = FunctionalNode(name=f'{name}_transpose', framework_attr={}, input_shape=params.attn_cat_shape, output_shape=params.attn_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_node, [cat_node]) attn_reshape_node = FunctionalNode(name=f'{name}_attn_reshape', framework_attr={}, input_shape=params.attn_transpose_shape, output_shape=params.attn_reshape_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.attn_reshape_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(attn_reshape_node, [transpose_node]) return attn_reshape_node
@staticmethod def _cat_heads_reshape(graph: Graph, name: str, att_head_output_nodes: List[BaseNode], params: MHAParams) -> BaseNode: '\n This method creates the nodes required for concatenating all heads after attention\n\n Args:\n graph: Graph to apply the substitution on.\n name: MHA node name.\n att_head_output_nodes: list of nodes after attention.\n params: MHAnode params.\n\n Returns:\n Node after cat and reshape.\n ' cat_node = FunctionalNode(name=f'{name}_cat', framework_attr={DIM: 1}, input_shape=((params.attn_shape,) * params.num_heads), output_shape=params.attn_cat_shape, weights={}, layer_class=torch.cat, op_call_args=[], op_call_kwargs={DIM: 1}, functional_op=torch.cat, inputs_as_list=True) graph.add_node_with_in_edges(cat_node, att_head_output_nodes) transpose_node = FunctionalNode(name=f'{name}_transpose', framework_attr={}, input_shape=params.attn_cat_shape, output_shape=params.attn_transpose_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_node, [cat_node]) attn_reshape_node = FunctionalNode(name=f'{name}_attn_reshape', framework_attr={}, input_shape=params.attn_transpose_shape, output_shape=params.attn_reshape_shape, weights={}, layer_class=torch.reshape, op_call_args=[params.attn_reshape_shape], op_call_kwargs={}, functional_op=torch.reshape) graph.add_node_with_in_edges(attn_reshape_node, [transpose_node]) return attn_reshape_node<|docstring|>This method creates the nodes required for concatenating all heads after attention Args: graph: Graph to apply the substitution on. name: MHA node name. att_head_output_nodes: list of nodes after attention. params: MHAnode params. Returns: Node after cat and reshape.<|endoftext|>
e596b327b2e9a66dfea4876472dcc13f0d575cb6fd35f201a5044cd32176ce4e
def _project_output(self, graph: Graph, mha_node: BaseNode, attn_reshape_node: BaseNode, params: MHAParams) -> BaseNode: '\n This method creates the nodes required for output projecting\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n attn_reshape_node: attention node.\n params: MHAnode params.\n\n Returns:\n Node after projection.\n ' outk = np.expand_dims(mha_node.get_weights_by_keys(OUT_PROJ_WEIGHT).copy(), (- 1)) if (OUT_PROJ_BIAS in mha_node.weights.keys()): outb = mha_node.get_weights_by_keys(OUT_PROJ_BIAS).copy() out_bias = True out_proj_node_weights = {KERNEL: outk, BIAS: outb} else: out_bias = False out_proj_node_weights = {KERNEL: outk} out_name = f'{mha_node.name}_project_out' transpose_node = FunctionalNode(name=(out_name + '_transpose'), framework_attr={}, input_shape=params.attn_reshape_shape, output_shape=params.transpose_out_proj_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_node, [attn_reshape_node]) proj_out_node = BaseNode(name=out_name, framework_attr={IN_CHANNELS: params.embed_dim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: out_bias}, input_shape=params.transpose_out_proj_shape, output_shape=params.transpose_out_proj_shape, weights=out_proj_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(proj_out_node, [transpose_node]) transpose_out_node = FunctionalNode(name=f'{mha_node.name}_transpose_output', framework_attr={}, input_shape=params.transpose_out_proj_shape, output_shape=params.output_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_out_node, [proj_out_node]) return transpose_out_node
This method creates the nodes required for output projecting Args: graph: Graph to apply the substitution on. mha_node: MHA node. attn_reshape_node: attention node. params: MHAnode params. Returns: Node after projection.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_project_output
reuvenperetz/model_optimization
42
python
def _project_output(self, graph: Graph, mha_node: BaseNode, attn_reshape_node: BaseNode, params: MHAParams) -> BaseNode: '\n This method creates the nodes required for output projecting\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n attn_reshape_node: attention node.\n params: MHAnode params.\n\n Returns:\n Node after projection.\n ' outk = np.expand_dims(mha_node.get_weights_by_keys(OUT_PROJ_WEIGHT).copy(), (- 1)) if (OUT_PROJ_BIAS in mha_node.weights.keys()): outb = mha_node.get_weights_by_keys(OUT_PROJ_BIAS).copy() out_bias = True out_proj_node_weights = {KERNEL: outk, BIAS: outb} else: out_bias = False out_proj_node_weights = {KERNEL: outk} out_name = f'{mha_node.name}_project_out' transpose_node = FunctionalNode(name=(out_name + '_transpose'), framework_attr={}, input_shape=params.attn_reshape_shape, output_shape=params.transpose_out_proj_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_node, [attn_reshape_node]) proj_out_node = BaseNode(name=out_name, framework_attr={IN_CHANNELS: params.embed_dim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: out_bias}, input_shape=params.transpose_out_proj_shape, output_shape=params.transpose_out_proj_shape, weights=out_proj_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(proj_out_node, [transpose_node]) transpose_out_node = FunctionalNode(name=f'{mha_node.name}_transpose_output', framework_attr={}, input_shape=params.transpose_out_proj_shape, output_shape=params.output_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_out_node, [proj_out_node]) return transpose_out_node
def _project_output(self, graph: Graph, mha_node: BaseNode, attn_reshape_node: BaseNode, params: MHAParams) -> BaseNode: '\n This method creates the nodes required for output projecting\n\n Args:\n graph: Graph to apply the substitution on.\n mha_node: MHA node.\n attn_reshape_node: attention node.\n params: MHAnode params.\n\n Returns:\n Node after projection.\n ' outk = np.expand_dims(mha_node.get_weights_by_keys(OUT_PROJ_WEIGHT).copy(), (- 1)) if (OUT_PROJ_BIAS in mha_node.weights.keys()): outb = mha_node.get_weights_by_keys(OUT_PROJ_BIAS).copy() out_bias = True out_proj_node_weights = {KERNEL: outk, BIAS: outb} else: out_bias = False out_proj_node_weights = {KERNEL: outk} out_name = f'{mha_node.name}_project_out' transpose_node = FunctionalNode(name=(out_name + '_transpose'), framework_attr={}, input_shape=params.attn_reshape_shape, output_shape=params.transpose_out_proj_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_node, [attn_reshape_node]) proj_out_node = BaseNode(name=out_name, framework_attr={IN_CHANNELS: params.embed_dim, OUT_CHANNELS: params.embed_dim, KERNEL_SIZE: 1, BIAS: out_bias}, input_shape=params.transpose_out_proj_shape, output_shape=params.transpose_out_proj_shape, weights=out_proj_node_weights, layer_class=nn.Conv1d) graph.add_node_with_in_edges(proj_out_node, [transpose_node]) transpose_out_node = FunctionalNode(name=f'{mha_node.name}_transpose_output', framework_attr={}, input_shape=params.transpose_out_proj_shape, output_shape=params.output_shape, weights={}, layer_class=torch.transpose, op_call_args=[1, 2], op_call_kwargs={}, functional_op=torch.transpose) graph.add_node_with_in_edges(transpose_out_node, [proj_out_node]) return transpose_out_node<|docstring|>This method creates the nodes required for output projecting Args: graph: Graph to apply the substitution on. mha_node: MHA node. attn_reshape_node: attention node. 
params: MHAnode params. Returns: Node after projection.<|endoftext|>
b3c340f72878e1f1176546040ab8c0d2dfd6a920658bb1abcce9f910e7fd08ad
@staticmethod def _connect_to_graph(graph: Graph, mha_node: BaseNode, q_node: BaseNode, k_node: BaseNode, v_node: BaseNode, output_permute_node: BaseNode): '\n connect subgraph to input graph\n Args:\n graph: input graph\n mha_node: MHA node to substitute inputs and outputs with\n q_node: 1st input to MHA node\n k_node: 2nd input to MHA node\n v_node: 3rd input to MHA node\n output_permute_node: output node of MHA node\n ' (query_in_edge, key_in_edge, value_in_edge) = graph.in_edges(mha_node) graph.add_edge(query_in_edge[0], q_node, **graph.get_edge_data(*query_in_edge, 0)) graph.add_edge(key_in_edge[0], k_node, **graph.get_edge_data(*key_in_edge, 0)) graph.add_edge(value_in_edge[0], v_node, **graph.get_edge_data(*value_in_edge, 0)) graph.remove_edge(query_in_edge[0], mha_node) graph.remove_edge(key_in_edge[0], mha_node) graph.remove_edge(value_in_edge[0], mha_node) graph.reconnect_out_edges(current_node=mha_node, new_node=output_permute_node)
connect subgraph to input graph Args: graph: input graph mha_node: MHA node to substitute inputs and outputs with q_node: 1st input to MHA node k_node: 2nd input to MHA node v_node: 3rd input to MHA node output_permute_node: output node of MHA node
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
_connect_to_graph
reuvenperetz/model_optimization
42
python
@staticmethod def _connect_to_graph(graph: Graph, mha_node: BaseNode, q_node: BaseNode, k_node: BaseNode, v_node: BaseNode, output_permute_node: BaseNode): '\n connect subgraph to input graph\n Args:\n graph: input graph\n mha_node: MHA node to substitute inputs and outputs with\n q_node: 1st input to MHA node\n k_node: 2nd input to MHA node\n v_node: 3rd input to MHA node\n output_permute_node: output node of MHA node\n ' (query_in_edge, key_in_edge, value_in_edge) = graph.in_edges(mha_node) graph.add_edge(query_in_edge[0], q_node, **graph.get_edge_data(*query_in_edge, 0)) graph.add_edge(key_in_edge[0], k_node, **graph.get_edge_data(*key_in_edge, 0)) graph.add_edge(value_in_edge[0], v_node, **graph.get_edge_data(*value_in_edge, 0)) graph.remove_edge(query_in_edge[0], mha_node) graph.remove_edge(key_in_edge[0], mha_node) graph.remove_edge(value_in_edge[0], mha_node) graph.reconnect_out_edges(current_node=mha_node, new_node=output_permute_node)
@staticmethod def _connect_to_graph(graph: Graph, mha_node: BaseNode, q_node: BaseNode, k_node: BaseNode, v_node: BaseNode, output_permute_node: BaseNode): '\n connect subgraph to input graph\n Args:\n graph: input graph\n mha_node: MHA node to substitute inputs and outputs with\n q_node: 1st input to MHA node\n k_node: 2nd input to MHA node\n v_node: 3rd input to MHA node\n output_permute_node: output node of MHA node\n ' (query_in_edge, key_in_edge, value_in_edge) = graph.in_edges(mha_node) graph.add_edge(query_in_edge[0], q_node, **graph.get_edge_data(*query_in_edge, 0)) graph.add_edge(key_in_edge[0], k_node, **graph.get_edge_data(*key_in_edge, 0)) graph.add_edge(value_in_edge[0], v_node, **graph.get_edge_data(*value_in_edge, 0)) graph.remove_edge(query_in_edge[0], mha_node) graph.remove_edge(key_in_edge[0], mha_node) graph.remove_edge(value_in_edge[0], mha_node) graph.reconnect_out_edges(current_node=mha_node, new_node=output_permute_node)<|docstring|>connect subgraph to input graph Args: graph: input graph mha_node: MHA node to substitute inputs and outputs with q_node: 1st input to MHA node k_node: 2nd input to MHA node v_node: 3rd input to MHA node output_permute_node: output node of MHA node<|endoftext|>
91262c8c70d024e8ca5cad2dae6ab8eea023b474a4ed4e06f8cee9f0b8515d41
def substitute(self, graph: Graph, mha_node: BaseNode) -> Graph: '\n connect subgraph to input graph\n Args:\n graph: input graph\n mha_node: MHA node to substitute inputs and outputs with\n Returns:\n Graph after applying the substitution.\n ' if mha_node.reuse: raise Exception("MCT doesn't support reuse of MultiHeadAttention layer") params = MHAParams(mha_node) (q_transpose_node, k_transpose_node, v_transpose_node, q_node, k_node, v_node) = self._project_input(graph, mha_node, params) (q_fixed_node, k_fixed_node, v_fixed_node) = self._arrange_before_split(graph, mha_node, q_node, k_node, v_node, params) (q_split_node, k_split_node, v_split_node) = self._split_projected(graph, mha_node.name, q_fixed_node, k_fixed_node, v_fixed_node, params) att_head_output_nodes = [] for h in range(params.num_heads): dotv_node = self._calc_attention_head(graph, q_split_node, k_split_node, v_split_node, mha_node, h, params) att_head_output_nodes.append(dotv_node) attn_reshape_node = self._cat_heads_reshape(graph, mha_node.name, att_head_output_nodes, params) proj_output = self._project_output(graph, mha_node, attn_reshape_node, params) self._connect_to_graph(graph, mha_node, q_transpose_node, k_transpose_node, v_transpose_node, proj_output) graph.remove_node(mha_node, new_graph_outputs=[OutTensor(proj_output, 0)]) return graph
connect subgraph to input graph Args: graph: input graph mha_node: MHA node to substitute inputs and outputs with Returns: Graph after applying the substitution.
model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
substitute
reuvenperetz/model_optimization
42
python
def substitute(self, graph: Graph, mha_node: BaseNode) -> Graph: '\n connect subgraph to input graph\n Args:\n graph: input graph\n mha_node: MHA node to substitute inputs and outputs with\n Returns:\n Graph after applying the substitution.\n ' if mha_node.reuse: raise Exception("MCT doesn't support reuse of MultiHeadAttention layer") params = MHAParams(mha_node) (q_transpose_node, k_transpose_node, v_transpose_node, q_node, k_node, v_node) = self._project_input(graph, mha_node, params) (q_fixed_node, k_fixed_node, v_fixed_node) = self._arrange_before_split(graph, mha_node, q_node, k_node, v_node, params) (q_split_node, k_split_node, v_split_node) = self._split_projected(graph, mha_node.name, q_fixed_node, k_fixed_node, v_fixed_node, params) att_head_output_nodes = [] for h in range(params.num_heads): dotv_node = self._calc_attention_head(graph, q_split_node, k_split_node, v_split_node, mha_node, h, params) att_head_output_nodes.append(dotv_node) attn_reshape_node = self._cat_heads_reshape(graph, mha_node.name, att_head_output_nodes, params) proj_output = self._project_output(graph, mha_node, attn_reshape_node, params) self._connect_to_graph(graph, mha_node, q_transpose_node, k_transpose_node, v_transpose_node, proj_output) graph.remove_node(mha_node, new_graph_outputs=[OutTensor(proj_output, 0)]) return graph
def substitute(self, graph: Graph, mha_node: BaseNode) -> Graph: '\n connect subgraph to input graph\n Args:\n graph: input graph\n mha_node: MHA node to substitute inputs and outputs with\n Returns:\n Graph after applying the substitution.\n ' if mha_node.reuse: raise Exception("MCT doesn't support reuse of MultiHeadAttention layer") params = MHAParams(mha_node) (q_transpose_node, k_transpose_node, v_transpose_node, q_node, k_node, v_node) = self._project_input(graph, mha_node, params) (q_fixed_node, k_fixed_node, v_fixed_node) = self._arrange_before_split(graph, mha_node, q_node, k_node, v_node, params) (q_split_node, k_split_node, v_split_node) = self._split_projected(graph, mha_node.name, q_fixed_node, k_fixed_node, v_fixed_node, params) att_head_output_nodes = [] for h in range(params.num_heads): dotv_node = self._calc_attention_head(graph, q_split_node, k_split_node, v_split_node, mha_node, h, params) att_head_output_nodes.append(dotv_node) attn_reshape_node = self._cat_heads_reshape(graph, mha_node.name, att_head_output_nodes, params) proj_output = self._project_output(graph, mha_node, attn_reshape_node, params) self._connect_to_graph(graph, mha_node, q_transpose_node, k_transpose_node, v_transpose_node, proj_output) graph.remove_node(mha_node, new_graph_outputs=[OutTensor(proj_output, 0)]) return graph<|docstring|>connect subgraph to input graph Args: graph: input graph mha_node: MHA node to substitute inputs and outputs with Returns: Graph after applying the substitution.<|endoftext|>
ddb1462a2a8cace7e123b6e2474c09d3dc1a398e4baa93c275d61dcbadda1771
def get_rot_matrix(theta): '\n returns the rotation matrix given a theta value(radians)\n ' return np.asarray([[np.cos(theta), (- np.sin(theta))], [np.sin(theta), np.cos(theta)]])
returns the rotation matrix given a theta value(radians)
envs/gridworld_drone.py
get_rot_matrix
ranok92/deepirl
2
python
def get_rot_matrix(theta): '\n \n ' return np.asarray([[np.cos(theta), (- np.sin(theta))], [np.sin(theta), np.cos(theta)]])
def get_rot_matrix(theta): '\n \n ' return np.asarray([[np.cos(theta), (- np.sin(theta))], [np.sin(theta), np.cos(theta)]])<|docstring|>returns the rotation matrix given a theta value(radians)<|endoftext|>
e7525739d22209ae7bf567cb7ecc34d1dc3ac123ccb9e8e1275d22861aa535d6
def draw_obstacle(self, obs): '\n ped_position : [row, col]\n ' font = pygame.freetype.Font(None, 15) index = (int(obs['id']) % len(self.ped_images)) self.gameDisplay.blit(self.ped_images[index], ((obs['position'][1] - (self.obs_width / 2)), (obs['position'][0] - (self.obs_width / 2)))) font.render_to(self.gameDisplay, (((obs['position'][1] - (self.obs_width / 2)) - 10), ((obs['position'][0] - (self.obs_width / 2)) - 5)), obs['id'], fgcolor=(0, 0, 0)) if self.show_orientation: if (obs['orientation'] is not None): pygame.draw.line(self.gameDisplay, self.black, [obs['position'][1], obs['position'][0]], [(obs['position'][1] + (obs['orientation'][1] * 10)), (obs['position'][0] + (obs['orientation'][0] * 10))], 2)
ped_position : [row, col]
envs/gridworld_drone.py
draw_obstacle
ranok92/deepirl
2
python
def draw_obstacle(self, obs): '\n \n ' font = pygame.freetype.Font(None, 15) index = (int(obs['id']) % len(self.ped_images)) self.gameDisplay.blit(self.ped_images[index], ((obs['position'][1] - (self.obs_width / 2)), (obs['position'][0] - (self.obs_width / 2)))) font.render_to(self.gameDisplay, (((obs['position'][1] - (self.obs_width / 2)) - 10), ((obs['position'][0] - (self.obs_width / 2)) - 5)), obs['id'], fgcolor=(0, 0, 0)) if self.show_orientation: if (obs['orientation'] is not None): pygame.draw.line(self.gameDisplay, self.black, [obs['position'][1], obs['position'][0]], [(obs['position'][1] + (obs['orientation'][1] * 10)), (obs['position'][0] + (obs['orientation'][0] * 10))], 2)
def draw_obstacle(self, obs): '\n \n ' font = pygame.freetype.Font(None, 15) index = (int(obs['id']) % len(self.ped_images)) self.gameDisplay.blit(self.ped_images[index], ((obs['position'][1] - (self.obs_width / 2)), (obs['position'][0] - (self.obs_width / 2)))) font.render_to(self.gameDisplay, (((obs['position'][1] - (self.obs_width / 2)) - 10), ((obs['position'][0] - (self.obs_width / 2)) - 5)), obs['id'], fgcolor=(0, 0, 0)) if self.show_orientation: if (obs['orientation'] is not None): pygame.draw.line(self.gameDisplay, self.black, [obs['position'][1], obs['position'][0]], [(obs['position'][1] + (obs['orientation'][1] * 10)), (obs['position'][0] + (obs['orientation'][0] * 10))], 2)<|docstring|>ped_position : [row, col]<|endoftext|>
2903be17202abd69537513eb2d7f44dcff1a581e6baa96bb03bd69f57792a960
def generate_annotation_list(self): '\n Reads lines from an annotation file and creates a list\n ' if (self.annotation_file is not None): if (not os.path.isfile(self.annotation_file)): print('The annotation file does not exist.') exit() with open(self.annotation_file) as f: for line in f: line = line.strip().split(' ') if (len(line) == 2): print('Overriding the size of the environment from the annotation file.') self.rows = int(line[1]) self.cols = int(line[0]) if (len(line) == 4): self.annotation_list.append(line) else: self.annotation_list = []
Reads lines from an annotation file and creates a list
envs/gridworld_drone.py
generate_annotation_list
ranok92/deepirl
2
python
def generate_annotation_list(self): '\n \n ' if (self.annotation_file is not None): if (not os.path.isfile(self.annotation_file)): print('The annotation file does not exist.') exit() with open(self.annotation_file) as f: for line in f: line = line.strip().split(' ') if (len(line) == 2): print('Overriding the size of the environment from the annotation file.') self.rows = int(line[1]) self.cols = int(line[0]) if (len(line) == 4): self.annotation_list.append(line) else: self.annotation_list = []
def generate_annotation_list(self): '\n \n ' if (self.annotation_file is not None): if (not os.path.isfile(self.annotation_file)): print('The annotation file does not exist.') exit() with open(self.annotation_file) as f: for line in f: line = line.strip().split(' ') if (len(line) == 2): print('Overriding the size of the environment from the annotation file.') self.rows = int(line[1]) self.cols = int(line[0]) if (len(line) == 4): self.annotation_list.append(line) else: self.annotation_list = []<|docstring|>Reads lines from an annotation file and creates a list<|endoftext|>
55d50b9bf5e2d3f8406261f4082e22b70e29ad70cec63cf42e491c0572eeda4f
def generate_pedestrian_dict(self): "\n Unlike the annotation dict, where the frames are the keys and the information is stored\n based on each frame. Here the information is stored based on the pedestrians i.e. each pedestrian\n corresponds to a key in the dictionary and the corresponding to that key is a list consisting of the\n trajectory information of that particular pedestrian\n\n ***THERE SHOULD NOT BE ANY SKIPPING OF FRAMES***\n\n The format of the dictionary:\n\n pedestrian_dict['ped_id']['frame_no']{'pos': numpy, 'orientation': numpy, 'speed': float}\n\n " for entry in self.annotation_list: if (entry[1] not in self.pedestrian_dict.keys()): self.pedestrian_dict[str(entry[1])] = {} self.pedestrian_dict[str(entry[1])]['initial_frame'] = str(entry[0]) self.pedestrian_dict[str(entry[1])]['final_frame'] = str(entry[0]) speed = None orientation = None pos = np.asarray([float(entry[2]), float(entry[3])]) else: pos = np.asarray([float(entry[2]), float(entry[3])]) orientation = (pos - self.pedestrian_dict[str(entry[1])][str((int(entry[0]) - 1))]['position']) speed = np.linalg.norm(orientation) self.pedestrian_dict[str(entry[1])][str(entry[0])] = {} if (int(self.pedestrian_dict[str(entry[1])]['final_frame']) < int(entry[0])): self.pedestrian_dict[str(entry[1])]['final_frame'] = str(entry[0]) "\n the format of the dictionary : ped_dict['ped_id']['frame_id']['pos', 'orientation', 'speed']\n " self.pedestrian_dict[str(entry[1])][str(entry[0])]['position'] = pos self.pedestrian_dict[str(entry[1])][str(entry[0])]['orientation'] = orientation self.pedestrian_dict[str(entry[1])][str(entry[0])]['speed'] = speed self.last_pedestrian = max(list(map(int, list(self.pedestrian_dict.keys()))))
Unlike the annotation dict, where the frames are the keys and the information is stored based on each frame. Here the information is stored based on the pedestrians i.e. each pedestrian corresponds to a key in the dictionary and the corresponding to that key is a list consisting of the trajectory information of that particular pedestrian ***THERE SHOULD NOT BE ANY SKIPPING OF FRAMES*** The format of the dictionary: pedestrian_dict['ped_id']['frame_no']{'pos': numpy, 'orientation': numpy, 'speed': float}
envs/gridworld_drone.py
generate_pedestrian_dict
ranok92/deepirl
2
python
def generate_pedestrian_dict(self): "\n Unlike the annotation dict, where the frames are the keys and the information is stored\n based on each frame. Here the information is stored based on the pedestrians i.e. each pedestrian\n corresponds to a key in the dictionary and the corresponding to that key is a list consisting of the\n trajectory information of that particular pedestrian\n\n ***THERE SHOULD NOT BE ANY SKIPPING OF FRAMES***\n\n The format of the dictionary:\n\n pedestrian_dict['ped_id']['frame_no']{'pos': numpy, 'orientation': numpy, 'speed': float}\n\n " for entry in self.annotation_list: if (entry[1] not in self.pedestrian_dict.keys()): self.pedestrian_dict[str(entry[1])] = {} self.pedestrian_dict[str(entry[1])]['initial_frame'] = str(entry[0]) self.pedestrian_dict[str(entry[1])]['final_frame'] = str(entry[0]) speed = None orientation = None pos = np.asarray([float(entry[2]), float(entry[3])]) else: pos = np.asarray([float(entry[2]), float(entry[3])]) orientation = (pos - self.pedestrian_dict[str(entry[1])][str((int(entry[0]) - 1))]['position']) speed = np.linalg.norm(orientation) self.pedestrian_dict[str(entry[1])][str(entry[0])] = {} if (int(self.pedestrian_dict[str(entry[1])]['final_frame']) < int(entry[0])): self.pedestrian_dict[str(entry[1])]['final_frame'] = str(entry[0]) "\n the format of the dictionary : ped_dict['ped_id']['frame_id']['pos', 'orientation', 'speed']\n " self.pedestrian_dict[str(entry[1])][str(entry[0])]['position'] = pos self.pedestrian_dict[str(entry[1])][str(entry[0])]['orientation'] = orientation self.pedestrian_dict[str(entry[1])][str(entry[0])]['speed'] = speed self.last_pedestrian = max(list(map(int, list(self.pedestrian_dict.keys()))))
def generate_pedestrian_dict(self): "\n Unlike the annotation dict, where the frames are the keys and the information is stored\n based on each frame. Here the information is stored based on the pedestrians i.e. each pedestrian\n corresponds to a key in the dictionary and the corresponding to that key is a list consisting of the\n trajectory information of that particular pedestrian\n\n ***THERE SHOULD NOT BE ANY SKIPPING OF FRAMES***\n\n The format of the dictionary:\n\n pedestrian_dict['ped_id']['frame_no']{'pos': numpy, 'orientation': numpy, 'speed': float}\n\n " for entry in self.annotation_list: if (entry[1] not in self.pedestrian_dict.keys()): self.pedestrian_dict[str(entry[1])] = {} self.pedestrian_dict[str(entry[1])]['initial_frame'] = str(entry[0]) self.pedestrian_dict[str(entry[1])]['final_frame'] = str(entry[0]) speed = None orientation = None pos = np.asarray([float(entry[2]), float(entry[3])]) else: pos = np.asarray([float(entry[2]), float(entry[3])]) orientation = (pos - self.pedestrian_dict[str(entry[1])][str((int(entry[0]) - 1))]['position']) speed = np.linalg.norm(orientation) self.pedestrian_dict[str(entry[1])][str(entry[0])] = {} if (int(self.pedestrian_dict[str(entry[1])]['final_frame']) < int(entry[0])): self.pedestrian_dict[str(entry[1])]['final_frame'] = str(entry[0]) "\n the format of the dictionary : ped_dict['ped_id']['frame_id']['pos', 'orientation', 'speed']\n " self.pedestrian_dict[str(entry[1])][str(entry[0])]['position'] = pos self.pedestrian_dict[str(entry[1])][str(entry[0])]['orientation'] = orientation self.pedestrian_dict[str(entry[1])][str(entry[0])]['speed'] = speed self.last_pedestrian = max(list(map(int, list(self.pedestrian_dict.keys()))))<|docstring|>Unlike the annotation dict, where the frames are the keys and the information is stored based on each frame. Here the information is stored based on the pedestrians i.e. 
each pedestrian corresponds to a key in the dictionary and the corresponding to that key is a list consisting of the trajectory information of that particular pedestrian ***THERE SHOULD NOT BE ANY SKIPPING OF FRAMES*** The format of the dictionary: pedestrian_dict['ped_id']['frame_no']{'pos': numpy, 'orientation': numpy, 'speed': float}<|endoftext|>
55813f2100740293981bf3a6fc51d9edb5457d2fd0ca67e6764fa5340a971a02
def generate_annotation_dict_universal(self): '\n Reads information from files with the following (general) format\n frame , id, y_coord, x_coord\n ' print('Loading information. . .') subject_final_frame = (- 1) for entry in self.annotation_list: if (self.cur_ped is not None): if (float(entry[1]) == self.subject): if (subject_final_frame < int(entry[0])): subject_final_frame = int(entry[0]) self.goal_state = np.array([float(entry[2]), float(entry[3])]) if (entry[0] not in self.annotation_dict): self.annotation_dict[entry[0]] = [] self.annotation_dict[entry[0]].append(entry) if (self.cur_ped is None): if (self.initial_frame > int(entry[0])): self.initial_frame = int(entry[0]) if (self.final_frame < int(entry[0])): self.final_frame = int(entry[0]) elif (float(entry[1]) == self.cur_ped): if (self.initial_frame > int(entry[0])): self.initial_frame = int(entry[0]) if (self.final_frame < int(entry[0])): self.final_frame = int(entry[0]) print('Done loading information.') print('initial_frame', self.initial_frame) print('final_frame', self.final_frame) print('cellWidth', self.cellWidth)
Reads information from files with the following (general) format frame , id, y_coord, x_coord
envs/gridworld_drone.py
generate_annotation_dict_universal
ranok92/deepirl
2
python
def generate_annotation_dict_universal(self): '\n Reads information from files with the following (general) format\n frame , id, y_coord, x_coord\n ' print('Loading information. . .') subject_final_frame = (- 1) for entry in self.annotation_list: if (self.cur_ped is not None): if (float(entry[1]) == self.subject): if (subject_final_frame < int(entry[0])): subject_final_frame = int(entry[0]) self.goal_state = np.array([float(entry[2]), float(entry[3])]) if (entry[0] not in self.annotation_dict): self.annotation_dict[entry[0]] = [] self.annotation_dict[entry[0]].append(entry) if (self.cur_ped is None): if (self.initial_frame > int(entry[0])): self.initial_frame = int(entry[0]) if (self.final_frame < int(entry[0])): self.final_frame = int(entry[0]) elif (float(entry[1]) == self.cur_ped): if (self.initial_frame > int(entry[0])): self.initial_frame = int(entry[0]) if (self.final_frame < int(entry[0])): self.final_frame = int(entry[0]) print('Done loading information.') print('initial_frame', self.initial_frame) print('final_frame', self.final_frame) print('cellWidth', self.cellWidth)
def generate_annotation_dict_universal(self): '\n Reads information from files with the following (general) format\n frame , id, y_coord, x_coord\n ' print('Loading information. . .') subject_final_frame = (- 1) for entry in self.annotation_list: if (self.cur_ped is not None): if (float(entry[1]) == self.subject): if (subject_final_frame < int(entry[0])): subject_final_frame = int(entry[0]) self.goal_state = np.array([float(entry[2]), float(entry[3])]) if (entry[0] not in self.annotation_dict): self.annotation_dict[entry[0]] = [] self.annotation_dict[entry[0]].append(entry) if (self.cur_ped is None): if (self.initial_frame > int(entry[0])): self.initial_frame = int(entry[0]) if (self.final_frame < int(entry[0])): self.final_frame = int(entry[0]) elif (float(entry[1]) == self.cur_ped): if (self.initial_frame > int(entry[0])): self.initial_frame = int(entry[0]) if (self.final_frame < int(entry[0])): self.final_frame = int(entry[0]) print('Done loading information.') print('initial_frame', self.initial_frame) print('final_frame', self.final_frame) print('cellWidth', self.cellWidth)<|docstring|>Reads information from files with the following (general) format frame , id, y_coord, x_coord<|endoftext|>
c6a4f32e91fc97cbddd993b32380726b0bc6910e104db4f5cfc6f9d9f1add71b
def get_state_from_frame_universal(self, frame_info):
    """
    Update self.state from one frame of a processed (pre-indexed) dataset.

    Args:
        frame_info: list of annotation rows for the current frame; each row's
            element[1] is the pedestrian id used to index self.pedestrian_dict.

    Side effects:
        Rebuilds self.obstacles and self.state['obstacles']; when the agent is
        not externally controlled, refreshes self.agent_state,
        self.state['agent_state'] and self.cur_heading_dir; tracks the ghost
        pedestrian's state in self.ghost_state / self.ghost_state_history.
    """
    # Rebuild the obstacle list from scratch for this frame.
    self.obstacles = []
    for element in frame_info:
        # skip_list holds float pedestrian ids to exclude from obstacles
        # (presumably the agent and ghost — confirm against caller).
        if (float(element[1]) not in self.skip_list):
            obs = self.pedestrian_dict[element[1]][str(self.current_frame)]
            # NOTE(review): obs aliases the entry stored in pedestrian_dict,
            # so adding 'id' mutates the shared record — confirm intended.
            obs['id'] = element[1]
            self.obstacles.append(obs)
        if (not self.external_control):
            # Agent state is driven by the dataset only when the environment
            # controls the agent itself.
            if (float(element[1]) == self.cur_ped):
                agent = self.pedestrian_dict[element[1]][str(self.current_frame)]
                self.agent_state = agent
                self.state['agent_state'] = utils.copy_dict(self.agent_state)
                # Heading measured against the fixed reference vector (-1, 0),
                # normalized into [0, 360) degrees.
                ref_vector = np.asarray([(- 1), 0])
                if (self.state['agent_state']['orientation'] is not None):
                    self.cur_heading_dir = ((360 + rad_to_deg(total_angle_between(self.state['agent_state']['orientation'], ref_vector))) % 360)
                else:
                    # No motion yet (first frame): default heading of 0.
                    self.cur_heading_dir = 0
        if (float(element[1]) == self.ghost):
            # Snapshot the ghost's state; deepcopy so history entries are not
            # affected by later in-place mutation of the dict record.
            self.ghost_state = self.pedestrian_dict[element[1]][str(self.current_frame)]
            self.ghost_state_history.append((copy.deepcopy(self.ghost_state), self.current_frame))
    self.state['obstacles'] = self.obstacles
For processed datasets
envs/gridworld_drone.py
get_state_from_frame_universal
ranok92/deepirl
2
python
def get_state_from_frame_universal(self, frame_info): '\n \n ' self.obstacles = [] for element in frame_info: if (float(element[1]) not in self.skip_list): obs = self.pedestrian_dict[element[1]][str(self.current_frame)] obs['id'] = element[1] self.obstacles.append(obs) if (not self.external_control): if (float(element[1]) == self.cur_ped): agent = self.pedestrian_dict[element[1]][str(self.current_frame)] self.agent_state = agent self.state['agent_state'] = utils.copy_dict(self.agent_state) ref_vector = np.asarray([(- 1), 0]) if (self.state['agent_state']['orientation'] is not None): self.cur_heading_dir = ((360 + rad_to_deg(total_angle_between(self.state['agent_state']['orientation'], ref_vector))) % 360) else: self.cur_heading_dir = 0 if (float(element[1]) == self.ghost): self.ghost_state = self.pedestrian_dict[element[1]][str(self.current_frame)] self.ghost_state_history.append((copy.deepcopy(self.ghost_state), self.current_frame)) self.state['obstacles'] = self.obstacles
def get_state_from_frame_universal(self, frame_info): '\n \n ' self.obstacles = [] for element in frame_info: if (float(element[1]) not in self.skip_list): obs = self.pedestrian_dict[element[1]][str(self.current_frame)] obs['id'] = element[1] self.obstacles.append(obs) if (not self.external_control): if (float(element[1]) == self.cur_ped): agent = self.pedestrian_dict[element[1]][str(self.current_frame)] self.agent_state = agent self.state['agent_state'] = utils.copy_dict(self.agent_state) ref_vector = np.asarray([(- 1), 0]) if (self.state['agent_state']['orientation'] is not None): self.cur_heading_dir = ((360 + rad_to_deg(total_angle_between(self.state['agent_state']['orientation'], ref_vector))) % 360) else: self.cur_heading_dir = 0 if (float(element[1]) == self.ghost): self.ghost_state = self.pedestrian_dict[element[1]][str(self.current_frame)] self.ghost_state_history.append((copy.deepcopy(self.ghost_state), self.current_frame)) self.state['obstacles'] = self.obstacles<|docstring|>For processed datasets<|endoftext|>