| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class: python) | body_without_docstring (string, 20-98.2k chars) |
|---|---|---|---|---|---|---|---|
def make_catalog_db(catalogitems):
'Takes an array of catalog items and builds some indexes so we can\n get our common data faster. Returns a dict we can use like a database'
name_table = {}
pkgid_table = {}
itemindex = (- 1)
for item in catalogitems:
itemindex = (itemindex + 1)
name = item.get('name', 'NO NAME')
vers = item.get('version', 'NO VERSION')
if ((name == 'NO NAME') or (vers == 'NO VERSION')):
display.display_warning('Bad pkginfo: %s', item)
vers = pkgutils.trim_version_string(vers)
if (not (name in name_table)):
name_table[name] = {}
if (not (vers in name_table[name])):
name_table[name][vers] = []
name_table[name][vers].append(itemindex)
for receipt in item.get('receipts', []):
if (('packageid' in receipt) and ('version' in receipt)):
pkg_id = receipt['packageid']
version = receipt['version']
if (not (pkg_id in pkgid_table)):
pkgid_table[pkg_id] = {}
if (not (version in pkgid_table[pkg_id])):
pkgid_table[pkg_id][version] = []
pkgid_table[pkg_id][version].append(itemindex)
updaters = [item for item in catalogitems if item.get('update_for')]
for update in updaters:
if is_a_string(update['update_for']):
update['update_for'] = [update['update_for']]
autoremoveitems = [item.get('name') for item in catalogitems if item.get('autoremove')]
autoremoveitems = list(set(autoremoveitems))
pkgdb = {}
pkgdb['named'] = name_table
pkgdb['receipts'] = pkgid_table
pkgdb['updaters'] = updaters
pkgdb['autoremoveitems'] = autoremoveitems
pkgdb['items'] = catalogitems
return pkgdb
| 4,497,944,335,075,595,300
|
Takes an array of catalog items and builds some indexes so we can
get our common data faster. Returns a dict we can use like a database
|
code/client/munkilib/updatecheck/catalogs.py
|
make_catalog_db
|
Artoria2e5/munki
|
python
|
def make_catalog_db(catalogitems):
'Takes an array of catalog items and builds some indexes so we can\n get our common data faster. Returns a dict we can use like a database'
name_table = {}
pkgid_table = {}
itemindex = (- 1)
for item in catalogitems:
itemindex = (itemindex + 1)
name = item.get('name', 'NO NAME')
vers = item.get('version', 'NO VERSION')
if ((name == 'NO NAME') or (vers == 'NO VERSION')):
display.display_warning('Bad pkginfo: %s', item)
vers = pkgutils.trim_version_string(vers)
if (not (name in name_table)):
name_table[name] = {}
if (not (vers in name_table[name])):
name_table[name][vers] = []
name_table[name][vers].append(itemindex)
for receipt in item.get('receipts', []):
if (('packageid' in receipt) and ('version' in receipt)):
pkg_id = receipt['packageid']
version = receipt['version']
if (not (pkg_id in pkgid_table)):
pkgid_table[pkg_id] = {}
if (not (version in pkgid_table[pkg_id])):
pkgid_table[pkg_id][version] = []
pkgid_table[pkg_id][version].append(itemindex)
updaters = [item for item in catalogitems if item.get('update_for')]
for update in updaters:
if is_a_string(update['update_for']):
update['update_for'] = [update['update_for']]
autoremoveitems = [item.get('name') for item in catalogitems if item.get('autoremove')]
autoremoveitems = list(set(autoremoveitems))
pkgdb = {}
pkgdb['named'] = name_table
pkgdb['receipts'] = pkgid_table
pkgdb['updaters'] = updaters
pkgdb['autoremoveitems'] = autoremoveitems
pkgdb['items'] = catalogitems
return pkgdb
|
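To make the index structure concrete, here is a hedged sketch with hypothetical pkginfo items (names, versions, and package ids are invented; real versions are additionally normalized by pkgutils.trim_version_string):

```python
# Hypothetical catalog items, only to illustrate the shape of the returned db.
catalogitems = [
    {'name': 'Firefox', 'version': '102.0',
     'receipts': [{'packageid': 'org.mozilla.firefox', 'version': '102.0'}]},
    {'name': 'Firefox', 'version': '103.0'},
]
# pkgdb = make_catalog_db(catalogitems) would yield roughly:
#   pkgdb['named']    == {'Firefox': {'102.0': [0], '103.0': [1]}}
#   pkgdb['receipts'] == {'org.mozilla.firefox': {'102.0': [0]}}
#   pkgdb['items']    is catalogitems itself; the integers are indexes into it.
```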
def add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname):
'Adds packageids from each catalogitem to two dictionaries.\n One maps itemnames to receipt pkgids, the other maps receipt pkgids\n to itemnames'
for item in catalogitems:
name = item.get('name')
if (not name):
continue
if item.get('receipts'):
if (not (name in itemname_to_pkgid)):
itemname_to_pkgid[name] = {}
for receipt in item['receipts']:
if (('packageid' in receipt) and ('version' in receipt)):
pkgid = receipt['packageid']
vers = receipt['version']
if (not (pkgid in itemname_to_pkgid[name])):
itemname_to_pkgid[name][pkgid] = []
if (not (vers in itemname_to_pkgid[name][pkgid])):
itemname_to_pkgid[name][pkgid].append(vers)
if (not (pkgid in pkgid_to_itemname)):
pkgid_to_itemname[pkgid] = {}
if (not (name in pkgid_to_itemname[pkgid])):
pkgid_to_itemname[pkgid][name] = []
if (not (vers in pkgid_to_itemname[pkgid][name])):
pkgid_to_itemname[pkgid][name].append(vers)
| -4,838,195,185,309,404,000
|
Adds packageids from each catalogitem to two dictionaries.
One maps itemnames to receipt pkgids, the other maps receipt pkgids
to itemnames
|
code/client/munkilib/updatecheck/catalogs.py
|
add_package_ids
|
Artoria2e5/munki
|
python
|
def add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname):
'Adds packageids from each catalogitem to two dictionaries.\n One maps itemnames to receipt pkgids, the other maps receipt pkgids\n to itemnames'
for item in catalogitems:
name = item.get('name')
if (not name):
continue
if item.get('receipts'):
if (not (name in itemname_to_pkgid)):
itemname_to_pkgid[name] = {}
for receipt in item['receipts']:
if (('packageid' in receipt) and ('version' in receipt)):
pkgid = receipt['packageid']
vers = receipt['version']
if (not (pkgid in itemname_to_pkgid[name])):
itemname_to_pkgid[name][pkgid] = []
if (not (vers in itemname_to_pkgid[name][pkgid])):
itemname_to_pkgid[name][pkgid].append(vers)
if (not (pkgid in pkgid_to_itemname)):
pkgid_to_itemname[pkgid] = {}
if (not (name in pkgid_to_itemname[pkgid])):
pkgid_to_itemname[pkgid][name] = []
if (not (vers in pkgid_to_itemname[pkgid][name])):
pkgid_to_itemname[pkgid][name].append(vers)
|
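add_package_ids only mutates plain dicts, so its effect can be shown with a tiny, hypothetical catalog (the item name and package ids below are invented):

```python
itemname_to_pkgid = {}
pkgid_to_itemname = {}
catalogitems = [
    {'name': 'Office', 'receipts': [
        {'packageid': 'com.example.word', 'version': '16.1'},
        {'packageid': 'com.example.excel', 'version': '16.1'},
    ]},
]
add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname)
# itemname_to_pkgid == {'Office': {'com.example.word': ['16.1'],
#                                  'com.example.excel': ['16.1']}}
# pkgid_to_itemname == {'com.example.word': {'Office': ['16.1']},
#                       'com.example.excel': {'Office': ['16.1']}}
```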
def split_name_and_version(some_string):
"Splits a string into the name and version number.\n\n Name and version must be separated with a hyphen ('-')\n or double hyphen ('--').\n 'TextWrangler-2.3b1' becomes ('TextWrangler', '2.3b1')\n 'AdobePhotoshopCS3--11.2.1' becomes ('AdobePhotoshopCS3', '11.2.1')\n 'MicrosoftOffice2008-12.2.1' becomes ('MicrosoftOffice2008', '12.2.1')\n "
for delim in ('--', '-'):
if (some_string.count(delim) > 0):
chunks = some_string.split(delim)
vers = chunks.pop()
name = delim.join(chunks)
if (vers[0] in '0123456789'):
return (name, vers)
return (some_string, '')
| -8,237,331,361,948,013,000
|
Splits a string into the name and version number.
Name and version must be separated with a hyphen ('-')
or double hyphen ('--').
'TextWrangler-2.3b1' becomes ('TextWrangler', '2.3b1')
'AdobePhotoshopCS3--11.2.1' becomes ('AdobePhotoshopCS3', '11.2.1')
'MicrosoftOffice2008-12.2.1' becomes ('MicrosoftOffice2008', '12.2.1')
|
code/client/munkilib/updatecheck/catalogs.py
|
split_name_and_version
|
Artoria2e5/munki
|
python
|
def split_name_and_version(some_string):
"Splits a string into the name and version number.\n\n Name and version must be separated with a hyphen ('-')\n or double hyphen ('--').\n 'TextWrangler-2.3b1' becomes ('TextWrangler', '2.3b1')\n 'AdobePhotoshopCS3--11.2.1' becomes ('AdobePhotoshopCS3', '11.2.1')\n 'MicrosoftOffice2008-12.2.1' becomes ('MicrosoftOffice2008', '12.2.1')\n "
for delim in ('--', '-'):
if (some_string.count(delim) > 0):
chunks = some_string.split(delim)
vers = chunks.pop()
name = delim.join(chunks)
if (vers[0] in '0123456789'):
return (name, vers)
return (some_string, '')
|
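A hedged usage sketch of the splitting rules described in the docstring (assuming the function is imported from munkilib.updatecheck.catalogs):

```python
split_name_and_version('TextWrangler-2.3b1')         # ('TextWrangler', '2.3b1')
split_name_and_version('AdobePhotoshopCS3--11.2.1')  # ('AdobePhotoshopCS3', '11.2.1')
split_name_and_version('Firefox')                    # ('Firefox', '')  no delimiter
split_name_and_version('check-again')                # ('check-again', '')  'again' is not numeric
```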
def get_all_items_with_name(name, cataloglist):
'Searches the catalogs in a list for all items matching a given name.\n\n Returns:\n list of pkginfo items; sorted with newest version first. No precedence\n is given to catalog order.\n '
def item_version(item):
'Returns a MunkiLooseVersion for pkginfo item'
return pkgutils.MunkiLooseVersion(item['version'])
itemlist = []
name = split_name_and_version(name)[0]
display.display_debug1('Looking for all items matching: %s...', name)
for catalogname in cataloglist:
if (not (catalogname in list(_CATALOG.keys()))):
continue
if (name in _CATALOG[catalogname]['named']):
versionsmatchingname = _CATALOG[catalogname]['named'][name]
for vers in versionsmatchingname:
if (vers == 'latest'):
continue
indexlist = _CATALOG[catalogname]['named'][name][vers]
for index in indexlist:
thisitem = _CATALOG[catalogname]['items'][index]
if (not (thisitem in itemlist)):
display.display_debug1('Adding item %s, version %s from catalog %s...', name, thisitem['version'], catalogname)
itemlist.append(thisitem)
if itemlist:
itemlist.sort(key=item_version, reverse=True)
return itemlist
| 6,239,491,405,437,279,000
|
Searches the catalogs in a list for all items matching a given name.
Returns:
list of pkginfo items; sorted with newest version first. No precedence
is given to catalog order.
|
code/client/munkilib/updatecheck/catalogs.py
|
get_all_items_with_name
|
Artoria2e5/munki
|
python
|
def get_all_items_with_name(name, cataloglist):
'Searches the catalogs in a list for all items matching a given name.\n\n Returns:\n list of pkginfo items; sorted with newest version first. No precedence\n is given to catalog order.\n '
def item_version(item):
'Returns a MunkiLooseVersion for pkginfo item'
return pkgutils.MunkiLooseVersion(item['version'])
itemlist = []
name = split_name_and_version(name)[0]
display.display_debug1('Looking for all items matching: %s...', name)
for catalogname in cataloglist:
if (not (catalogname in list(_CATALOG.keys()))):
continue
if (name in _CATALOG[catalogname]['named']):
versionsmatchingname = _CATALOG[catalogname]['named'][name]
for vers in versionsmatchingname:
if (vers == 'latest'):
continue
indexlist = _CATALOG[catalogname]['named'][name][vers]
for index in indexlist:
thisitem = _CATALOG[catalogname]['items'][index]
if (not (thisitem in itemlist)):
display.display_debug1('Adding item %s, version %s from catalog %s...', name, thisitem['version'], catalogname)
itemlist.append(thisitem)
if itemlist:
itemlist.sort(key=item_version, reverse=True)
return itemlist
|
def get_auto_removal_items(installinfo, cataloglist):
'Gets a list of items marked for automatic removal from the catalogs\n in cataloglist. Filters those against items in the processed_installs\n list, which should contain everything that is supposed to be installed.\n Then filters against the removals list, which contains all the removals\n that have already been processed.\n '
autoremovalnames = []
for catalogname in (cataloglist or []):
if (catalogname in list(_CATALOG.keys())):
autoremovalnames += _CATALOG[catalogname]['autoremoveitems']
processed_installs_names = [split_name_and_version(item)[0] for item in installinfo['processed_installs']]
autoremovalnames = [item for item in autoremovalnames if ((item not in processed_installs_names) and (item not in installinfo['processed_uninstalls']))]
return autoremovalnames
| 4,459,911,686,786,282,500
|
Gets a list of items marked for automatic removal from the catalogs
in cataloglist. Filters those against items in the processed_installs
list, which should contain everything that is supposed to be installed.
Then filters against the removals list, which contains all the removals
that have already been processed.
|
code/client/munkilib/updatecheck/catalogs.py
|
get_auto_removal_items
|
Artoria2e5/munki
|
python
|
def get_auto_removal_items(installinfo, cataloglist):
'Gets a list of items marked for automatic removal from the catalogs\n in cataloglist. Filters those against items in the processed_installs\n list, which should contain everything that is supposed to be installed.\n Then filters against the removals list, which contains all the removals\n that have already been processed.\n '
autoremovalnames = []
for catalogname in (cataloglist or []):
if (catalogname in list(_CATALOG.keys())):
autoremovalnames += _CATALOG[catalogname]['autoremoveitems']
processed_installs_names = [split_name_and_version(item)[0] for item in installinfo['processed_installs']]
autoremovalnames = [item for item in autoremovalnames if ((item not in processed_installs_names) and (item not in installinfo['processed_uninstalls']))]
return autoremovalnames
|
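A hedged sketch of the filtering (hypothetical names; installinfo is shown with only the two keys this function reads):

```python
installinfo = {
    'processed_installs': ['KeepMe-1.0'],     # entries may carry a version suffix
    'processed_uninstalls': ['AlreadyGone'],
}
# If the catalogs flagged ['KeepMe', 'AlreadyGone', 'OldTool'] for autoremoval,
# only 'OldTool' survives: 'KeepMe' is being installed and 'AlreadyGone' is
# already scheduled for removal.
```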
def look_for_updates(itemname, cataloglist):
"Looks for updates for a given manifest item that is either\n installed or scheduled to be installed or removed. This handles not only\n specific application updates, but also updates that aren't simply\n later versions of the manifest item.\n For example, AdobeCameraRaw is an update for Adobe Photoshop, but\n doesn't update the version of Adobe Photoshop.\n Returns a list of manifestitem names that are updates for\n manifestitem.\n "
display.display_debug1('Looking for updates for: %s', itemname)
update_list = []
for catalogname in cataloglist:
if (catalogname not in _CATALOG):
continue
updaters = _CATALOG[catalogname]['updaters']
update_items = [catalogitem['name'] for catalogitem in updaters if (itemname in catalogitem.get('update_for', []))]
if update_items:
update_list.extend(update_items)
update_list = list(set(update_list))
if update_list:
num_updates = len(update_list)
update_list_display = ', '.join((str(x) for x in update_list))
display.display_debug1('Found %s update(s): %s', num_updates, update_list_display)
return update_list
| 6,435,376,517,397,081,000
|
Looks for updates for a given manifest item that is either
installed or scheduled to be installed or removed. This handles not only
specific application updates, but also updates that aren't simply
later versions of the manifest item.
For example, AdobeCameraRaw is an update for Adobe Photoshop, but
doesn't update the version of Adobe Photoshop.
Returns a list of manifestitem names that are updates for
manifestitem.
|
code/client/munkilib/updatecheck/catalogs.py
|
look_for_updates
|
Artoria2e5/munki
|
python
|
def look_for_updates(itemname, cataloglist):
"Looks for updates for a given manifest item that is either\n installed or scheduled to be installed or removed. This handles not only\n specific application updates, but also updates that aren't simply\n later versions of the manifest item.\n For example, AdobeCameraRaw is an update for Adobe Photoshop, but\n doesn't update the version of Adobe Photoshop.\n Returns a list of manifestitem names that are updates for\n manifestitem.\n "
display.display_debug1('Looking for updates for: %s', itemname)
update_list = []
for catalogname in cataloglist:
if (catalogname not in _CATALOG):
continue
updaters = _CATALOG[catalogname]['updaters']
update_items = [catalogitem['name'] for catalogitem in updaters if (itemname in catalogitem.get('update_for', []))]
if update_items:
update_list.extend(update_items)
update_list = list(set(update_list))
if update_list:
num_updates = len(update_list)
update_list_display = ', '.join((str(x) for x in update_list))
display.display_debug1('Found %s update(s): %s', num_updates, update_list_display)
return update_list
|
def look_for_updates_for_version(itemname, itemversion, cataloglist):
'Looks for updates for a specific version of an item. Since these\n can appear in manifests and pkginfo as item-version or item--version\n we have to search twice.'
name_and_version = ('%s-%s' % (itemname, itemversion))
alt_name_and_version = ('%s--%s' % (itemname, itemversion))
update_list = look_for_updates(name_and_version, cataloglist)
update_list.extend(look_for_updates(alt_name_and_version, cataloglist))
update_list = list(set(update_list))
return update_list
| 7,019,348,813,548,713,000
|
Looks for updates for a specific version of an item. Since these
can appear in manifests and pkginfo as item-version or item--version
we have to search twice.
|
code/client/munkilib/updatecheck/catalogs.py
|
look_for_updates_for_version
|
Artoria2e5/munki
|
python
|
def look_for_updates_for_version(itemname, itemversion, cataloglist):
'Looks for updates for a specific version of an item. Since these\n can appear in manifests and pkginfo as item-version or item--version\n we have to search twice.'
name_and_version = ('%s-%s' % (itemname, itemversion))
alt_name_and_version = ('%s--%s' % (itemname, itemversion))
update_list = look_for_updates(name_and_version, cataloglist)
update_list.extend(look_for_updates(alt_name_and_version, cataloglist))
update_list = list(set(update_list))
return update_list
|
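A brief illustration of the double lookup; both delimiter spellings are queried and the results de-duplicated:

```python
itemname, itemversion = 'AdobePhotoshopCS3', '11.2.1'
queries = ['%s-%s' % (itemname, itemversion),    # 'AdobePhotoshopCS3-11.2.1'
           '%s--%s' % (itemname, itemversion)]   # 'AdobePhotoshopCS3--11.2.1'
```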
def best_version_match(vers_num, item_dict):
'Attempts to find the best match in item_dict for vers_num'
vers_tuple = vers_num.split('.')
precision = 1
while (precision <= len(vers_tuple)):
test_vers = '.'.join(vers_tuple[0:precision])
match_names = []
for item in item_dict.keys():
for item_version in item_dict[item]:
if (item_version.startswith(test_vers) and (item not in match_names)):
match_names.append(item)
if (len(match_names) == 1):
return match_names[0]
precision = (precision + 1)
return None
| -552,078,823,119,619,400
|
Attempts to find the best match in item_dict for vers_num
|
code/client/munkilib/updatecheck/catalogs.py
|
best_version_match
|
Artoria2e5/munki
|
python
|
def best_version_match(vers_num, item_dict):
vers_tuple = vers_num.split('.')
precision = 1
while (precision <= len(vers_tuple)):
test_vers = '.'.join(vers_tuple[0:precision])
match_names = []
for item in item_dict.keys():
for item_version in item_dict[item]:
if (item_version.startswith(test_vers) and (item not in match_names)):
match_names.append(item)
if (len(match_names) == 1):
return match_names[0]
precision = (precision + 1)
return None
|
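A hedged illustration of the precision widening: prefixes of vers_num grow one component at a time until exactly one candidate item matches (the data below is hypothetical):

```python
item_dict = {
    'PhotoshopCS5': ['12.0.0', '12.0.4'],
    'PhotoshopCS6': ['13.0.1'],
}
best_version_match('13.0.1.2', item_dict)  # 'PhotoshopCS6' (prefix '13' is already unique)
best_version_match('12.0.4', item_dict)    # 'PhotoshopCS5'
best_version_match('14.0', item_dict)      # None: no prefix ever matches
```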
@utils.Memoize
def analyze_installed_pkgs():
'Analyze catalog data and installed packages in an attempt to determine\n what is installed.'
pkgdata = {}
itemname_to_pkgid = {}
pkgid_to_itemname = {}
for catalogname in _CATALOG:
catalogitems = _CATALOG[catalogname]['items']
add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname)
installedpkgs = pkgutils.getInstalledPackages()
installed = []
partiallyinstalled = []
installedpkgsmatchedtoname = {}
for name in itemname_to_pkgid:
foundpkgcount = 0
for pkgid in itemname_to_pkgid[name]:
if (pkgid in installedpkgs):
foundpkgcount += 1
if (not (name in installedpkgsmatchedtoname)):
installedpkgsmatchedtoname[name] = []
installedpkgsmatchedtoname[name].append(pkgid)
if (foundpkgcount > 0):
if (foundpkgcount == len(itemname_to_pkgid[name])):
installed.append(name)
else:
partiallyinstalled.append(name)
for name in partiallyinstalled:
pkgsforthisname = installedpkgsmatchedtoname[name]
allotherpkgs = []
for othername in installed:
allotherpkgs.extend(installedpkgsmatchedtoname[othername])
for othername in partiallyinstalled:
if (othername != name):
allotherpkgs.extend(installedpkgsmatchedtoname[othername])
uniquepkgs = list((set(pkgsforthisname) - set(allotherpkgs)))
if uniquepkgs:
installed.append(name)
partiallyinstalled = [item for item in partiallyinstalled if (item not in installed)]
references = {}
for name in installed:
for pkgid in installedpkgsmatchedtoname[name]:
if (not (pkgid in references)):
references[pkgid] = []
references[pkgid].append(name)
orphans = [pkgid for pkgid in installedpkgs if (pkgid not in references)]
matched_orphans = []
for pkgid in orphans:
if (pkgid in pkgid_to_itemname):
installed_pkgid_version = installedpkgs[pkgid]
possible_match_items = pkgid_to_itemname[pkgid]
best_match = best_version_match(installed_pkgid_version, possible_match_items)
if best_match:
matched_orphans.append(best_match)
for name in matched_orphans:
if (name not in installed):
installed.append(name)
if (name in partiallyinstalled):
partiallyinstalled.remove(name)
for pkgid in installedpkgsmatchedtoname[name]:
if (not (pkgid in references)):
references[pkgid] = []
if (not (name in references[pkgid])):
references[pkgid].append(name)
pkgdata['receipts_for_name'] = installedpkgsmatchedtoname
pkgdata['installed_names'] = installed
pkgdata['pkg_references'] = references
return pkgdata
| -5,312,295,383,452,086,000
|
Analyze catalog data and installed packages in an attempt to determine
what is installed.
|
code/client/munkilib/updatecheck/catalogs.py
|
analyze_installed_pkgs
|
Artoria2e5/munki
|
python
|
@utils.Memoize
def analyze_installed_pkgs():
'Analyze catalog data and installed packages in an attempt to determine\n what is installed.'
pkgdata = {}
itemname_to_pkgid = {}
pkgid_to_itemname = {}
for catalogname in _CATALOG:
catalogitems = _CATALOG[catalogname]['items']
add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname)
installedpkgs = pkgutils.getInstalledPackages()
installed = []
partiallyinstalled = []
installedpkgsmatchedtoname = {}
for name in itemname_to_pkgid:
foundpkgcount = 0
for pkgid in itemname_to_pkgid[name]:
if (pkgid in installedpkgs):
foundpkgcount += 1
if (not (name in installedpkgsmatchedtoname)):
installedpkgsmatchedtoname[name] = []
installedpkgsmatchedtoname[name].append(pkgid)
if (foundpkgcount > 0):
if (foundpkgcount == len(itemname_to_pkgid[name])):
installed.append(name)
else:
partiallyinstalled.append(name)
for name in partiallyinstalled:
pkgsforthisname = installedpkgsmatchedtoname[name]
allotherpkgs = []
for othername in installed:
allotherpkgs.extend(installedpkgsmatchedtoname[othername])
for othername in partiallyinstalled:
if (othername != name):
allotherpkgs.extend(installedpkgsmatchedtoname[othername])
uniquepkgs = list((set(pkgsforthisname) - set(allotherpkgs)))
if uniquepkgs:
installed.append(name)
partiallyinstalled = [item for item in partiallyinstalled if (item not in installed)]
references = {}
for name in installed:
for pkgid in installedpkgsmatchedtoname[name]:
if (not (pkgid in references)):
references[pkgid] = []
references[pkgid].append(name)
orphans = [pkgid for pkgid in installedpkgs if (pkgid not in references)]
matched_orphans = []
for pkgid in orphans:
if (pkgid in pkgid_to_itemname):
installed_pkgid_version = installedpkgs[pkgid]
possible_match_items = pkgid_to_itemname[pkgid]
best_match = best_version_match(installed_pkgid_version, possible_match_items)
if best_match:
matched_orphans.append(best_match)
for name in matched_orphans:
if (name not in installed):
installed.append(name)
if (name in partiallyinstalled):
partiallyinstalled.remove(name)
for pkgid in installedpkgsmatchedtoname[name]:
if (not (pkgid in references)):
references[pkgid] = []
if (not (name in references[pkgid])):
references[pkgid].append(name)
pkgdata['receipts_for_name'] = installedpkgsmatchedtoname
pkgdata['installed_names'] = installed
pkgdata['pkg_references'] = references
return pkgdata
|
def get_item_detail(name, cataloglist, vers='', skip_min_os_check=False, suppress_warnings=False):
"Searches the catalogs in list for an item matching the given name that\n can be installed on the current hardware/OS (optionally skipping the\n minimum OS check so we can return an item that requires a higher OS)\n\n If no version is supplied, but the version is appended to the name\n ('TextWrangler--2.3.0.0.0') that version is used.\n If no version is given at all, the latest version is assumed.\n Returns a pkginfo item, or None.\n "
rejected_items = []
machine = info.getMachineFacts()
def munki_version_ok(item):
'Returns a boolean to indicate if the current Munki version is high\n enough to install this item. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('minimum_munki_version'):
min_munki_vers = item['minimum_munki_version']
display.display_debug1('Considering item %s, version %s with minimum Munki version required %s', item['name'], item['version'], min_munki_vers)
display.display_debug1('Our Munki version is %s', machine['munki_version'])
if (pkgutils.MunkiLooseVersion(machine['munki_version']) < pkgutils.MunkiLooseVersion(min_munki_vers)):
reason = ('Rejected item %s, version %s with minimum Munki version required %s. Our Munki version is %s.' % (item['name'], item['version'], item['minimum_munki_version'], machine['munki_version']))
rejected_items.append(reason)
return False
return True
def os_version_ok(item, skip_min_os_check=False):
'Returns a boolean to indicate if the item is ok to install under\n the current OS. If not, also adds the failure reason to the\n rejected_items list. If skip_min_os_check is True, skips the minimum os\n version check.'
if (item.get('minimum_os_version') and (not skip_min_os_check)):
min_os_vers = item['minimum_os_version']
display.display_debug1('Considering item %s, version %s with minimum os version required %s', item['name'], item['version'], min_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) < pkgutils.MunkiLooseVersion(min_os_vers)):
reason = ('Rejected item %s, version %s with minimum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['minimum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
if item.get('maximum_os_version'):
max_os_vers = item['maximum_os_version']
display.display_debug1('Considering item %s, version %s with maximum os version supported %s', item['name'], item['version'], max_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) > pkgutils.MunkiLooseVersion(max_os_vers)):
reason = ('Rejected item %s, version %s with maximum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['maximum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
return True
def cpu_arch_ok(item):
'Returns a boolean to indicate if the item is ok to install under\n the current CPU architecture. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('supported_architectures'):
display.display_debug1('Considering item %s, version %s with supported architectures: %s', item['name'], item['version'], item['supported_architectures'])
display.display_debug1('Our architecture is %s', machine['arch'])
if (machine['arch'] in item['supported_architectures']):
return True
if (('x86_64' in item['supported_architectures']) and (machine['arch'] == 'i386') and (machine['x86_64_capable'] is True)):
return True
reason = ('Rejected item %s, version %s with supported architectures: %s. Our architecture is %s.' % (item['name'], item['version'], item['supported_architectures'], machine['arch']))
rejected_items.append(reason)
return False
return True
def installable_condition_ok(item):
'Returns a boolean to indicate if an installable_condition predicate\n in the current item passes. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('installable_condition'):
if (not info.predicate_evaluates_as_true(item['installable_condition'])):
rejected_items.append(('Rejected item %s, version %s with installable_condition: %s.' % (item['name'], item['version'], item['installable_condition'])))
return False
return True
if (vers == 'apple_update_metadata'):
vers = 'latest'
else:
(name, includedversion) = split_name_and_version(name)
if (includedversion and (vers == '')):
vers = includedversion
if vers:
vers = pkgutils.trim_version_string(vers)
else:
vers = 'latest'
if skip_min_os_check:
display.display_debug1('Looking for detail for: %s, version %s, ignoring minimum_os_version...', name, vers)
else:
display.display_debug1('Looking for detail for: %s, version %s...', name, vers)
for catalogname in cataloglist:
if ((catalogname in _CATALOG) and (name in _CATALOG[catalogname]['named'])):
itemsmatchingname = _CATALOG[catalogname]['named'][name]
indexlist = []
if (vers == 'latest'):
versionlist = list(itemsmatchingname.keys())
versionlist.sort(key=pkgutils.MunkiLooseVersion, reverse=True)
for versionkey in versionlist:
indexlist.extend(itemsmatchingname[versionkey])
elif (vers in list(itemsmatchingname.keys())):
indexlist = itemsmatchingname[vers]
if indexlist:
display.display_debug1(('Considering %s items with name %s from catalog %s' % (len(indexlist), name, catalogname)))
for index in indexlist:
item = _CATALOG[catalogname]['items'][index]
if (munki_version_ok(item) and os_version_ok(item, skip_min_os_check=skip_min_os_check) and cpu_arch_ok(item) and installable_condition_ok(item)):
display.display_debug1('Found %s, version %s in catalog %s', item['name'], item['version'], catalogname)
return item
display.display_debug1('Not found')
for reason in rejected_items:
if suppress_warnings:
display.display_debug1(reason)
else:
display.display_warning(reason)
return None
| -1,328,711,189,519,149,000
|
Searches the catalogs in list for an item matching the given name that
can be installed on the current hardware/OS (optionally skipping the
minimum OS check so we can return an item that requires a higher OS)
If no version is supplied, but the version is appended to the name
('TextWrangler--2.3.0.0.0') that version is used.
If no version is given at all, the latest version is assumed.
Returns a pkginfo item, or None.
|
code/client/munkilib/updatecheck/catalogs.py
|
get_item_detail
|
Artoria2e5/munki
|
python
|
def get_item_detail(name, cataloglist, vers='', skip_min_os_check=False, suppress_warnings=False):
"Searches the catalogs in list for an item matching the given name that\n can be installed on the current hardware/OS (optionally skipping the\n minimum OS check so we can return an item that requires a higher OS)\n\n If no version is supplied, but the version is appended to the name\n ('TextWrangler--2.3.0.0.0') that version is used.\n If no version is given at all, the latest version is assumed.\n Returns a pkginfo item, or None.\n "
rejected_items = []
machine = info.getMachineFacts()
def munki_version_ok(item):
'Returns a boolean to indicate if the current Munki version is high\n enough to install this item. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('minimum_munki_version'):
min_munki_vers = item['minimum_munki_version']
display.display_debug1('Considering item %s, version %s with minimum Munki version required %s', item['name'], item['version'], min_munki_vers)
display.display_debug1('Our Munki version is %s', machine['munki_version'])
if (pkgutils.MunkiLooseVersion(machine['munki_version']) < pkgutils.MunkiLooseVersion(min_munki_vers)):
reason = ('Rejected item %s, version %s with minimum Munki version required %s. Our Munki version is %s.' % (item['name'], item['version'], item['minimum_munki_version'], machine['munki_version']))
rejected_items.append(reason)
return False
return True
def os_version_ok(item, skip_min_os_check=False):
'Returns a boolean to indicate if the item is ok to install under\n the current OS. If not, also adds the failure reason to the\n rejected_items list. If skip_min_os_check is True, skips the minimum os\n version check.'
if (item.get('minimum_os_version') and (not skip_min_os_check)):
min_os_vers = item['minimum_os_version']
display.display_debug1('Considering item %s, version %s with minimum os version required %s', item['name'], item['version'], min_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) < pkgutils.MunkiLooseVersion(min_os_vers)):
reason = ('Rejected item %s, version %s with minimum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['minimum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
if item.get('maximum_os_version'):
max_os_vers = item['maximum_os_version']
display.display_debug1('Considering item %s, version %s with maximum os version supported %s', item['name'], item['version'], max_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) > pkgutils.MunkiLooseVersion(max_os_vers)):
reason = ('Rejected item %s, version %s with maximum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['maximum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
return True
def cpu_arch_ok(item):
'Returns a boolean to indicate if the item is ok to install under\n the current CPU architecture. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('supported_architectures'):
display.display_debug1('Considering item %s, version %s with supported architectures: %s', item['name'], item['version'], item['supported_architectures'])
display.display_debug1('Our architecture is %s', machine['arch'])
if (machine['arch'] in item['supported_architectures']):
return True
if (('x86_64' in item['supported_architectures']) and (machine['arch'] == 'i386') and (machine['x86_64_capable'] is True)):
return True
reason = ('Rejected item %s, version %s with supported architectures: %s. Our architecture is %s.' % (item['name'], item['version'], item['supported_architectures'], machine['arch']))
rejected_items.append(reason)
return False
return True
def installable_condition_ok(item):
'Returns a boolean to indicate if an installable_condition predicate\n in the current item passes. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('installable_condition'):
if (not info.predicate_evaluates_as_true(item['installable_condition'])):
rejected_items.append(('Rejected item %s, version %s with installable_condition: %s.' % (item['name'], item['version'], item['installable_condition'])))
return False
return True
if (vers == 'apple_update_metadata'):
vers = 'latest'
else:
(name, includedversion) = split_name_and_version(name)
if (includedversion and (vers == '')):
vers = includedversion
if vers:
vers = pkgutils.trim_version_string(vers)
else:
vers = 'latest'
if skip_min_os_check:
display.display_debug1('Looking for detail for: %s, version %s, ignoring minimum_os_version...', name, vers)
else:
display.display_debug1('Looking for detail for: %s, version %s...', name, vers)
for catalogname in cataloglist:
if ((catalogname in _CATALOG) and (name in _CATALOG[catalogname]['named'])):
itemsmatchingname = _CATALOG[catalogname]['named'][name]
indexlist = []
if (vers == 'latest'):
versionlist = list(itemsmatchingname.keys())
versionlist.sort(key=pkgutils.MunkiLooseVersion, reverse=True)
for versionkey in versionlist:
indexlist.extend(itemsmatchingname[versionkey])
elif (vers in list(itemsmatchingname.keys())):
indexlist = itemsmatchingname[vers]
if indexlist:
display.display_debug1(('Considering %s items with name %s from catalog %s' % (len(indexlist), name, catalogname)))
for index in indexlist:
item = _CATALOG[catalogname]['items'][index]
if (munki_version_ok(item) and os_version_ok(item, skip_min_os_check=skip_min_os_check) and cpu_arch_ok(item) and installable_condition_ok(item)):
display.display_debug1('Found %s, version %s in catalog %s', item['name'], item['version'], catalogname)
return item
display.display_debug1('Not found')
for reason in rejected_items:
if suppress_warnings:
display.display_debug1(reason)
else:
display.display_warning(reason)
return None
|
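A hedged note on how the version argument is resolved (simplified; the real code also trims the version string via pkgutils.trim_version_string):

```python
# get_item_detail('TextWrangler--2.3.0.0.0', catalogs)   -> looks for version '2.3.0.0.0'
# get_item_detail('TextWrangler', catalogs, vers='2.3')  -> looks for version '2.3'
# get_item_detail('TextWrangler', catalogs)              -> assumes 'latest'
```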
def get_catalogs(cataloglist):
'Retrieves the catalogs from the server and populates our catalogs\n dictionary.\n '
for catalogname in cataloglist:
if (not (catalogname in _CATALOG)):
catalogpath = download.download_catalog(catalogname)
if catalogpath:
try:
catalogdata = FoundationPlist.readPlist(catalogpath)
except FoundationPlist.NSPropertyListSerializationException:
display.display_error('Retrieved catalog %s is invalid.', catalogname)
try:
os.unlink(catalogpath)
except (OSError, IOError):
pass
else:
_CATALOG[catalogname] = make_catalog_db(catalogdata)
| 2,477,234,418,475,394,600
|
Retrieves the catalogs from the server and populates our catalogs
dictionary.
|
code/client/munkilib/updatecheck/catalogs.py
|
get_catalogs
|
Artoria2e5/munki
|
python
|
def get_catalogs(cataloglist):
'Retrieves the catalogs from the server and populates our catalogs\n dictionary.\n '
for catalogname in cataloglist:
if (not (catalogname in _CATALOG)):
catalogpath = download.download_catalog(catalogname)
if catalogpath:
try:
catalogdata = FoundationPlist.readPlist(catalogpath)
except FoundationPlist.NSPropertyListSerializationException:
display.display_error('Retrieved catalog %s is invalid.', catalogname)
try:
os.unlink(catalogpath)
except (OSError, IOError):
pass
else:
_CATALOG[catalogname] = make_catalog_db(catalogdata)
|
def clean_up():
'Removes any catalog files that are no longer in use by this client'
catalog_dir = os.path.join(prefs.pref('ManagedInstallDir'), 'catalogs')
for item in os.listdir(catalog_dir):
if (item not in _CATALOG):
os.unlink(os.path.join(catalog_dir, item))
| -2,612,927,915,147,749,400
|
Removes any catalog files that are no longer in use by this client
|
code/client/munkilib/updatecheck/catalogs.py
|
clean_up
|
Artoria2e5/munki
|
python
|
def clean_up():
catalog_dir = os.path.join(prefs.pref('ManagedInstallDir'), 'catalogs')
for item in os.listdir(catalog_dir):
if (item not in _CATALOG):
os.unlink(os.path.join(catalog_dir, item))
|
def catalogs():
'Returns our internal _CATALOG dict'
return _CATALOG
| -1,107,014,203,898,371,800
|
Returns our internal _CATALOG dict
|
code/client/munkilib/updatecheck/catalogs.py
|
catalogs
|
Artoria2e5/munki
|
python
|
def catalogs():
return _CATALOG
|
def item_version(item):
'Returns a MunkiLooseVersion for pkginfo item'
return pkgutils.MunkiLooseVersion(item['version'])
| 5,069,734,528,680,948
|
Returns a MunkiLooseVersion for pkginfo item
|
code/client/munkilib/updatecheck/catalogs.py
|
item_version
|
Artoria2e5/munki
|
python
|
def item_version(item):
return pkgutils.MunkiLooseVersion(item['version'])
|
def munki_version_ok(item):
'Returns a boolean to indicate if the current Munki version is high\n enough to install this item. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('minimum_munki_version'):
min_munki_vers = item['minimum_munki_version']
display.display_debug1('Considering item %s, version %s with minimum Munki version required %s', item['name'], item['version'], min_munki_vers)
display.display_debug1('Our Munki version is %s', machine['munki_version'])
if (pkgutils.MunkiLooseVersion(machine['munki_version']) < pkgutils.MunkiLooseVersion(min_munki_vers)):
reason = ('Rejected item %s, version %s with minimum Munki version required %s. Our Munki version is %s.' % (item['name'], item['version'], item['minimum_munki_version'], machine['munki_version']))
rejected_items.append(reason)
return False
return True
| -1,973,357,087,770,327,300
|
Returns a boolean to indicate if the current Munki version is high
enough to install this item. If not, also adds the failure reason to
the rejected_items list.
|
code/client/munkilib/updatecheck/catalogs.py
|
munki_version_ok
|
Artoria2e5/munki
|
python
|
def munki_version_ok(item):
'Returns a boolean to indicate if the current Munki version is high\n enough to install this item. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('minimum_munki_version'):
min_munki_vers = item['minimum_munki_version']
display.display_debug1('Considering item %s, version %s with minimum Munki version required %s', item['name'], item['version'], min_munki_vers)
display.display_debug1('Our Munki version is %s', machine['munki_version'])
if (pkgutils.MunkiLooseVersion(machine['munki_version']) < pkgutils.MunkiLooseVersion(min_munki_vers)):
reason = ('Rejected item %s, version %s with minimum Munki version required %s. Our Munki version is %s.' % (item['name'], item['version'], item['minimum_munki_version'], machine['munki_version']))
rejected_items.append(reason)
return False
return True
|
def os_version_ok(item, skip_min_os_check=False):
'Returns a boolean to indicate if the item is ok to install under\n the current OS. If not, also adds the failure reason to the\n rejected_items list. If skip_min_os_check is True, skips the minimum os\n version check.'
if (item.get('minimum_os_version') and (not skip_min_os_check)):
min_os_vers = item['minimum_os_version']
display.display_debug1('Considering item %s, version %s with minimum os version required %s', item['name'], item['version'], min_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) < pkgutils.MunkiLooseVersion(min_os_vers)):
reason = ('Rejected item %s, version %s with minimum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['minimum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
if item.get('maximum_os_version'):
max_os_vers = item['maximum_os_version']
display.display_debug1('Considering item %s, version %s with maximum os version supported %s', item['name'], item['version'], max_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) > pkgutils.MunkiLooseVersion(max_os_vers)):
reason = ('Rejected item %s, version %s with maximum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['maximum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
return True
| 6,567,157,521,926,049,000
|
Returns a boolean to indicate if the item is ok to install under
the current OS. If not, also adds the failure reason to the
rejected_items list. If skip_min_os_check is True, skips the minimum os
version check.
|
code/client/munkilib/updatecheck/catalogs.py
|
os_version_ok
|
Artoria2e5/munki
|
python
|
def os_version_ok(item, skip_min_os_check=False):
'Returns a boolean to indicate if the item is ok to install under\n the current OS. If not, also adds the failure reason to the\n rejected_items list. If skip_min_os_check is True, skips the minimum os\n version check.'
if (item.get('minimum_os_version') and (not skip_min_os_check)):
min_os_vers = item['minimum_os_version']
display.display_debug1('Considering item %s, version %s with minimum os version required %s', item['name'], item['version'], min_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) < pkgutils.MunkiLooseVersion(min_os_vers)):
reason = ('Rejected item %s, version %s with minimum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['minimum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
if item.get('maximum_os_version'):
max_os_vers = item['maximum_os_version']
display.display_debug1('Considering item %s, version %s with maximum os version supported %s', item['name'], item['version'], max_os_vers)
display.display_debug1('Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) > pkgutils.MunkiLooseVersion(max_os_vers)):
reason = ('Rejected item %s, version %s with maximum os version required %s. Our OS version is %s.' % (item['name'], item['version'], item['maximum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
return True
|
def cpu_arch_ok(item):
'Returns a boolean to indicate if the item is ok to install under\n the current CPU architecture. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('supported_architectures'):
display.display_debug1('Considering item %s, version %s with supported architectures: %s', item['name'], item['version'], item['supported_architectures'])
display.display_debug1('Our architecture is %s', machine['arch'])
if (machine['arch'] in item['supported_architectures']):
return True
if (('x86_64' in item['supported_architectures']) and (machine['arch'] == 'i386') and (machine['x86_64_capable'] is True)):
return True
reason = ('Rejected item %s, version %s with supported architectures: %s. Our architecture is %s.' % (item['name'], item['version'], item['supported_architectures'], machine['arch']))
rejected_items.append(reason)
return False
return True
| -7,022,270,190,569,582,000
|
Returns a boolean to indicate if the item is ok to install under
the current CPU architecture. If not, also adds the failure reason to
the rejected_items list.
|
code/client/munkilib/updatecheck/catalogs.py
|
cpu_arch_ok
|
Artoria2e5/munki
|
python
|
def cpu_arch_ok(item):
'Returns a boolean to indicate if the item is ok to install under\n the current CPU architecture. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('supported_architectures'):
display.display_debug1('Considering item %s, version %s with supported architectures: %s', item['name'], item['version'], item['supported_architectures'])
display.display_debug1('Our architecture is %s', machine['arch'])
if (machine['arch'] in item['supported_architectures']):
return True
if (('x86_64' in item['supported_architectures']) and (machine['arch'] == 'i386') and (machine['x86_64_capable'] is True)):
return True
reason = ('Rejected item %s, version %s with supported architectures: %s. Our architecture is %s.' % (item['name'], item['version'], item['supported_architectures'], machine['arch']))
rejected_items.append(reason)
return False
return True
|
def installable_condition_ok(item):
'Returns a boolean to indicate if an installable_condition predicate\n in the current item passes. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('installable_condition'):
if (not info.predicate_evaluates_as_true(item['installable_condition'])):
rejected_items.append(('Rejected item %s, version %s with installable_condition: %s.' % (item['name'], item['version'], item['installable_condition'])))
return False
return True
| -2,771,831,491,757,247,000
|
Returns a boolean to indicate if an installable_condition predicate
in the current item passes. If not, also adds the failure reason to
the rejected_items list.
|
code/client/munkilib/updatecheck/catalogs.py
|
installable_condition_ok
|
Artoria2e5/munki
|
python
|
def installable_condition_ok(item):
'Returns a boolean to indicate if an installable_condition predicate\n in the current item passes. If not, also adds the failure reason to\n the rejected_items list.'
if item.get('installable_condition'):
if (not info.predicate_evaluates_as_true(item['installable_condition'])):
rejected_items.append(('Rejected item %s, version %s with installable_condition: %s.' % (item['name'], item['version'], item['installable_condition'])))
return False
return True
|
@registry.register('A000073')
def tribonacci() -> Iterable[int]:
'Tribonacci numbers.'
(yield 0)
(yield 0)
(yield 1)
p3: int = 0
p2: int = 0
p1: int = 1
while True:
curr: int = ((p1 + p2) + p3)
(yield curr)
(p1, p2, p3) = (curr, p1, p2)
| 300,005,145,968,154,100
|
Tribonacci numbers.
|
oeis/tribonacci.py
|
tribonacci
|
reidhoch/oeis-seq
|
python
|
@registry.register('A000073')
def tribonacci() -> Iterable[int]:
(yield 0)
(yield 0)
(yield 1)
p3: int = 0
p2: int = 0
p1: int = 1
while True:
curr: int = ((p1 + p2) + p3)
(yield curr)
(p1, p2, p3) = (curr, p1, p2)
|
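A hedged usage sketch, assuming the generator is importable; itertools.islice takes a finite prefix of the infinite stream:

```python
from itertools import islice

# Each term is the sum of the previous three.
print(list(islice(tribonacci(), 10)))  # [0, 0, 1, 1, 2, 4, 7, 13, 24, 44]
```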
def set_line(self, line, membership):
'Set whether a given line is a member of the set.'
self._lines[line] = membership
| -6,751,681,664,870,876,000
|
Set whether a given line is a member of the set.
|
pytype/directors.py
|
set_line
|
Flameeyes/pytype
|
python
|
def set_line(self, line, membership):
self._lines[line] = membership
|
def start_range(self, line, membership):
'Start a range of lines that are either included/excluded from the set.\n\n Args:\n line: A line number.\n membership: If True, lines >= line are included in the set (starting\n a range), otherwise they are excluded (ending a range).\n\n Raises:\n ValueError: if line is less than that of a previous call to start_range().\n '
last = (self._transitions[(- 1)] if self._transitions else (- 1))
if (line < last):
raise ValueError('Line number less than previous start_range() call.')
previous = ((len(self._transitions) % 2) == 1)
if (membership == previous):
return
elif (line == last):
self._transitions.pop()
else:
self._transitions.append(line)
| 2,535,163,513,588,088,000
|
Start a range of lines that are either included/excluded from the set.
Args:
line: A line number.
membership: If True, lines >= line are included in the set (starting
a range), otherwise they are excluded (ending a range).
Raises:
ValueError: if line is less than that of a previous call to start_range().
|
pytype/directors.py
|
start_range
|
Flameeyes/pytype
|
python
|
def start_range(self, line, membership):
'Start a range of lines that are either included/excluded from the set.\n\n Args:\n line: A line number.\n membership: If True, lines >= line are included in the set (starting\n a range), otherwise they are excluded (ending a range).\n\n Raises:\n ValueError: if line is less than that of a previous call to start_range().\n '
last = (self._transitions[(- 1)] if self._transitions else (- 1))
if (line < last):
raise ValueError('Line number less than previous start_range() call.')
previous = ((len(self._transitions) % 2) == 1)
if (membership == previous):
return
elif (line == last):
self._transitions.pop()
else:
self._transitions.append(line)
|
def __contains__(self, line):
'Return if a line is a member of the set.'
specific = self._lines.get(line)
if (specific is not None):
return specific
pos = bisect.bisect(self._transitions, line)
return ((pos % 2) == 1)
| -5,524,570,454,118,664,000
|
Return if a line is a member of the set.
|
pytype/directors.py
|
__contains__
|
Flameeyes/pytype
|
python
|
def __contains__(self, line):
specific = self._lines.get(line)
if (specific is not None):
return specific
pos = bisect.bisect(self._transitions, line)
return ((pos % 2) == 1)
|
def get_disable_after(self, lineno):
'Get an unclosed disable, if any, that starts after lineno.'
if (((len(self._transitions) % 2) == 1) and (self._transitions[(- 1)] >= lineno)):
return self._transitions[(- 1)]
return None
| -8,653,321,035,775,793,000
|
Get an unclosed disable, if any, that starts after lineno.
|
pytype/directors.py
|
get_disable_after
|
Flameeyes/pytype
|
python
|
def get_disable_after(self, lineno):
if (((len(self._transitions) % 2) == 1) and (self._transitions[(- 1)] >= lineno)):
return self._transitions[(- 1)]
return None
|
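The three _LineSet methods above share one data model: _transitions is a sorted list of line numbers at which membership flips (on at even indexes, off at odd), and _lines holds per-line overrides. A minimal, self-contained sketch of that idea (a toy, not pytype's actual class; it omits the validation and range-collapsing of the real start_range):

```python
import bisect

class TinyLineSet:
    """Toy line set: alternating range transitions plus per-line overrides."""

    def __init__(self):
        self._lines = {}        # line -> bool override
        self._transitions = []  # sorted lines where range membership flips

    def set_line(self, line, membership):
        self._lines[line] = membership

    def start_range(self, line, membership):
        # Only record a transition when the state actually changes.
        currently_on = len(self._transitions) % 2 == 1
        if membership != currently_on:
            self._transitions.append(line)

    def __contains__(self, line):
        if line in self._lines:
            return self._lines[line]
        # An odd insertion point means the line falls inside an "on" range.
        return bisect.bisect(self._transitions, line) % 2 == 1


s = TinyLineSet()
s.start_range(10, True)    # lines >= 10 are members...
s.start_range(20, False)   # ...until line 20
s.set_line(15, False)      # except line 15
print(5 in s, 12 in s, 15 in s, 25 in s)  # False True False False
```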
def __init__(self, src, errorlog, filename, disable):
'Create a Director for a source file.\n\n Args:\n src: The source text as a string.\n errorlog: An ErrorLog object. Directive errors will be logged to the\n errorlog.\n filename: The name of the source file.\n disable: List of error messages to always ignore.\n '
self._filename = filename
self._errorlog = errorlog
self._type_comments = {}
self._docstrings = set()
self._ignore = _LineSet()
self._disables = collections.defaultdict(_LineSet)
for error_name in disable:
self._disables[error_name].start_range(0, True)
self._parse_source(src)
| -7,014,119,069,833,995,000
|
Create a Director for a source file.
Args:
src: The source text as a string.
errorlog: An ErrorLog object. Directive errors will be logged to the
errorlog.
filename: The name of the source file.
disable: List of error messages to always ignore.
|
pytype/directors.py
|
__init__
|
Flameeyes/pytype
|
python
|
def __init__(self, src, errorlog, filename, disable):
'Create a Director for a source file.\n\n Args:\n src: The source text as a string.\n errorlog: An ErrorLog object. Directive errors will be logged to the\n errorlog.\n filename: The name of the source file.\n disable: List of error messages to always ignore.\n '
self._filename = filename
self._errorlog = errorlog
self._type_comments = {}
self._docstrings = set()
self._ignore = _LineSet()
self._disables = collections.defaultdict(_LineSet)
for error_name in disable:
self._disables[error_name].start_range(0, True)
self._parse_source(src)
|
def _adjust_type_comments(self, closing_bracket_lines, whitespace_lines):
'Adjust any type comments affected by closing bracket lines.\n\n Lines that contain nothing but closing brackets don\'t appear in the\n bytecode, so for, e.g.,\n v = [\n "hello",\n "world",\n ] # line 4\n line 4 is where any type comment for \'v\' should be put, but the\n STORE_NAME opcode for \'v\' is at line 3. If we find a type comment put\n (wrongly) on line 3, we\'ll report an error, and if we find a type comment\n on line 4, we\'ll move it to line 3.\n\n Args:\n closing_bracket_lines: A set of lines containing only closing brackets,\n to be used for adjusting affected type comments.\n whitespace_lines: A set of lines containing only whitespace. Its union\n with closing_bracket_lines is a set of consecutive lines.\n '
target = (min((closing_bracket_lines | whitespace_lines)) - 1)
if (target in self._type_comments):
self._errorlog.ignored_type_comment(self._filename, target, self._type_comments[target][1])
del self._type_comments[target]
end = max(closing_bracket_lines)
if (end in self._type_comments):
self._type_comments[target] = self._type_comments[end]
del self._type_comments[end]
| -3,983,367,050,733,600,300
|
Adjust any type comments affected by closing bracket lines.
Lines that contain nothing but closing brackets don't appear in the
bytecode, so for, e.g.,
v = [
"hello",
"world",
] # line 4
line 4 is where any type comment for 'v' should be put, but the
STORE_NAME opcode for 'v' is at line 3. If we find a type comment put
(wrongly) on line 3, we'll report an error, and if we find a type comment
on line 4, we'll move it to line 3.
Args:
closing_bracket_lines: A set of lines containing only closing brackets,
to be used for adjusting affected type comments.
whitespace_lines: A set of lines containing only whitespace. Its union
with closing_bracket_lines is a set of consecutive lines.
|
pytype/directors.py
|
_adjust_type_comments
|
Flameeyes/pytype
|
python
|
def _adjust_type_comments(self, closing_bracket_lines, whitespace_lines):
'Adjust any type comments affected by closing bracket lines.\n\n Lines that contain nothing but closing brackets don\'t appear in the\n bytecode, so for, e.g.,\n v = [\n "hello",\n "world",\n ] # line 4\n line 4 is where any type comment for \'v\' should be put, but the\n STORE_NAME opcode for \'v\' is at line 3. If we find a type comment put\n (wrongly) on line 3, we\'ll report an error, and if we find a type comment\n on line 4, we\'ll move it to line 3.\n\n Args:\n closing_bracket_lines: A set of lines containing only closing brackets,\n to be used for adjusting affected type comments.\n whitespace_lines: A set of lines containing only whitespace. Its union\n with closing_bracket_lines is a set of consecutive lines.\n '
target = (min((closing_bracket_lines | whitespace_lines)) - 1)
if (target in self._type_comments):
self._errorlog.ignored_type_comment(self._filename, target, self._type_comments[target][1])
del self._type_comments[target]
end = max(closing_bracket_lines)
if (end in self._type_comments):
self._type_comments[target] = self._type_comments[end]
del self._type_comments[end]
|
def _parse_source(self, src):
'Parse a source file, extracting directives from comments.'
f = moves.StringIO(src)
defs_start = None
closing_bracket_lines = set()
whitespace_lines = set()
for (tok, _, start, _, line) in tokenize.generate_tokens(f.readline):
(lineno, col) = start
if ((defs_start is None) and _CLASS_OR_FUNC_RE.match(line)):
defs_start = lineno
if _CLOSING_BRACKETS_RE.match(line):
closing_bracket_lines.add(lineno)
elif _WHITESPACE_RE.match(line):
whitespace_lines.add(lineno)
elif _DOCSTRING_RE.match(line):
self._docstrings.add(lineno)
else:
if closing_bracket_lines:
self._adjust_type_comments(closing_bracket_lines, whitespace_lines)
closing_bracket_lines.clear()
whitespace_lines.clear()
if (tok == tokenize.COMMENT):
matches = list(_DIRECTIVE_RE.finditer(line[col:]))
is_nested = (bool(matches) and (matches[0].start(0) > 0))
for m in matches:
code = line[:col].strip()
(tool, data) = m.groups()
open_ended = (not code)
data = data.strip()
if (tool == 'type'):
self._process_type(lineno, code, data, is_nested)
elif (tool == 'pytype'):
try:
self._process_pytype(lineno, data, open_ended)
except _DirectiveError as e:
self._errorlog.invalid_directive(self._filename, lineno, utils.message(e))
else:
pass
if closing_bracket_lines:
self._adjust_type_comments(closing_bracket_lines, whitespace_lines)
if (defs_start is not None):
disables = list(self._disables.items())
disables.append(('Type checking', self._ignore))
for (name, lineset) in disables:
lineno = lineset.get_disable_after(defs_start)
if (lineno is not None):
self._errorlog.late_directive(self._filename, lineno, name)
| -4,893,756,587,152,604,000
|
Parse a source file, extracting directives from comments.
|
pytype/directors.py
|
_parse_source
|
Flameeyes/pytype
|
python
|
def _parse_source(self, src):
f = moves.StringIO(src)
defs_start = None
closing_bracket_lines = set()
whitespace_lines = set()
for (tok, _, start, _, line) in tokenize.generate_tokens(f.readline):
(lineno, col) = start
if ((defs_start is None) and _CLASS_OR_FUNC_RE.match(line)):
defs_start = lineno
if _CLOSING_BRACKETS_RE.match(line):
closing_bracket_lines.add(lineno)
elif _WHITESPACE_RE.match(line):
whitespace_lines.add(lineno)
elif _DOCSTRING_RE.match(line):
self._docstrings.add(lineno)
else:
if closing_bracket_lines:
self._adjust_type_comments(closing_bracket_lines, whitespace_lines)
closing_bracket_lines.clear()
whitespace_lines.clear()
if (tok == tokenize.COMMENT):
matches = list(_DIRECTIVE_RE.finditer(line[col:]))
is_nested = (bool(matches) and (matches[0].start(0) > 0))
for m in matches:
code = line[:col].strip()
(tool, data) = m.groups()
open_ended = (not code)
data = data.strip()
if (tool == 'type'):
self._process_type(lineno, code, data, is_nested)
elif (tool == 'pytype'):
try:
self._process_pytype(lineno, data, open_ended)
except _DirectiveError as e:
self._errorlog.invalid_directive(self._filename, lineno, utils.message(e))
else:
pass
if closing_bracket_lines:
self._adjust_type_comments(closing_bracket_lines, whitespace_lines)
if (defs_start is not None):
disables = list(self._disables.items())
disables.append(('Type checking', self._ignore))
for (name, lineset) in disables:
lineno = lineset.get_disable_after(defs_start)
if (lineno is not None):
self._errorlog.late_directive(self._filename, lineno, name)
|
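A rough, self-contained illustration of the comment scan in the record above (simplified regex and no error logging, so not pytype's actual _DIRECTIVE_RE or dispatch logic):
import re
import tokenize
from io import StringIO

_DIRECTIVE = re.compile(r'#\s*(type|pytype)\s*:\s*(.*)')  # simplified stand-in for _DIRECTIVE_RE

def collect_directives(src):
    """Return {lineno: (tool, data)} for every '# type:' / '# pytype:' comment in src."""
    found = {}
    for tok, string, (lineno, _), _, _ in tokenize.generate_tokens(StringIO(src).readline):
        if tok == tokenize.COMMENT:
            m = _DIRECTIVE.match(string)
            if m:
                found[lineno] = (m.group(1), m.group(2).strip())
    return found

print(collect_directives("x = []  # type: List[int]\n# pytype: disable=attribute-error\n"))
# {1: ('type', 'List[int]'), 2: ('pytype', 'disable=attribute-error')}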
def _process_type(self, lineno, code, data, is_nested):
'Process a type: comment.'
if ((not code) and is_nested):
return
if (lineno in self._type_comments):
self._errorlog.invalid_directive(self._filename, lineno, 'Multiple type comments on the same line.')
if (data == 'ignore'):
if (not code):
self._ignore.start_range(lineno, True)
else:
self._ignore.set_line(lineno, True)
else:
self._type_comments[lineno] = (code, data)
| 2,161,644,576,464,778,500
|
Process a type: comment.
|
pytype/directors.py
|
_process_type
|
Flameeyes/pytype
|
python
|
def _process_type(self, lineno, code, data, is_nested):
if ((not code) and is_nested):
return
if (lineno in self._type_comments):
self._errorlog.invalid_directive(self._filename, lineno, 'Multiple type comments on the same line.')
if (data == 'ignore'):
if (not code):
self._ignore.start_range(lineno, True)
else:
self._ignore.set_line(lineno, True)
else:
self._type_comments[lineno] = (code, data)
|
def _process_pytype(self, lineno, data, open_ended):
'Process a pytype: comment.'
if (not data):
raise _DirectiveError('Invalid directive syntax.')
for option in data.split():
if (option == 'skip-file'):
raise SkipFile()
try:
(command, values) = option.split('=', 1)
values = values.split(',')
except ValueError:
raise _DirectiveError('Invalid directive syntax.')
if (command == 'disable'):
disable = True
elif (command == 'enable'):
disable = False
else:
raise _DirectiveError(("Unknown pytype directive: '%s'" % command))
if (not values):
raise _DirectiveError('Disable/enable must specify one or more error names.')
for error_name in values:
if ((error_name == _ALL_ERRORS) or self._errorlog.is_valid_error_name(error_name)):
lines = self._disables[error_name]
if open_ended:
lines.start_range(lineno, disable)
else:
lines.set_line(lineno, disable)
else:
self._errorlog.invalid_directive(self._filename, lineno, ("Invalid error name: '%s'" % error_name))
| 821,426,886,601,486,200
|
Process a pytype: comment.
|
pytype/directors.py
|
_process_pytype
|
Flameeyes/pytype
|
python
|
def _process_pytype(self, lineno, data, open_ended):
if (not data):
raise _DirectiveError('Invalid directive syntax.')
for option in data.split():
if (option == 'skip-file'):
raise SkipFile()
try:
(command, values) = option.split('=', 1)
values = values.split(',')
except ValueError:
raise _DirectiveError('Invalid directive syntax.')
if (command == 'disable'):
disable = True
elif (command == 'enable'):
disable = False
else:
raise _DirectiveError(("Unknown pytype directive: '%s'" % command))
if (not values):
raise _DirectiveError('Disable/enable must specify one or more error names.')
for error_name in values:
if ((error_name == _ALL_ERRORS) or self._errorlog.is_valid_error_name(error_name)):
lines = self._disables[error_name]
if open_ended:
lines.start_range(lineno, disable)
else:
lines.set_line(lineno, disable)
else:
self._errorlog.invalid_directive(self._filename, lineno, ("Invalid error name: '%s'" % error_name))
|
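The data string of a pytype directive packs one or more options of the form command=name1,name2. A toy helper (hypothetical, not part of the class above) that performs just that split:
def parse_pytype_options(data):
    """Split 'disable=a,b enable=c' into [('disable', ['a', 'b']), ('enable', ['c'])]."""
    parsed = []
    for option in data.split():
        if option == 'skip-file':
            parsed.append(('skip-file', []))
            continue
        try:
            command, values = option.split('=', 1)
        except ValueError:
            raise ValueError('Invalid directive syntax: %r' % option)
        parsed.append((command, values.split(',')))
    return parsed

print(parse_pytype_options('disable=attribute-error,name-error enable=import-error'))
# [('disable', ['attribute-error', 'name-error']), ('enable', ['import-error'])]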
def should_report_error(self, error):
'Return whether the error should be logged.\n\n This method is suitable for use as an error filter.\n\n Args:\n error: An error._Error object.\n\n Returns:\n True iff the error should be included in the log.\n '
if ((error.filename != self._filename) or (error.lineno is None)):
return True
lineno = (error.lineno or sys.maxsize)
return ((lineno not in self._ignore) and (lineno not in self._disables[_ALL_ERRORS]) and (lineno not in self._disables[error.name]))
| -4,967,645,690,361,252,000
|
Return whether the error should be logged.
This method is suitable for use as an error filter.
Args:
error: An error._Error object.
Returns:
True iff the error should be included in the log.
|
pytype/directors.py
|
should_report_error
|
Flameeyes/pytype
|
python
|
def should_report_error(self, error):
'Return whether the error should be logged.\n\n This method is suitable for use as an error filter.\n\n Args:\n error: An error._Error object.\n\n Returns:\n True iff the error should be included in the log.\n '
if ((error.filename != self._filename) or (error.lineno is None)):
return True
lineno = (error.lineno or sys.maxsize)
return ((lineno not in self._ignore) and (lineno not in self._disables[_ALL_ERRORS]) and (lineno not in self._disables[error.name]))
|
def _create_label(self, kg: KG, vertex: Vertex, n: int) -> str:
'Creates a label according to a vertex and its neighbors.\n\n kg: The Knowledge Graph.\n\n The graph from which the neighborhoods are extracted for the\n provided entities.\n vertex: The vertex to get its neighbors to create the suffix.\n n: The index of the neighbor\n\n Returns:\n the label created for the vertex.\n\n '
if (len(self._label_map) == 0):
self._weisfeiler_lehman(kg)
suffix = '-'.join(sorted(set([self._label_map[neighbor][(n - 1)] for neighbor in kg.get_neighbors(vertex, is_reverse=True)])))
return f'{self._label_map[vertex][(n - 1)]}-{suffix}'
| -926,188,340,973,383,600
|
Creates a label according to a vertex and its neighbors.
kg: The Knowledge Graph.
The graph from which the neighborhoods are extracted for the
provided entities.
vertex: The vertex to get its neighbors to create the suffix.
n: The index of the neighbor
Returns:
the label created for the vertex.
|
pyrdf2vec/walkers/weisfeiler_lehman.py
|
_create_label
|
vishalbelsare/pyRDF2Vec
|
python
|
def _create_label(self, kg: KG, vertex: Vertex, n: int) -> str:
'Creates a label according to a vertex and its neighbors.\n\n kg: The Knowledge Graph.\n\n The graph from which the neighborhoods are extracted for the\n provided entities.\n vertex: The vertex to get its neighbors to create the suffix.\n n: The index of the neighbor\n\n Returns:\n the label created for the vertex.\n\n '
if (len(self._label_map) == 0):
self._weisfeiler_lehman(kg)
suffix = '-'.join(sorted(set([self._label_map[neighbor][(n - 1)] for neighbor in kg.get_neighbors(vertex, is_reverse=True)])))
return f'{self._label_map[vertex][(n - 1)]}-{suffix}'
|
def _weisfeiler_lehman(self, kg: KG) -> None:
'Performs Weisfeiler-Lehman relabeling of the vertices.\n\n Args:\n kg: The Knowledge Graph.\n\n The graph from which the neighborhoods are extracted for the\n provided entities.\n\n '
for vertex in kg._vertices:
self._label_map[vertex][0] = vertex.name
self._inv_label_map[vertex][0] = vertex.name
for n in range(1, (self.wl_iterations + 1)):
for vertex in kg._vertices:
if self.md5_bytes:
self._label_map[vertex][n] = str(md5(self._create_label(kg, vertex, n).encode()).digest()[:self.md5_bytes])
else:
self._label_map[vertex][n] = str(self._create_label(kg, vertex, n))
for vertex in kg._vertices:
for (k, v) in self._label_map[vertex].items():
self._inv_label_map[vertex][v] = k
| 3,049,563,729,577,228,000
|
Performs Weisfeiler-Lehman relabeling of the vertices.
Args:
kg: The Knowledge Graph.
The graph from which the neighborhoods are extracted for the
provided entities.
|
pyrdf2vec/walkers/weisfeiler_lehman.py
|
_weisfeiler_lehman
|
vishalbelsare/pyRDF2Vec
|
python
|
def _weisfeiler_lehman(self, kg: KG) -> None:
'Performs Weisfeiler-Lehman relabeling of the vertices.\n\n Args:\n kg: The Knowledge Graph.\n\n The graph from which the neighborhoods are extracted for the\n provided entities.\n\n '
for vertex in kg._vertices:
self._label_map[vertex][0] = vertex.name
self._inv_label_map[vertex][0] = vertex.name
for n in range(1, (self.wl_iterations + 1)):
for vertex in kg._vertices:
if self.md5_bytes:
self._label_map[vertex][n] = str(md5(self._create_label(kg, vertex, n).encode()).digest()[:self.md5_bytes])
else:
self._label_map[vertex][n] = str(self._create_label(kg, vertex, n))
for vertex in kg._vertices:
for (k, v) in self._label_map[vertex].items():
self._inv_label_map[vertex][v] = k
|
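Each Weisfeiler-Lehman pass above relabels a vertex with its previous label joined to the sorted labels of its neighbors, optionally truncated through MD5. A minimal sketch of one pass on a plain adjacency dict (toy graph, not the KG class used by the walker):
from hashlib import md5

def wl_iteration(labels, neighbors):
    """One Weisfeiler-Lehman pass: relabel each node from its old label plus sorted neighbor labels."""
    new_labels = {}
    for node, old in labels.items():
        suffix = '-'.join(sorted(labels[n] for n in neighbors.get(node, [])))
        combined = '%s-%s' % (old, suffix)
        new_labels[node] = md5(combined.encode()).hexdigest()[:8]  # truncated hash, like md5_bytes
    return new_labels

neighbors = {'a': ['b', 'c'], 'b': ['a'], 'c': ['a']}
labels = {n: n for n in neighbors}          # iteration 0: label = node name
labels = wl_iteration(labels, neighbors)    # iteration 1
print(labels)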
def extract(self, kg: KG, entities: Entities, verbose: int=0) -> List[List[SWalk]]:
'Fits the provided sampling strategy and then calls the\n private _extract method that is implemented for each of the\n walking strategies.\n\n Args:\n kg: The Knowledge Graph.\n entities: The entities to be extracted from the Knowledge Graph.\n verbose: The verbosity level.\n 0: does not display anything;\n 1: display of the progress of extraction and training of walks;\n 2: debugging.\n Defaults to 0.\n\n Returns:\n The 2D matrix with its number of rows equal to the number of\n provided entities; number of column equal to the embedding size.\n\n '
self._weisfeiler_lehman(kg)
return super().extract(kg, entities, verbose)
| 8,135,408,716,254,422,000
|
Fits the provided sampling strategy and then calls the
private _extract method that is implemented for each of the
walking strategies.
Args:
kg: The Knowledge Graph.
entities: The entities to be extracted from the Knowledge Graph.
verbose: The verbosity level.
0: does not display anything;
1: display of the progress of extraction and training of walks;
2: debugging.
Defaults to 0.
Returns:
The 2D matrix with its number of rows equal to the number of
provided entities; number of column equal to the embedding size.
|
pyrdf2vec/walkers/weisfeiler_lehman.py
|
extract
|
vishalbelsare/pyRDF2Vec
|
python
|
def extract(self, kg: KG, entities: Entities, verbose: int=0) -> List[List[SWalk]]:
'Fits the provided sampling strategy and then calls the\n private _extract method that is implemented for each of the\n walking strategies.\n\n Args:\n kg: The Knowledge Graph.\n entities: The entities to be extracted from the Knowledge Graph.\n verbose: The verbosity level.\n 0: does not display anything;\n 1: display of the progress of extraction and training of walks;\n 2: debugging.\n Defaults to 0.\n\n Returns:\n The 2D matrix with its number of rows equal to the number of\n provided entities; number of column equal to the embedding size.\n\n '
self._weisfeiler_lehman(kg)
return super().extract(kg, entities, verbose)
|
def _map_wl(self, entity: Vertex, pos: int, n: int) -> str:
'Maps certain vertices to MD5 hashes to save memory. For entities of\n interest (provided by the user to the extract function) and predicates,\n the string representation is kept.\n\n Args:\n entity: The entity to be mapped.\n pos: The position of the entity in the walk.\n n: The iteration number of the WL algorithm.\n\n Returns:\n A hash (string) or original string representation.\n\n '
if ((entity.name in self._entities) or ((pos % 2) == 1)):
return entity.name
else:
return self._label_map[entity][n]
| 2,031,195,953,491,032,300
|
Maps certain vertices to MD5 hashes to save memory. For entities of
interest (provided by the user to the extract function) and predicates,
the string representation is kept.
Args:
entity: The entity to be mapped.
pos: The position of the entity in the walk.
n: The iteration number of the WL algorithm.
Returns:
A hash (string) or original string representation.
|
pyrdf2vec/walkers/weisfeiler_lehman.py
|
_map_wl
|
vishalbelsare/pyRDF2Vec
|
python
|
def _map_wl(self, entity: Vertex, pos: int, n: int) -> str:
'Maps certain vertices to MD5 hashes to save memory. For entities of\n interest (provided by the user to the extract function) and predicates,\n the string representation is kept.\n\n Args:\n entity: The entity to be mapped.\n pos: The position of the entity in the walk.\n n: The iteration number of the WL algorithm.\n\n Returns:\n A hash (string) or original string representation.\n\n '
if ((entity.name in self._entities) or ((pos % 2) == 1)):
return entity.name
else:
return self._label_map[entity][n]
|
def _extract(self, kg: KG, entity: Vertex) -> EntityWalks:
'Extracts random walks for an entity based on a Knowledge Graph.\n\n Args:\n kg: The Knowledge Graph.\n entity: The root node to extract walks.\n\n Returns:\n A dictionary having the entity as key and a list of tuples as value\n corresponding to the extracted walks.\n\n '
canonical_walks: Set[SWalk] = set()
for n in range((self.wl_iterations + 1)):
for walk in self.extract_walks(kg, entity):
canonical_walk: List[str] = [self._map_wl(vertex, i, n) for (i, vertex) in enumerate(walk)]
canonical_walks.add(tuple(canonical_walk))
return {entity.name: list(canonical_walks)}
| -9,107,139,105,087,989,000
|
Extracts random walks for an entity based on a Knowledge Graph.
Args:
kg: The Knowledge Graph.
entity: The root node to extract walks.
Returns:
A dictionary having the entity as key and a list of tuples as value
corresponding to the extracted walks.
|
pyrdf2vec/walkers/weisfeiler_lehman.py
|
_extract
|
vishalbelsare/pyRDF2Vec
|
python
|
def _extract(self, kg: KG, entity: Vertex) -> EntityWalks:
'Extracts random walks for an entity based on a Knowledge Graph.\n\n Args:\n kg: The Knowledge Graph.\n entity: The root node to extract walks.\n\n Returns:\n A dictionary having the entity as key and a list of tuples as value\n corresponding to the extracted walks.\n\n '
canonical_walks: Set[SWalk] = set()
for n in range((self.wl_iterations + 1)):
for walk in self.extract_walks(kg, entity):
canonical_walk: List[str] = [self._map_wl(vertex, i, n) for (i, vertex) in enumerate(walk)]
canonical_walks.add(tuple(canonical_walk))
return {entity.name: list(canonical_walks)}
|
def csbal_process():
'\n This method is run when the `csbal` script is called.\n can be used to check a single file (check balance state after adjusting)\n args are file stem, freq (Hz [rpm/60] float), samp_rate (data collector)\n '
args = sys.argv[1:]
stem = args[0]
freq = float(args[1])
samp_rate = float(args[2])
df = bal.read_data_files(stem, freq, samp_rate)
bal.graph_data(df)
bal.process_data(df, freq, samp_rate, True)
| 4,217,561,701,050,841,000
|
This method is run when the `csbal` script is called.
can be used to check a single file (check balance state after adjusting)
args are file stem, freq (Hz [rpm/60] float), samp_rate (data collector)
|
cheapskate_bal/cheapskate_bal/cli.py
|
csbal_process
|
kevinpowell/balancer
|
python
|
def csbal_process():
'\n This method is run when the `csbal` script is called.\n can be used to check a single file (check balance state after adjusting)\n args are file stem, freq (Hz [rpm/60] float), samp_rate (data collector)\n '
args = sys.argv[1:]
stem = args[0]
freq = float(args[1])
samp_rate = float(args[2])
df = bal.read_data_files(stem, freq, samp_rate)
bal.graph_data(df)
bal.process_data(df, freq, samp_rate, True)
|
def csbal_single():
'\n This method performs the whole process for a single plane balance\n Four data files are captured, and the results are emitted\n args are file stem, freq(Hz), shift angle of test mass (deg), test mass '
args = sys.argv[1:]
    if (len(args) < 4):
        print('args are stem, freq, shift_ang, test_mass')
        return
stem = args[0]
freq = float(args[1])
shift_ang = float(args[2])
tmass = float(args[3])
offset_1_ang = 360
offset_2_ang = 360
if (len(args) > 5):
offset_1_ang = float(args[4])
offset_2_ang = float(args[5])
if (stem[(- 1)] != os.path.sep):
stem = (stem + os.path.sep)
tests = [('T0: initial unbalanced state', 't0'), ('T1: test mass at 0 deg ref', 't1'), ('T2: test mass at positive angle', 't2'), ('T3: test mass at negative angle', 't3')]
grab_data(tests, stem)
print('Processing captured data...')
results = batch_process(tests, stem, freq)
    print('Balance Results:')
bal.single_balance(results, tmass, shift_ang, offset_1_ang, offset_2_ang)
| -2,981,180,016,032,641,000
|
This method performs the whole process for a single plane balance
Four data files are captured, and the results are emitted
args are file stem, freq(Hz), shift angle of test mass (deg), test mass
|
cheapskate_bal/cheapskate_bal/cli.py
|
csbal_single
|
kevinpowell/balancer
|
python
|
def csbal_single():
'\n This method performs the whole process for a single plane balance\n Four data files are captured, and the results are emitted\n args are file stem, freq(Hz), shift angle of test mass (deg), test mass '
args = sys.argv[1:]
    if (len(args) < 4):
        print('args are stem, freq, shift_ang, test_mass')
        return
stem = args[0]
freq = float(args[1])
shift_ang = float(args[2])
tmass = float(args[3])
offset_1_ang = 360
offset_2_ang = 360
if (len(args) > 5):
offset_1_ang = float(args[4])
offset_2_ang = float(args[5])
if (stem[(- 1)] != os.path.sep):
stem = (stem + os.path.sep)
tests = [('T0: initial unbalanced state', 't0'), ('T1: test mass at 0 deg ref', 't1'), ('T2: test mass at positive angle', 't2'), ('T3: test mass at negative angle', 't3')]
grab_data(tests, stem)
print('Processing captured data...')
results = batch_process(tests, stem, freq)
    print('Balance Results:')
bal.single_balance(results, tmass, shift_ang, offset_1_ang, offset_2_ang)
|
def csbal_dual_init():
    '\n    This method performs the whole process for a dual plane balance\n    Three files are captured and the results are emitted\n    args are file stem, freq(Hz), shift angle of test mass (deg), test mass '
args = sys.argv[1:]
    if (len(args) < 4):
        print('args are stem, freq, shift_ang, test_mass')
        return
stem = args[0]
freq = float(args[1])
shift_ang = float(args[2])
tmass = float(args[3])
if (stem[(- 1)] != os.path.sep):
stem = (stem + os.path.sep)
tests = [('T0: initial unbalanced state', 't0'), ('TA: test mass on bearing 1 at shift angle', 'ta'), ('TB: test mass on bearing 2 at shift angle', 'tb')]
grab_data(tests, stem)
print('Processing captured data...')
results = batch_process(tests, stem, freq)
print('Dual Plane Balance Results')
(influence, correction) = bal.dual_compute_influence(results, tmass, shift_ang)
inf_file = (stem + 'influence')
with open(inf_file, 'wb') as filehandle:
pickle.dump(influence, filehandle)
| 4,723,339,305,924,539,000
|
This method performs the whole process for a dual plane balance
Three files are captured and the results are emitted
args are file stem, freq(Hz), shift angle of test mass (deg), test mass
|
cheapskate_bal/cheapskate_bal/cli.py
|
csbal_dual_init
|
kevinpowell/balancer
|
python
|
def csbal_dual_init():
    '\n    This method performs the whole process for a dual plane balance\n    Three files are captured and the results are emitted\n    args are file stem, freq(Hz), shift angle of test mass (deg), test mass '
args = sys.argv[1:]
    if (len(args) < 4):
        print('args are stem, freq, shift_ang, test_mass')
        return
stem = args[0]
freq = float(args[1])
shift_ang = float(args[2])
tmass = float(args[3])
if (stem[(- 1)] != os.path.sep):
stem = (stem + os.path.sep)
tests = [('T0: initial unbalanced state', 't0'), ('TA: test mass on bearing 1 at shift angle', 'ta'), ('TB: test mass on bearing 2 at shift angle', 'tb')]
grab_data(tests, stem)
print('Processing captured data...')
results = batch_process(tests, stem, freq)
print('Dual Plane Balance Results')
(influence, correction) = bal.dual_compute_influence(results, tmass, shift_ang)
inf_file = (stem + 'influence')
with open(inf_file, 'wb') as filehandle:
pickle.dump(influence, filehandle)
|
def csbal_dual_iter():
'\n This method performs an iteration of dual plane balance, once the\n influence params are known. One file is captured and the results\n are emitted\n args are file stem, tag, freq\n '
args = sys.argv[1:]
    if (len(args) < 3):
        print('args are: filestem, tag, freq')
        return
stem = args[0]
tag = args[1]
freq = float(args[2])
if (stem[(- 1)] != os.path.sep):
stem = (stem + os.path.sep)
influence = []
inf_file = (stem + 'influence')
with open(inf_file, 'rb') as filehandle:
influence = pickle.load(filehandle)
tests = [('T(curr): initial unbalanced state', ('t' + tag))]
grab_data(tests, stem)
print('Processing captured data...')
results = batch_process(tests, stem, freq)
print('Dual Plane Balance Results')
correction = bal.dual_compute_weights(results, influence)
| 8,211,429,376,707,480,000
|
This method performs an iteration of dual plane balance, once the
influence params are known. One file is captured and the results
are emitted
args are file stem, tag, freq
|
cheapskate_bal/cheapskate_bal/cli.py
|
csbal_dual_iter
|
kevinpowell/balancer
|
python
|
def csbal_dual_iter():
'\n This method performs an iteration of dual plane balance, once the\n influence params are known. One file is captured and the results\n are emitted\n args are file stem, tag, freq\n '
args = sys.argv[1:]
    if (len(args) < 3):
        print('args are: filestem, tag, freq')
        return
stem = args[0]
tag = args[1]
freq = float(args[2])
if (stem[(- 1)] != os.path.sep):
stem = (stem + os.path.sep)
influence = []
inf_file = (stem + 'influence')
with open(inf_file, 'rb') as filehandle:
influence = pickle.load(filehandle)
tests = [('T(curr): initial unbalanced state', ('t' + tag))]
grab_data(tests, stem)
print('Processing captured data...')
results = batch_process(tests, stem, freq)
print('Dual Plane Balance Results')
correction = bal.dual_compute_weights(results, influence)
|
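csbal_dual_init ends by pickling the influence coefficients to '<stem>influence', and csbal_dual_iter starts by unpickling that same file. A small round-trip sketch of that handoff (placeholder coefficients, not real balance data):
import os
import pickle
import tempfile

stem = tempfile.mkdtemp() + os.path.sep
influence = [complex(0.12, -0.03), complex(0.08, 0.05)]  # placeholder coefficients

with open(stem + 'influence', 'wb') as fh:   # what csbal_dual_init does
    pickle.dump(influence, fh)

with open(stem + 'influence', 'rb') as fh:   # what csbal_dual_iter does
    restored = pickle.load(fh)

assert restored == influence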
def prepare_package(err, path, expectation=0, for_appversions=None, timeout=(- 1)):
'Prepares a file-based package for validation.\n\n timeout is the number of seconds before validation is aborted.\n If timeout is -1 then no timeout checking code will run.\n '
package = None
try:
if (not os.path.isfile(path)):
err.error(('main', 'prepare_package', 'not_found'), 'The package could not be found')
return
package_extension = os.path.splitext(path)[1]
package_extension = package_extension.lower()
def timeout_handler(signum, frame):
raise validator.ValidationTimeout(timeout)
if (timeout != (- 1)):
signal.signal(signal.SIGALRM, timeout_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
if (package_extension == '.xml'):
test_search(err, path, expectation)
elif (package_extension not in ('.xpi', '.jar')):
err.error(('main', 'prepare_package', 'unrecognized'), 'The package is not of a recognized type.')
else:
package = open(path, 'rb')
test_package(err, package, path, expectation, for_appversions)
except validator.ValidationTimeout:
err.system_error(msg_id='validation_timeout', message='Validation has timed out', signing_severity='high', description=('Validation was unable to complete in the allotted time. This is most likely due to the size or complexity of your add-on.', 'This timeout has been logged, but please consider filing an issue report here: http://mzl.la/1DG0sFd'), exc_info=sys.exc_info())
except Exception:
err.system_error(exc_info=sys.exc_info())
finally:
if (timeout != (- 1)):
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
if package:
package.close()
decorator.cleanup()
| 4,124,246,953,156,577,000
|
Prepares a file-based package for validation.
timeout is the number of seconds before validation is aborted.
If timeout is -1 then no timeout checking code will run.
|
validator/submain.py
|
prepare_package
|
kumar303/amo-validator
|
python
|
def prepare_package(err, path, expectation=0, for_appversions=None, timeout=(- 1)):
'Prepares a file-based package for validation.\n\n timeout is the number of seconds before validation is aborted.\n If timeout is -1 then no timeout checking code will run.\n '
package = None
try:
if (not os.path.isfile(path)):
err.error(('main', 'prepare_package', 'not_found'), 'The package could not be found')
return
package_extension = os.path.splitext(path)[1]
package_extension = package_extension.lower()
def timeout_handler(signum, frame):
raise validator.ValidationTimeout(timeout)
if (timeout != (- 1)):
signal.signal(signal.SIGALRM, timeout_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
if (package_extension == '.xml'):
test_search(err, path, expectation)
elif (package_extension not in ('.xpi', '.jar')):
err.error(('main', 'prepare_package', 'unrecognized'), 'The package is not of a recognized type.')
else:
package = open(path, 'rb')
test_package(err, package, path, expectation, for_appversions)
except validator.ValidationTimeout:
err.system_error(msg_id='validation_timeout', message='Validation has timed out', signing_severity='high', description=('Validation was unable to complete in the allotted time. This is most likely due to the size or complexity of your add-on.', 'This timeout has been logged, but please consider filing an issue report here: http://mzl.la/1DG0sFd'), exc_info=sys.exc_info())
except Exception:
err.system_error(exc_info=sys.exc_info())
finally:
if (timeout != (- 1)):
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
if package:
package.close()
decorator.cleanup()
|
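prepare_package enforces its timeout by arming a real-time interval timer whose SIGALRM handler raises an exception, then always disarming the timer in the finally block. A stripped-down sketch of that pattern (Unix-only; the 0.2-second budget and the dummy workload are made up):
import signal
import time

class Timeout(Exception):
    pass

def run_with_timeout(func, seconds):
    def handler(signum, frame):
        raise Timeout()
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)   # arm the timer
    try:
        return func()
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)     # always disarm
        signal.signal(signal.SIGALRM, signal.SIG_DFL)

try:
    run_with_timeout(lambda: time.sleep(1), 0.2)
except Timeout:
    print('validation timed out')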
def test_search(err, package, expectation=0):
'Tests the package to see if it is a search provider.'
expected_search_provider = (expectation in (PACKAGE_ANY, PACKAGE_SEARCHPROV))
if (not expected_search_provider):
return err.warning(('main', 'test_search', 'extension'), 'Unexpected file extension.')
detect_opensearch(err, package, listed=err.get_resource('listed'))
if (expected_search_provider and (not err.failed())):
err.detected_type = PACKAGE_SEARCHPROV
| 3,807,156,908,103,187,000
|
Tests the package to see if it is a search provider.
|
validator/submain.py
|
test_search
|
kumar303/amo-validator
|
python
|
def test_search(err, package, expectation=0):
expected_search_provider = (expectation in (PACKAGE_ANY, PACKAGE_SEARCHPROV))
if (not expected_search_provider):
return err.warning(('main', 'test_search', 'extension'), 'Unexpected file extension.')
detect_opensearch(err, package, listed=err.get_resource('listed'))
if (expected_search_provider and (not err.failed())):
err.detected_type = PACKAGE_SEARCHPROV
|
def test_package(err, file_, name, expectation=PACKAGE_ANY, for_appversions=None):
'Begins tests for the package.'
try:
package = XPIManager(file_, mode='r', name=name)
has_package_json = ('package.json' in package)
has_manifest_json = ('manifest.json' in package)
has_install_rdf = ('install.rdf' in package)
if has_package_json:
_load_package_json(err, package, expectation)
if has_manifest_json:
_load_manifest_json(err, package, expectation)
if has_install_rdf:
_load_install_rdf(err, package, expectation)
except IOError:
err.error(('main', 'test_package', 'unopenable'), 'The XPI could not be opened.')
return
except (BadZipfile, zlib_error):
err.error(('submain', '_load_install_rdf', 'badzipfile'), error='Corrupt ZIP file', description='We were unable to decompress the zip file.')
return
if (package.extension in assumed_extensions):
assumed_type = assumed_extensions[package.extension]
if (expectation not in (PACKAGE_ANY, assumed_type)):
err.error(('main', 'test_package', 'unexpected_type'), 'Unexpected package type (found theme)')
test_inner_package(err, package, for_appversions)
| -6,450,757,682,280,588,000
|
Begins tests for the package.
|
validator/submain.py
|
test_package
|
kumar303/amo-validator
|
python
|
def test_package(err, file_, name, expectation=PACKAGE_ANY, for_appversions=None):
try:
package = XPIManager(file_, mode='r', name=name)
has_package_json = ('package.json' in package)
has_manifest_json = ('manifest.json' in package)
has_install_rdf = ('install.rdf' in package)
if has_package_json:
_load_package_json(err, package, expectation)
if has_manifest_json:
_load_manifest_json(err, package, expectation)
if has_install_rdf:
_load_install_rdf(err, package, expectation)
except IOError:
err.error(('main', 'test_package', 'unopenable'), 'The XPI could not be opened.')
return
except (BadZipfile, zlib_error):
err.error(('submain', '_load_install_rdf', 'badzipfile'), error='Corrupt ZIP file', description='We were unable to decompress the zip file.')
return
if (package.extension in assumed_extensions):
assumed_type = assumed_extensions[package.extension]
if (expectation not in (PACKAGE_ANY, assumed_type)):
err.error(('main', 'test_package', 'unexpected_type'), 'Unexpected package type (found theme)')
test_inner_package(err, package, for_appversions)
|
def populate_chrome_manifest(err, xpi_package):
"Loads the chrome.manifest if it's present"
if ('chrome.manifest' in xpi_package):
chrome_data = xpi_package.read('chrome.manifest')
chrome = ChromeManifest(chrome_data, 'chrome.manifest')
chrome_recursion_buster = set()
def get_linked_manifest(path, from_path, from_chrome, from_triple):
if (path in chrome_recursion_buster):
err.warning(err_id=('submain', 'populate_chrome_manifest', 'recursion'), warning='Linked manifest recursion detected.', description='A chrome registration file links back to itself. This can cause a multitude of issues.', filename=path)
return
if (path not in xpi_package):
err.notice(err_id=('submain', 'populate_chrome_manifest', 'linkerr'), notice='Linked manifest could not be found.', description=('A linked manifest file could not be found in the package.', ('Path: %s' % path)), filename=from_path, line=from_triple['line'], context=from_chrome.context)
return
chrome_recursion_buster.add(path)
manifest = ChromeManifest(xpi_package.read(path), path)
for triple in manifest.triples:
(yield triple)
if (triple['subject'] == 'manifest'):
subpath = triple['predicate']
if (not subpath.startswith('/')):
subpath = ('%s/%s' % ('/'.join(path.split('/')[:(- 1)]), subpath))
subpath = subpath.lstrip('/')
for subtriple in get_linked_manifest(subpath, path, manifest, triple):
(yield subtriple)
chrome_recursion_buster.discard(path)
chrome_recursion_buster.add('chrome.manifest')
for extra_manifest in chrome.get_triples(subject='manifest'):
for triple in get_linked_manifest(extra_manifest['predicate'], 'chrome.manifest', chrome, extra_manifest):
chrome.triples.append(triple)
chrome_recursion_buster.discard('chrome.manifest')
err.save_resource('chrome.manifest', chrome, pushable=True)
err.save_resource('chrome.manifest_nopush', chrome, pushable=False)
| 3,416,004,744,526,102,000
|
Loads the chrome.manifest if it's present
|
validator/submain.py
|
populate_chrome_manifest
|
kumar303/amo-validator
|
python
|
def populate_chrome_manifest(err, xpi_package):
if ('chrome.manifest' in xpi_package):
chrome_data = xpi_package.read('chrome.manifest')
chrome = ChromeManifest(chrome_data, 'chrome.manifest')
chrome_recursion_buster = set()
def get_linked_manifest(path, from_path, from_chrome, from_triple):
if (path in chrome_recursion_buster):
err.warning(err_id=('submain', 'populate_chrome_manifest', 'recursion'), warning='Linked manifest recursion detected.', description='A chrome registration file links back to itself. This can cause a multitude of issues.', filename=path)
return
if (path not in xpi_package):
err.notice(err_id=('submain', 'populate_chrome_manifest', 'linkerr'), notice='Linked manifest could not be found.', description=('A linked manifest file could not be found in the package.', ('Path: %s' % path)), filename=from_path, line=from_triple['line'], context=from_chrome.context)
return
chrome_recursion_buster.add(path)
manifest = ChromeManifest(xpi_package.read(path), path)
for triple in manifest.triples:
(yield triple)
if (triple['subject'] == 'manifest'):
subpath = triple['predicate']
if (not subpath.startswith('/')):
subpath = ('%s/%s' % ('/'.join(path.split('/')[:(- 1)]), subpath))
subpath = subpath.lstrip('/')
for subtriple in get_linked_manifest(subpath, path, manifest, triple):
(yield subtriple)
chrome_recursion_buster.discard(path)
chrome_recursion_buster.add('chrome.manifest')
for extra_manifest in chrome.get_triples(subject='manifest'):
for triple in get_linked_manifest(extra_manifest['predicate'], 'chrome.manifest', chrome, extra_manifest):
chrome.triples.append(triple)
chrome_recursion_buster.discard('chrome.manifest')
err.save_resource('chrome.manifest', chrome, pushable=True)
err.save_resource('chrome.manifest_nopush', chrome, pushable=False)
|
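get_linked_manifest above protects itself against manifest files that link back to each other by tracking the set of paths currently being expanded. The same cycle guard in isolation, over a plain dict of links (hypothetical file names):
def walk_links(path, links, in_progress=None):
    """Yield every path reachable from `path`, skipping cycles like the recursion buster above."""
    in_progress = in_progress or set()
    if path in in_progress:
        print('recursion detected at %s' % path)
        return
    in_progress.add(path)
    yield path
    for child in links.get(path, []):
        for sub in walk_links(child, links, in_progress):
            yield sub
    in_progress.discard(path)

links = {'chrome.manifest': ['a.manifest'], 'a.manifest': ['chrome.manifest']}
print(list(walk_links('chrome.manifest', links)))
# prints the recursion warning, then ['chrome.manifest', 'a.manifest']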
def test_inner_package(err, xpi_package, for_appversions=None):
"Tests a package's inner content."
populate_chrome_manifest(err, xpi_package)
for tier in sorted(decorator.get_tiers()):
err.set_tier(tier)
for test in decorator.get_tests(tier, err.detected_type):
if (test['versions'] is not None):
if (not err.supports_version(test['versions'])):
continue
if (for_appversions and (not (err._compare_version(requirements=for_appversions, support=test['versions']) and err.supports_version(for_appversions)))):
continue
err.version_requirements = test['versions']
test_func = test['test']
if test['simple']:
test_func(err)
else:
test_func(err, xpi_package)
if (err.failed(fail_on_warnings=False) and (not err.determined)):
err.unfinished = True
err.discard_unused_messages(ending_tier=tier)
return err
return err
| 3,624,565,051,179,548,000
|
Tests a package's inner content.
|
validator/submain.py
|
test_inner_package
|
kumar303/amo-validator
|
python
|
def test_inner_package(err, xpi_package, for_appversions=None):
populate_chrome_manifest(err, xpi_package)
for tier in sorted(decorator.get_tiers()):
err.set_tier(tier)
for test in decorator.get_tests(tier, err.detected_type):
if (test['versions'] is not None):
if (not err.supports_version(test['versions'])):
continue
if (for_appversions and (not (err._compare_version(requirements=for_appversions, support=test['versions']) and err.supports_version(for_appversions)))):
continue
err.version_requirements = test['versions']
test_func = test['test']
if test['simple']:
test_func(err)
else:
test_func(err, xpi_package)
if (err.failed(fail_on_warnings=False) and (not err.determined)):
err.unfinished = True
err.discard_unused_messages(ending_tier=tier)
return err
return err
|
def Main():
'The main program function.\n\n Returns:\n bool: True if successful or False if not.\n '
argument_parser = argparse.ArgumentParser(description='Extracts information from BSM event auditing files.')
argument_parser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False, help='enable debug output.')
argument_parser.add_argument('source', nargs='?', action='store', metavar='PATH', default=None, help='path of the BSM event auditing file.')
options = argument_parser.parse_args()
if (not options.source):
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print('Unable to open output writer with error: {0!s}'.format(exception))
print('')
return False
log_file = bsm.BSMEventAuditingFile(debug=options.debug, output_writer=output_writer)
log_file.Open(options.source)
print('BSM event auditing information:')
print('')
log_file.Close()
output_writer.Close()
return True
| -7,459,887,251,937,730,000
|
The main program function.
Returns:
bool: True if successful or False if not.
|
scripts/bsm.py
|
Main
|
jleaniz/dtformats
|
python
|
def Main():
'The main program function.\n\n Returns:\n bool: True if successful or False if not.\n '
argument_parser = argparse.ArgumentParser(description='Extracts information from BSM event auditing files.')
argument_parser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False, help='enable debug output.')
argument_parser.add_argument('source', nargs='?', action='store', metavar='PATH', default=None, help='path of the BSM event auditing file.')
options = argument_parser.parse_args()
if (not options.source):
print('Source file missing.')
print()
argument_parser.print_help()
print()
return False
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print('Unable to open output writer with error: {0!s}'.format(exception))
print()
return False
log_file = bsm.BSMEventAuditingFile(debug=options.debug, output_writer=output_writer)
log_file.Open(options.source)
print('BSM event auditing information:')
print()
log_file.Close()
output_writer.Close()
return True
|
def annuli_around(region, inner_factor, outer_factor, header, x_size, y_size):
'\n This function ...\n :param region:\n :param inner_factor:\n :param outer_factor:\n :param header:\n :param x_size:\n :param y_size:\n :return:\n '
inner_region = regions.expand(region, inner_factor)
outer_region = regions.expand(region, outer_factor)
inner_mask = regions.create_mask(inner_region, header, x_size, y_size)
outer_mask = regions.create_mask(outer_region, header, x_size, y_size)
mask = (inner_mask | np.logical_not(outer_mask))
return mask
| -1,261,848,470,547,287,000
|
This function ...
:param region:
:param inner_factor:
:param outer_factor:
:param header:
:param x_size:
:param y_size:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
annuli_around
|
Stargrazer82301/CAAPR
|
python
|
def annuli_around(region, inner_factor, outer_factor, header, x_size, y_size):
'\n This function ...\n :param region:\n :param inner_factor:\n :param outer_factor:\n :param header:\n :param x_size:\n :param y_size:\n :return:\n '
inner_region = regions.expand(region, inner_factor)
outer_region = regions.expand(region, outer_factor)
inner_mask = regions.create_mask(inner_region, header, x_size, y_size)
outer_mask = regions.create_mask(outer_region, header, x_size, y_size)
mask = (inner_mask | np.logical_not(outer_mask))
return mask
|
def masked_outside(region, header, x_size, y_size, expand_factor=1.0):
'\n This function ...\n :param region:\n :param header:\n :param x_size:\n :param y_size:\n :param expand_factor:\n :return:\n '
region = regions.expand(region, factor=expand_factor)
mask = np.logical_not(regions.create_mask(region, header, x_size, y_size))
return mask
| 1,645,701,534,307,375,000
|
This function ...
:param region:
:param header:
:param x_size:
:param y_size:
:param expand_factor:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
masked_outside
|
Stargrazer82301/CAAPR
|
python
|
def masked_outside(region, header, x_size, y_size, expand_factor=1.0):
'\n This function ...\n :param region:\n :param header:\n :param x_size:\n :param y_size:\n :param expand_factor:\n :return:\n '
region = regions.expand(region, factor=expand_factor)
mask = np.logical_not(regions.create_mask(region, header, x_size, y_size))
return mask
|
def create_disk_mask(x_size, y_size, x_center, y_center, radius):
'\n This function ...\n :param x_size:\n :param y_size:\n :param x_center:\n :param y_center:\n :param radius:\n :return:\n '
(y, x) = np.ogrid[(- y_center):(y_size - y_center), (- x_center):(x_size - x_center)]
mask = (((x * x) + (y * y)) <= (radius * radius))
return mask
| -2,308,099,002,475,903,500
|
This function ...
:param x_size:
:param y_size:
:param x_center:
:param y_center:
:param radius:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
create_disk_mask
|
Stargrazer82301/CAAPR
|
python
|
def create_disk_mask(x_size, y_size, x_center, y_center, radius):
'\n This function ...\n :param x_size:\n :param y_size:\n :param x_center:\n :param y_center:\n :param radius:\n :return:\n '
(y, x) = np.ogrid[(- y_center):(y_size - y_center), (- x_center):(x_size - x_center)]
mask = (((x * x) + (y * y)) <= (radius * radius))
return mask
|
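annuli_around composes its result as inner_mask | ~outer_mask, so everything except the ring between the two expanded regions stays masked. The same composition with two disk masks built the way create_disk_mask does (tiny 7x7 example):
import numpy as np

def disk_mask(x_size, y_size, x_center, y_center, radius):
    # same ogrid construction as create_disk_mask above
    y, x = np.ogrid[-y_center:y_size - y_center, -x_center:x_size - x_center]
    return (x * x + y * y) <= radius * radius

inner = disk_mask(7, 7, 3, 3, 1)
outer = disk_mask(7, 7, 3, 3, 3)
annulus_masked = inner | np.logical_not(outer)   # True everywhere except the ring
print(annulus_masked.astype(int))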
def union(mask_a, mask_b):
'\n This function ...\n :param args:\n :return:\n '
return (mask_a + mask_b)
| -4,128,617,534,399,295,500
|
This function returns the union of two masks.
:param mask_a:
:param mask_b:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
union
|
Stargrazer82301/CAAPR
|
python
|
def union(mask_a, mask_b):
    '\n    This function returns the union of two masks.\n    :param mask_a:\n    :param mask_b:\n    :return:\n    '
return (mask_a + mask_b)
|
def intersection(mask_a, mask_b):
    '\n    This function returns the intersection of two masks.\n    :param mask_a:\n    :param mask_b:\n    :return:\n    '
return (mask_a * mask_b)
| -3,610,174,022,581,261,300
|
This function returns the intersection of two masks.
:param mask_a:
:param mask_b:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
intersection
|
Stargrazer82301/CAAPR
|
python
|
def intersection(mask_a, mask_b):
    '\n    This function returns the intersection of two masks.\n    :param mask_a:\n    :param mask_b:\n    :return:\n    '
return (mask_a * mask_b)
|
def overlap(mask_a, mask_b):
'\n This function ...\n :param mask_a:\n :param mask_b:\n :return:\n '
return np.any(intersection(mask_a, mask_b))
| -6,524,343,050,101,109,000
|
This function ...
:param mask_a:
:param mask_b:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
overlap
|
Stargrazer82301/CAAPR
|
python
|
def overlap(mask_a, mask_b):
'\n This function ...\n :param mask_a:\n :param mask_b:\n :return:\n '
return np.any(intersection(mask_a, mask_b))
|
def split_overlap(base_mask, test_mask, return_segments=False):
'\n This function takes all blobs in the base_mask and checks whether they overlap with the test_mask.\n The function returns two new masks, one mask with all the blobs that overlapped, and another with the blobs\n that did not overlap.\n :param base_mask:\n :param test_mask:\n :return:\n '
overlapping = np.zeros_like(base_mask, dtype=bool)
not_overlapping = np.copy(base_mask)
from photutils import detect_sources
segments = detect_sources(base_mask.astype('float'), 0.5, 1).data
overlap = intersection(segments, test_mask)
possible = np.array(range(1, (np.max(overlap) + 1)))
present = np.in1d(possible, overlap)
indices = possible[present]
overlapping_segments = np.zeros_like(base_mask, dtype=int)
not_overlapping_segments = np.copy(segments)
for index in indices:
blob = (segments == index)
overlapping[blob] = True
not_overlapping[blob] = False
overlapping_segments[blob] = index
not_overlapping_segments[blob] = 0
if return_segments:
return (overlapping, not_overlapping, overlapping_segments, not_overlapping_segments)
else:
return (overlapping, not_overlapping)
| 8,212,150,702,368,379,000
|
This function takes all blobs in the base_mask and checks whether they overlap with the test_mask.
The function returns two new masks, one mask with all the blobs that overlapped, and another with the blobs
that did not overlap.
:param base_mask:
:param test_mask:
:return:
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/masks.py
|
split_overlap
|
Stargrazer82301/CAAPR
|
python
|
def split_overlap(base_mask, test_mask, return_segments=False):
'\n This function takes all blobs in the base_mask and checks whether they overlap with the test_mask.\n The function returns two new masks, one mask with all the blobs that overlapped, and another with the blobs\n that did not overlap.\n :param base_mask:\n :param test_mask:\n :return:\n '
overlapping = np.zeros_like(base_mask, dtype=bool)
not_overlapping = np.copy(base_mask)
from photutils import detect_sources
segments = detect_sources(base_mask.astype('float'), 0.5, 1).data
overlap = intersection(segments, test_mask)
possible = np.array(range(1, (np.max(overlap) + 1)))
present = np.in1d(possible, overlap)
indices = possible[present]
overlapping_segments = np.zeros_like(base_mask, dtype=int)
not_overlapping_segments = np.copy(segments)
for index in indices:
blob = (segments == index)
overlapping[blob] = True
not_overlapping[blob] = False
overlapping_segments[blob] = index
not_overlapping_segments[blob] = 0
if return_segments:
return (overlapping, not_overlapping, overlapping_segments, not_overlapping_segments)
else:
return (overlapping, not_overlapping)
|
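split_overlap labels the connected blobs of base_mask and moves each whole blob into the overlapping output as soon as any of its pixels touches test_mask. A small demonstration of that blob-wise decision, using scipy.ndimage.label in place of photutils.detect_sources (toy arrays):
import numpy as np
from scipy import ndimage

base = np.array([[1, 1, 0, 0, 1, 1],
                 [1, 1, 0, 0, 1, 1]], dtype=bool)     # two separate blobs
test = np.array([[0, 1, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0]], dtype=bool)     # touches only the left blob

segments, _ = ndimage.label(base)
overlapping = np.zeros_like(base)
not_overlapping = base.copy()
for index in range(1, segments.max() + 1):
    blob = segments == index
    if np.any(blob & test):          # the whole blob moves to "overlapping"
        overlapping[blob] = True
        not_overlapping[blob] = False

print(overlapping.astype(int))       # left blob only
print(not_overlapping.astype(int))   # right blob only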
def wrap_check_policy(func):
'Check policy corresponding to the wrapped methods prior to execution\n\n This decorator requires the first 3 args of the wrapped function\n to be (self, context, volume)\n '
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
| 2,356,297,874,592,871,000
|
Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
|
cinder/volume/api.py
|
wrap_check_policy
|
CiscoSystems/cinder-old
|
python
|
def wrap_check_policy(func):
'Check policy corresponding to the wrapped methods prior to execution\n\n This decorator requires the first 3 args of the wrapped function\n to be (self, context, volume)\n '
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
|
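wrap_check_policy assumes the wrapped method's first three positional arguments are (self, context, volume) and runs the policy check before the real body executes. A minimal usage sketch (the check_policy stub here just prints; it is not cinder's policy engine):
import functools

def check_policy(context, action, target):
    print('checking %s for %s' % (action, target))

def wrap_check_policy(func):
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)
    return wrapped

class VolumeAPI(object):
    @wrap_check_policy
    def delete_volume_metadata(self, context, volume, key):
        return 'deleted %s from %s' % (key, volume)

print(VolumeAPI().delete_volume_metadata('ctx', 'vol-1', 'colour'))
# checking delete_volume_metadata for vol-1
# deleted colour from vol-1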
def remove_from_compute(self, context, volume, instance_id, host):
'Remove volume from specified compute host.'
rpc.call(context, rpc.queue_get_for(context, FLAGS.compute_topic, host), {'method': 'remove_volume_connection', 'args': {'instance_id': instance_id, 'volume_id': volume['id']}})
| -6,526,975,400,008,303,000
|
Remove volume from specified compute host.
|
cinder/volume/api.py
|
remove_from_compute
|
CiscoSystems/cinder-old
|
python
|
def remove_from_compute(self, context, volume, instance_id, host):
rpc.call(context, rpc.queue_get_for(context, FLAGS.compute_topic, host), {'method': 'remove_volume_connection', 'args': {'instance_id': instance_id, 'volume_id': volume['id']}})
|
@wrap_check_policy
def get_volume_metadata(self, context, volume):
'Get all metadata associated with a volume.'
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
| 5,805,609,976,959,036,000
|
Get all metadata associated with a volume.
|
cinder/volume/api.py
|
get_volume_metadata
|
CiscoSystems/cinder-old
|
python
|
@wrap_check_policy
def get_volume_metadata(self, context, volume):
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
|
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
'Delete the given metadata item from an volume.'
self.db.volume_metadata_delete(context, volume['id'], key)
| -1,655,074,085,884,326,100
|
Delete the given metadata item from an volume.
|
cinder/volume/api.py
|
delete_volume_metadata
|
CiscoSystems/cinder-old
|
python
|
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
self.db.volume_metadata_delete(context, volume['id'], key)
|
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
'Updates or creates volume metadata.\n\n If delete is True, metadata items that are not specified in the\n `metadata` argument will be deleted.\n\n '
if delete:
_metadata = metadata
else:
_metadata = self.get_volume_metadata(context, volume['id'])
_metadata.update(metadata)
self.db.volume_metadata_update(context, volume['id'], _metadata, True)
return _metadata
| -3,966,998,166,392,626,000
|
Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
|
cinder/volume/api.py
|
update_volume_metadata
|
CiscoSystems/cinder-old
|
python
|
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
'Updates or creates volume metadata.\n\n If delete is True, metadata items that are not specified in the\n `metadata` argument will be deleted.\n\n '
if delete:
_metadata = metadata
else:
_metadata = self.get_volume_metadata(context, volume['id'])
_metadata.update(metadata)
self.db.volume_metadata_update(context, volume['id'], _metadata, True)
return _metadata
|
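update_volume_metadata either replaces the stored metadata outright (delete=True) or layers the new items over the existing ones (delete=False). The same two branches on plain dicts (made-up keys):
def merge_metadata(existing, new, delete=False):
    # delete=True: the new dict wins completely; delete=False: new keys are layered on top
    if delete:
        return dict(new)
    merged = dict(existing)
    merged.update(new)
    return merged

existing = {'tier': 'gold', 'owner': 'ops'}
new = {'tier': 'silver'}
print(merge_metadata(existing, new, delete=False))  # {'tier': 'silver', 'owner': 'ops'}
print(merge_metadata(existing, new, delete=True))   # {'tier': 'silver'}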
def get_volume_metadata_value(self, volume, key):
'Get value of particular metadata key.'
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if (i['key'] == key):
return i['value']
return None
| -8,077,658,148,052,221,000
|
Get value of particular metadata key.
|
cinder/volume/api.py
|
get_volume_metadata_value
|
CiscoSystems/cinder-old
|
python
|
def get_volume_metadata_value(self, volume, key):
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if (i['key'] == key):
return i['value']
return None
|
def _check_volume_availability(self, context, volume, force):
'Check if the volume can be used.'
if (volume['status'] not in ['available', 'in-use']):
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if ((not force) and ('in-use' == volume['status'])):
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
| 5,035,103,438,389,566,000
|
Check if the volume can be used.
|
cinder/volume/api.py
|
_check_volume_availability
|
CiscoSystems/cinder-old
|
python
|
def _check_volume_availability(self, context, volume, force):
if (volume['status'] not in ['available', 'in-use']):
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if ((not force) and ('in-use' == volume['status'])):
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
|
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
'Create a new image from the specified volume.'
self._check_volume_availability(context, volume, force)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
rpc.cast(context, rpc.queue_get_for(context, FLAGS.volume_topic, volume['host']), {'method': 'copy_volume_to_image', 'args': {'volume_id': volume['id'], 'image_id': recv_metadata['id']}})
response = {'id': volume['id'], 'updated_at': volume['updated_at'], 'status': 'uploading', 'display_description': volume['display_description'], 'size': volume['size'], 'volume_type': volume['volume_type'], 'image_id': recv_metadata['id'], 'container_format': recv_metadata['container_format'], 'disk_format': recv_metadata['disk_format'], 'image_name': recv_metadata.get('name', None)}
return response
| -7,635,538,943,194,196,000
|
Create a new image from the specified volume.
|
cinder/volume/api.py
|
copy_volume_to_image
|
CiscoSystems/cinder-old
|
python
|
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
self._check_volume_availability(context, volume, force)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
rpc.cast(context, rpc.queue_get_for(context, FLAGS.volume_topic, volume['host']), {'method': 'copy_volume_to_image', 'args': {'volume_id': volume['id'], 'image_id': recv_metadata['id']}})
response = {'id': volume['id'], 'updated_at': volume['updated_at'], 'status': 'uploading', 'display_description': volume['display_description'], 'size': volume['size'], 'volume_type': volume['volume_type'], 'image_id': recv_metadata['id'], 'container_format': recv_metadata['container_format'], 'disk_format': recv_metadata['disk_format'], 'image_name': recv_metadata.get('name', None)}
return response
|
def show_mri_sample(sample, pred_mask=None, pred_lbl=None, seg_downsample=None, save_fn=None):
' Plot sample in three projections '
plt.close('all')
alpha = 0.5
image_alpha = 1.0
ims = sample['image'].numpy()
means = sample['mean'].numpy()
stds = sample['std'].numpy()
segs = (sample['segmentation'].numpy() if ('segmentation' in sample) else None)
if (ims.ndim == 4):
ims = np.expand_dims(ims, 0)
means = np.expand_dims(means, 0)
stds = np.expand_dims(stds, 0)
if (segs is not None):
segs = np.expand_dims(segs, 0)
n_images = len(ims)
n_root = int(np.ceil(np.sqrt(n_images)))
n_cols = (n_root * 2)
n_rows = (n_root * 2)
if (n_images == 2):
n_rows = 2
fig_scale = 2
f = plt.figure(figsize=((fig_scale * n_cols), (fig_scale * n_rows)))
brats_ids = ([sample['BraTSID']] if (n_images == 1) else sample['BraTSID'])
labels = None
if ('label' in sample):
labels = ([sample['label']] if (n_images == 1) else sample['label'])
def _subplot_index(index, row_off, col_off):
startrow = ((index * 2) // n_cols)
startcol = ((index * 2) % n_cols)
return (((((2 * startrow) + row_off) * n_cols) + (startcol + col_off)) + 1)
for index in range(n_images):
im = ims[index]
seg = segs[index]
seg = np.swapaxes(seg, 0, 3)
if (seg_downsample is not None):
seg = seg.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)
im = np.swapaxes(im, 0, 3)
im = ((im * stds[index]) + means[index])
title = f'BraTSID: {brats_ids[index]}'
if (labels is not None):
title += f', GT-MGMT:{labels[index]}'
if (pred_lbl is not None):
title += f'''
Pred-MGMT:{float(pred_lbl[index][0]):.3f}'''
(d, x, y, c) = im.shape
coronal_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 0, 0))
coronal_ax.set_title((title + ' - coronal'), fontsize=8)
coronal_ax.imshow(make_bg_transparent(adjust_saturation(im[::(- 1), (x // 2), :, :])), alpha=image_alpha)
sagittal_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 0, 1))
sagittal_ax.set_title((title + ' - sagittal'), fontsize=8)
sagittal_ax.get_yaxis().set_visible(False)
sagittal_ax.imshow(make_bg_transparent(adjust_saturation(im[::(- 1), :, (y // 2), :])), alpha=image_alpha)
axial_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 1, 0))
axial_ax.set_title((title + ' - axial'), fontsize=8)
axial_ax.imshow(make_bg_transparent(adjust_saturation(im[(d // 2), :, :, :])), alpha=image_alpha)
proj_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 1, 1), projection='3d')
proj_ax.scatter(*to_3d_points(im), color='gray', alpha=0.015, s=5, depthshade=False)
proj_ax.set_title(f'''Green=GT-tumor, Red=Pred-tumor
{title}''', fontsize=6)
proj_ax.set_xticks([])
proj_ax.set_yticks([])
proj_ax.set_zticks([])
if (seg is not None):
for (seg_chan, color) in zip(range(seg.shape[3]), ['green']):
coronal_ax.imshow(make_bg_transparent(seg[::(- 1), (x // 2), :, seg_chan], set_to_color=color), alpha=alpha)
sagittal_ax.imshow(make_bg_transparent(seg[::(- 1), :, (y // 2), seg_chan], set_to_color=color), alpha=alpha)
axial_ax.imshow(make_bg_transparent(seg[(d // 2), :, :, seg_chan], set_to_color=color), alpha=alpha)
proj_ax.scatter(*to_3d_points(seg[:, :, :, seg_chan]), color=color, s=5, alpha=0.05)
if (pred_mask is not None):
pred = np.swapaxes(pred_mask[index].cpu().numpy(), 0, 3)
pred = np.clip(pred, 0, 1.0)
if (seg_downsample is not None):
pred = pred.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)
for (seg_chan, color) in zip(range(pred.shape[3]), ['red']):
coronal_ax.imshow(make_bg_transparent(pred[::(- 1), (x // 2), :, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
sagittal_ax.imshow(make_bg_transparent(pred[::(- 1), :, (y // 2), seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
axial_ax.imshow(make_bg_transparent(pred[(d // 2), :, :, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
proj_ax.scatter(*to_3d_points(pred[:, :, :, seg_chan], th=0.5), color=color, s=5, alpha=0.05)
coronal_ax.plot([0, (x - 1)], [(d // 2), (d // 2)], '--', color='white', linewidth=1)
coronal_ax.plot([(x // 2), (x // 2)], [0, (d - 1)], '--', color='white', linewidth=1)
sagittal_ax.plot([0, (y - 1)], [(d // 2), (d // 2)], '--', color='white', linewidth=1)
sagittal_ax.plot([(y // 2), (y // 2)], [0, (d - 1)], '--', color='white', linewidth=1)
axial_ax.plot([0, (y - 1)], [(x // 2), (x // 2)], '--', color='white', linewidth=1)
axial_ax.plot([(x // 2), (x // 2)], [0, (y - 1)], '--', color='white', linewidth=1)
plt.subplots_adjust(left=0.0, top=1.0, right=1.0, bottom=0.0, wspace=0.15, hspace=0.15)
bbox = f.get_window_extent().transformed(f.dpi_scale_trans.inverted())
(width, height) = ((bbox.width * f.dpi), (bbox.height * f.dpi))
width *= 1.05
height *= 1.05
for row in range(0, n_rows, 2):
if ((n_images == 2) and (row > 0)):
break
for col in range(0, n_cols, 2):
different_color = (((row // 2) % 2) == ((col // 2) % 2))
color = ((1, 1, 1) if different_color else (0.8, 0.8, 0.8))
f.patches.extend([plt.Rectangle((((width * col) / n_cols), ((height * ((n_rows - row) - 2)) / n_rows)), (width / max(1, (n_cols // 2))), (height / max(1, (n_rows // 2))), fill=True, color=color, zorder=(- 1), alpha=0.5, transform=None, figure=f)])
if (save_fn is not None):
plt.savefig(save_fn, transparent=False)
else:
plt.show()
| 4,095,470,176,055,205,000
|
Plot sample in three projections
|
src/seg_model_utils/visualization.py
|
show_mri_sample
|
jpjuvo/RSNA-MICCAI-Brain-Tumor-Classification
|
python
|
def show_mri_sample(sample, pred_mask=None, pred_lbl=None, seg_downsample=None, save_fn=None):
    ' Plot sample in three projections '
plt.close('all')
alpha = 0.5
image_alpha = 1.0
ims = sample['image'].numpy()
means = sample['mean'].numpy()
stds = sample['std'].numpy()
segs = (sample['segmentation'].numpy() if ('segmentation' in sample) else None)
if (ims.ndim == 4):
ims = np.expand_dims(ims, 0)
means = np.expand_dims(means, 0)
stds = np.expand_dims(stds, 0)
if (segs is not None):
segs = np.expand_dims(segs, 0)
n_images = len(ims)
n_root = int(np.ceil(np.sqrt(n_images)))
n_cols = (n_root * 2)
n_rows = (n_root * 2)
if (n_images == 2):
n_rows = 2
fig_scale = 2
f = plt.figure(figsize=((fig_scale * n_cols), (fig_scale * n_rows)))
brats_ids = ([sample['BraTSID']] if (n_images == 1) else sample['BraTSID'])
labels = None
if ('label' in sample):
labels = ([sample['label']] if (n_images == 1) else sample['label'])
def _subplot_index(index, row_off, col_off):
startrow = ((index * 2) // n_cols)
startcol = ((index * 2) % n_cols)
return (((((2 * startrow) + row_off) * n_cols) + (startcol + col_off)) + 1)
for index in range(n_images):
im = ims[index]
seg = (segs[index] if (segs is not None) else None)
seg = (np.swapaxes(seg, 0, 3) if (seg is not None) else None)
if ((seg is not None) and (seg_downsample is not None)):
seg = seg.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)
im = np.swapaxes(im, 0, 3)
im = ((im * stds[index]) + means[index])
title = f'BraTSID: {brats_ids[index]}'
if (labels is not None):
title += f', GT-MGMT:{labels[index]}'
if (pred_lbl is not None):
title += f'\nPred-MGMT:{float(pred_lbl[index][0]):.3f}'
(d, x, y, c) = im.shape
coronal_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 0, 0))
coronal_ax.set_title((title + ' - coronal'), fontsize=8)
coronal_ax.imshow(make_bg_transparent(adjust_saturation(im[::(- 1), (x // 2), :, :])), alpha=image_alpha)
sagittal_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 0, 1))
sagittal_ax.set_title((title + ' - sagittal'), fontsize=8)
sagittal_ax.get_yaxis().set_visible(False)
sagittal_ax.imshow(make_bg_transparent(adjust_saturation(im[::(- 1), :, (y // 2), :])), alpha=image_alpha)
axial_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 1, 0))
axial_ax.set_title((title + ' - axial'), fontsize=8)
axial_ax.imshow(make_bg_transparent(adjust_saturation(im[(d // 2), :, :, :])), alpha=image_alpha)
proj_ax = f.add_subplot(n_rows, n_cols, _subplot_index(index, 1, 1), projection='3d')
proj_ax.scatter(*to_3d_points(im), color='gray', alpha=0.015, s=5, depthshade=False)
proj_ax.set_title(f'Green=GT-tumor, Red=Pred-tumor\n{title}', fontsize=6)
proj_ax.set_xticks([])
proj_ax.set_yticks([])
proj_ax.set_zticks([])
if (seg is not None):
for (seg_chan, color) in zip(range(seg.shape[3]), ['green']):
coronal_ax.imshow(make_bg_transparent(seg[::(- 1), (x // 2), :, seg_chan], set_to_color=color), alpha=alpha)
sagittal_ax.imshow(make_bg_transparent(seg[::(- 1), :, (y // 2), seg_chan], set_to_color=color), alpha=alpha)
axial_ax.imshow(make_bg_transparent(seg[(d // 2), :, :, seg_chan], set_to_color=color), alpha=alpha)
proj_ax.scatter(*to_3d_points(seg[:, :, :, seg_chan]), color=color, s=5, alpha=0.05)
if (pred_mask is not None):
pred = np.swapaxes(pred_mask[index].cpu().numpy(), 0, 3)
pred = np.clip(pred, 0, 1.0)
if (seg_downsample is not None):
pred = pred.repeat(seg_downsample, axis=0).repeat(seg_downsample, axis=1).repeat(seg_downsample, axis=2)
for (seg_chan, color) in zip(range(pred.shape[3]), ['red']):
coronal_ax.imshow(make_bg_transparent(pred[::(- 1), (x // 2), :, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
sagittal_ax.imshow(make_bg_transparent(pred[::(- 1), :, (y // 2), seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
axial_ax.imshow(make_bg_transparent(pred[(d // 2), :, :, seg_chan], set_to_color=color, bg_th=0.5), alpha=alpha)
proj_ax.scatter(*to_3d_points(pred[:, :, :, seg_chan], th=0.5), color=color, s=5, alpha=0.05)
coronal_ax.plot([0, (x - 1)], [(d // 2), (d // 2)], '--', color='white', linewidth=1)
coronal_ax.plot([(x // 2), (x // 2)], [0, (d - 1)], '--', color='white', linewidth=1)
sagittal_ax.plot([0, (y - 1)], [(d // 2), (d // 2)], '--', color='white', linewidth=1)
sagittal_ax.plot([(y // 2), (y // 2)], [0, (d - 1)], '--', color='white', linewidth=1)
axial_ax.plot([0, (y - 1)], [(x // 2), (x // 2)], '--', color='white', linewidth=1)
axial_ax.plot([(x // 2), (x // 2)], [0, (y - 1)], '--', color='white', linewidth=1)
plt.subplots_adjust(left=0.0, top=1.0, right=1.0, bottom=0.0, wspace=0.15, hspace=0.15)
bbox = f.get_window_extent().transformed(f.dpi_scale_trans.inverted())
(width, height) = ((bbox.width * f.dpi), (bbox.height * f.dpi))
width *= 1.05
height *= 1.05
for row in range(0, n_rows, 2):
if ((n_images == 2) and (row > 0)):
break
for col in range(0, n_cols, 2):
different_color = (((row // 2) % 2) == ((col // 2) % 2))
color = ((1, 1, 1) if different_color else (0.8, 0.8, 0.8))
f.patches.extend([plt.Rectangle((((width * col) / n_cols), ((height * ((n_rows - row) - 2)) / n_rows)), (width / max(1, (n_cols // 2))), (height / max(1, (n_rows // 2))), fill=True, color=color, zorder=(- 1), alpha=0.5, transform=None, figure=f)])
if (save_fn is not None):
plt.savefig(save_fn, transparent=False)
else:
plt.show()
|
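A minimal usage sketch for show_mri_sample, assuming the function and its helpers (make_bg_transparent, adjust_saturation, to_3d_points) are importable from src/seg_model_utils/visualization.py as the record's path suggests; the synthetic tensor shapes below are inferred from the indexing in the body and may not match the repository's real dataloader.

import torch
from src.seg_model_utils.visualization import show_mri_sample  # import path assumed

# Channel-first volumes: image is (C, D, H, W) so np.swapaxes(im, 0, 3) yields (W, D, H, C);
# mean/std are per-channel vectors so they broadcast after the swap.
c, d, h, w = 3, 32, 64, 64
sample = {
    'image': torch.rand(c, d, h, w),
    'mean': torch.zeros(c),
    'std': torch.ones(c),
    'segmentation': (torch.rand(1, d, h, w) > 0.95).float(),
    'BraTSID': 12345,
    'label': 1,
}
show_mri_sample(sample, save_fn='sample_preview.png')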
def date_to_int(self, dates):
"\n calculates number of days between 01/01/0001 and each date in dates\n date has format '%m/%d/%Y'\n\n :param dates: Pandas Series\n :return: list\n "
ret = []
for date in dates:
date0 = datetime.datetime(year=1, month=1, day=1)
datex = datetime.datetime.strptime(date, '%m/%d/%Y')
ret.append((datex - date0).days)
return ret
| -6,971,423,626,463,422,000
|
calculates number of days between 01/01/0001 and each date in dates
date has format '%m/%d/%Y'
:param dates: Pandas Series
:return: list
|
backend/data_merge.py
|
date_to_int
|
repeating/stock-analyzer
|
python
|
def date_to_int(self, dates):
"\n calculates number of days between 01/01/0001 and each date in dates\n date has format '%m/%d/%Y'\n\n :param dates: Pandas Series\n :return: list\n "
ret = []
for date in dates:
date0 = datetime.datetime(year=1, month=1, day=1)
datex = datetime.datetime.strptime(date, '%m/%d/%Y')
ret.append((datex - date0).days)
return ret
|
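A small self-contained check of the same day count, with pandas used only to mirror the Series input the docstring mentions (the helper name is illustrative, not from the repository):

import datetime
import pandas as pd

def days_since_year_one(dates):
    # Same computation as date_to_int, written as a comprehension.
    epoch = datetime.datetime(year=1, month=1, day=1)
    return [(datetime.datetime.strptime(d, '%m/%d/%Y') - epoch).days for d in dates]

dates = pd.Series(['01/02/0001', '07/04/1776', '12/31/2020'])
print(days_since_year_one(dates))
# The proleptic-Gregorian ordinal gives the same numbers, since 0001-01-01 has ordinal 1:
print([datetime.datetime.strptime(d, '%m/%d/%Y').toordinal() - 1 for d in dates])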
def _get_args(info):
'Return the list of args & kwds for building the __init__ function'
required = set()
kwds = set()
invalid_kwds = set()
if info.is_allOf():
arginfo = [_get_args(child) for child in info.allOf]
nonkeyword = all((args[0] for args in arginfo))
required = set.union(set(), *(args[1] for args in arginfo))
kwds = set.union(set(), *(args[2] for args in arginfo))
kwds -= required
invalid_kwds = set.union(set(), *(args[3] for args in arginfo))
additional = all((args[4] for args in arginfo))
elif (info.is_empty() or info.is_compound()):
nonkeyword = True
additional = True
elif info.is_value():
nonkeyword = True
additional = False
elif info.is_object():
invalid_kwds = ({p for p in info.required if (not is_valid_identifier(p))} | {p for p in info.properties if (not is_valid_identifier(p))})
required = {p for p in info.required if is_valid_identifier(p)}
kwds = {p for p in info.properties if is_valid_identifier(p)}
kwds -= required
nonkeyword = False
additional = True
else:
raise ValueError('Schema object not understood')
return (nonkeyword, required, kwds, invalid_kwds, additional)
| 962,504,644,605,657,200
|
Return the list of args & kwds for building the __init__ function
|
tools/schemapi/codegen.py
|
_get_args
|
aladdingsw/altair
|
python
|
def _get_args(info):
required = set()
kwds = set()
invalid_kwds = set()
if info.is_allOf():
arginfo = [_get_args(child) for child in info.allOf]
nonkeyword = all((args[0] for args in arginfo))
required = set.union(set(), *(args[1] for args in arginfo))
kwds = set.union(set(), *(args[2] for args in arginfo))
kwds -= required
invalid_kwds = set.union(set(), *(args[3] for args in arginfo))
additional = all((args[4] for args in arginfo))
elif (info.is_empty() or info.is_compound()):
nonkeyword = True
additional = True
elif info.is_value():
nonkeyword = True
additional = False
elif info.is_object():
invalid_kwds = ({p for p in info.required if (not is_valid_identifier(p))} | {p for p in info.properties if (not is_valid_identifier(p))})
required = {p for p in info.required if is_valid_identifier(p)}
kwds = {p for p in info.properties if is_valid_identifier(p)}
kwds -= required
nonkeyword = False
additional = True
else:
raise ValueError('Schema object not understood')
return (nonkeyword, required, kwds, invalid_kwds, additional)
|
def schema_class(self):
'Generate code for a schema class'
rootschema = (self.rootschema if (self.rootschema is not None) else self.schema)
schemarepr = (self.schemarepr if (self.schemarepr is not None) else self.schema)
rootschemarepr = self.rootschemarepr
if (rootschemarepr is None):
if (rootschema is self.schema):
rootschemarepr = CodeSnippet('_schema')
else:
rootschemarepr = rootschema
return self.schema_class_template.format(classname=self.classname, basename=self.basename, schema=schemarepr, rootschema=rootschemarepr, docstring=self.docstring(indent=4), init_code=self.init_code(indent=4))
| 2,319,359,880,158,826,000
|
Generate code for a schema class
|
tools/schemapi/codegen.py
|
schema_class
|
aladdingsw/altair
|
python
|
def schema_class(self):
rootschema = (self.rootschema if (self.rootschema is not None) else self.schema)
schemarepr = (self.schemarepr if (self.schemarepr is not None) else self.schema)
rootschemarepr = self.rootschemarepr
if (rootschemarepr is None):
if (rootschema is self.schema):
rootschemarepr = CodeSnippet('_schema')
else:
rootschemarepr = rootschema
return self.schema_class_template.format(classname=self.classname, basename=self.basename, schema=schemarepr, rootschema=rootschemarepr, docstring=self.docstring(indent=4), init_code=self.init_code(indent=4))
|
def init_code(self, indent=0):
'Return code suitable for the __init__ function of a Schema class'
info = SchemaInfo(self.schema, rootschema=self.rootschema)
(nonkeyword, required, kwds, invalid_kwds, additional) = _get_args(info)
nodefault = set(self.nodefault)
required -= nodefault
kwds -= nodefault
args = ['self']
super_args = []
if nodefault:
args.extend(sorted(nodefault))
elif nonkeyword:
args.append('*args')
super_args.append('*args')
args.extend(('{}=Undefined'.format(p) for p in (sorted(required) + sorted(kwds))))
super_args.extend(('{0}={0}'.format(p) for p in ((sorted(nodefault) + sorted(required)) + sorted(kwds))))
if additional:
args.append('**kwds')
super_args.append('**kwds')
arg_indent_level = (9 + indent)
super_arg_indent_level = ((23 + len(self.classname)) + indent)
initfunc = self.init_template.format(classname=self.classname, arglist=indent_arglist(args, indent_level=arg_indent_level), super_arglist=indent_arglist(super_args, indent_level=super_arg_indent_level))
if indent:
initfunc = ('\n' + (indent * ' ')).join(initfunc.splitlines())
return initfunc
| 7,375,621,453,165,764,000
|
Return code suitable for the __init__ function of a Schema class
|
tools/schemapi/codegen.py
|
init_code
|
aladdingsw/altair
|
python
|
def init_code(self, indent=0):
info = SchemaInfo(self.schema, rootschema=self.rootschema)
(nonkeyword, required, kwds, invalid_kwds, additional) = _get_args(info)
nodefault = set(self.nodefault)
required -= nodefault
kwds -= nodefault
args = ['self']
super_args = []
if nodefault:
args.extend(sorted(nodefault))
elif nonkeyword:
args.append('*args')
super_args.append('*args')
args.extend(('{}=Undefined'.format(p) for p in (sorted(required) + sorted(kwds))))
super_args.extend(('{0}={0}'.format(p) for p in ((sorted(nodefault) + sorted(required)) + sorted(kwds))))
if additional:
args.append('**kwds')
super_args.append('**kwds')
arg_indent_level = (9 + indent)
super_arg_indent_level = ((23 + len(self.classname)) + indent)
initfunc = self.init_template.format(classname=self.classname, arglist=indent_arglist(args, indent_level=arg_indent_level), super_arglist=indent_arglist(super_args, indent_level=super_arg_indent_level))
if indent:
initfunc = ('\n' + (indent * ' ')).join(initfunc.splitlines())
return initfunc
|
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
return {'metadataset_ids': ([str],)}
| 3,202,055,015,200,675,300
|
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
|
datameta_client_lib/model/staged_meta_data_sets.py
|
openapi_types
|
ghga-de/datameta-client-lib
|
python
|
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
return {'metadataset_ids': ([str],)}
|
@convert_js_args_to_python_args
def __init__(self, metadataset_ids, *args, **kwargs):
'StagedMetaDataSets - a model defined in OpenAPI\n\n Args:\n metadataset_ids ([str]):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.metadataset_ids = metadataset_ids
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
| -4,712,968,294,148,478,000
|
StagedMetaDataSets - a model defined in OpenAPI
Args:
metadataset_ids ([str]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
|
datameta_client_lib/model/staged_meta_data_sets.py
|
__init__
|
ghga-de/datameta-client-lib
|
python
|
@convert_js_args_to_python_args
def __init__(self, metadataset_ids, *args, **kwargs):
'StagedMetaDataSets - a model defined in OpenAPI\n\n Args:\n metadataset_ids ([str]):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.metadataset_ids = metadataset_ids
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
|
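A hedged usage sketch for the generated model; the import path follows the record's file path and the client's actual packaging may differ:

from datameta_client_lib.model.staged_meta_data_sets import StagedMetaDataSets  # path assumed

# metadataset_ids is the only field declared in openapi_types; the underscore kwargs
# follow the generated-client conventions documented above.
staged = StagedMetaDataSets(metadataset_ids=['ms-0001', 'ms-0002'], _check_type=True)
print(staged.metadataset_ids)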
def __init__(self, alpha, beta, validate_args=True, allow_nan_stats=False, name='Gamma'):
'Construct Gamma distributions with parameters `alpha` and `beta`.\n\n The parameters `alpha` and `beta` must be shaped in a way that supports\n broadcasting (e.g. `alpha + beta` is a valid operation).\n\n Args:\n alpha: Floating point tensor, the shape params of the\n distribution(s).\n alpha must contain only positive values.\n beta: Floating point tensor, the inverse scale params of the\n distribution(s).\n beta must contain only positive values.\n validate_args: Whether to assert that `a > 0, b > 0`, and that `x > 0` in\n the methods `prob(x)` and `log_prob(x)`. If `validate_args` is `False`\n and the inputs are invalid, correct behavior is not guaranteed.\n allow_nan_stats: Boolean, default `False`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: The name to prepend to all ops created by this distribution.\n\n Raises:\n TypeError: if `alpha` and `beta` are different dtypes.\n '
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.name_scope(name, values=[alpha, beta]) as scope:
self._name = scope
with ops.control_dependencies(([check_ops.assert_positive(alpha), check_ops.assert_positive(beta)] if validate_args else [])):
alpha = array_ops.identity(alpha, name='alpha')
beta = array_ops.identity(beta, name='beta')
contrib_tensor_util.assert_same_float_dtype((alpha, beta))
self._broadcast_tensor = (alpha + beta)
self._get_batch_shape = self._broadcast_tensor.get_shape()
self._get_event_shape = tensor_shape.TensorShape([])
self._alpha = alpha
self._beta = beta
| 8,342,424,512,551,853,000
|
Construct Gamma distributions with parameters `alpha` and `beta`.
The parameters `alpha` and `beta` must be shaped in a way that supports
broadcasting (e.g. `alpha + beta` is a valid operation).
Args:
alpha: Floating point tensor, the shape params of the
distribution(s).
alpha must contain only positive values.
beta: Floating point tensor, the inverse scale params of the
distribution(s).
beta must contain only positive values.
validate_args: Whether to assert that `a > 0, b > 0`, and that `x > 0` in
the methods `prob(x)` and `log_prob(x)`. If `validate_args` is `False`
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: Boolean, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prepend to all ops created by this distribution.
Raises:
TypeError: if `alpha` and `beta` are different dtypes.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
__init__
|
enrewen1/tf
|
python
|
def __init__(self, alpha, beta, validate_args=True, allow_nan_stats=False, name='Gamma'):
'Construct Gamma distributions with parameters `alpha` and `beta`.\n\n The parameters `alpha` and `beta` must be shaped in a way that supports\n broadcasting (e.g. `alpha + beta` is a valid operation).\n\n Args:\n alpha: Floating point tensor, the shape params of the\n distribution(s).\n alpha must contain only positive values.\n beta: Floating point tensor, the inverse scale params of the\n distribution(s).\n beta must contain only positive values.\n validate_args: Whether to assert that `a > 0, b > 0`, and that `x > 0` in\n the methods `prob(x)` and `log_prob(x)`. If `validate_args` is `False`\n and the inputs are invalid, correct behavior is not guaranteed.\n allow_nan_stats: Boolean, default `False`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: The name to prepend to all ops created by this distribution.\n\n Raises:\n TypeError: if `alpha` and `beta` are different dtypes.\n '
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.name_scope(name, values=[alpha, beta]) as scope:
self._name = scope
with ops.control_dependencies(([check_ops.assert_positive(alpha), check_ops.assert_positive(beta)] if validate_args else [])):
alpha = array_ops.identity(alpha, name='alpha')
beta = array_ops.identity(beta, name='beta')
contrib_tensor_util.assert_same_float_dtype((alpha, beta))
self._broadcast_tensor = (alpha + beta)
self._get_batch_shape = self._broadcast_tensor.get_shape()
self._get_event_shape = tensor_shape.TensorShape([])
self._alpha = alpha
self._beta = beta
|
@property
def allow_nan_stats(self):
'Boolean describing behavior when a stat is undefined for batch member.'
return self._allow_nan_stats
| -6,998,151,567,754,223,000
|
Boolean describing behavior when a stat is undefined for batch member.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
allow_nan_stats
|
enrewen1/tf
|
python
|
@property
def allow_nan_stats(self):
return self._allow_nan_stats
|
@property
def validate_args(self):
'Boolean describing behavior on invalid input.'
return self._validate_args
| -1,579,648,302,353,013,800
|
Boolean describing behavior on invalid input.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
validate_args
|
enrewen1/tf
|
python
|
@property
def validate_args(self):
return self._validate_args
|
@property
def name(self):
'Name to prepend to all ops.'
return self._name
| -1,989,245,888,842,757,000
|
Name to prepend to all ops.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
name
|
enrewen1/tf
|
python
|
@property
def name(self):
return self._name
|
@property
def dtype(self):
'dtype of samples from this distribution.'
return self._alpha.dtype
| -6,171,087,007,865,193,000
|
dtype of samples from this distribution.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
dtype
|
enrewen1/tf
|
python
|
@property
def dtype(self):
return self._alpha.dtype
|
@property
def alpha(self):
'Shape parameter.'
return self._alpha
| -6,876,081,743,250,618,000
|
Shape parameter.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
alpha
|
enrewen1/tf
|
python
|
@property
def alpha(self):
return self._alpha
|
@property
def beta(self):
'Inverse scale parameter.'
return self._beta
| -8,770,863,598,163,808,000
|
Inverse scale parameter.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
beta
|
enrewen1/tf
|
python
|
@property
def beta(self):
return self._beta
|
def batch_shape(self, name='batch_shape'):
'Batch dimensions of this instance as a 1-D int32 `Tensor`.\n\n The product of the dimensions of the `batch_shape` is the number of\n independent distributions of this kind the instance represents.\n\n Args:\n name: name to give to the op\n\n Returns:\n `Tensor` `batch_shape`\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._broadcast_tensor]):
return array_ops.shape(self._broadcast_tensor)
| -794,722,025,407,041,500
|
Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
`Tensor` `batch_shape`
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
batch_shape
|
enrewen1/tf
|
python
|
def batch_shape(self, name='batch_shape'):
'Batch dimensions of this instance as a 1-D int32 `Tensor`.\n\n The product of the dimensions of the `batch_shape` is the number of\n independent distributions of this kind the instance represents.\n\n Args:\n name: name to give to the op\n\n Returns:\n `Tensor` `batch_shape`\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._broadcast_tensor]):
return array_ops.shape(self._broadcast_tensor)
|
def get_batch_shape(self):
'`TensorShape` available at graph construction time.\n\n Same meaning as `batch_shape`. May be only partially defined.\n\n Returns:\n `TensorShape` object.\n '
return self._get_batch_shape
| -6,757,097,947,968,199,000
|
`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
`TensorShape` object.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
get_batch_shape
|
enrewen1/tf
|
python
|
def get_batch_shape(self):
'`TensorShape` available at graph construction time.\n\n Same meaning as `batch_shape`. May be only partially defined.\n\n Returns:\n `TensorShape` object.\n '
return self._get_batch_shape
|
def event_shape(self, name='event_shape'):
'Shape of a sample from a single distribution as a 1-D int32 `Tensor`.\n\n Args:\n name: name to give to the op\n\n Returns:\n `Tensor` `event_shape`\n '
with ops.name_scope(self.name):
with ops.name_scope(name):
return constant_op.constant([], dtype=dtypes.int32)
| 8,889,442,052,272,346,000
|
Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
`Tensor` `event_shape`
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
event_shape
|
enrewen1/tf
|
python
|
def event_shape(self, name='event_shape'):
'Shape of a sample from a single distribution as a 1-D int32 `Tensor`.\n\n Args:\n name: name to give to the op\n\n Returns:\n `Tensor` `event_shape`\n '
with ops.name_scope(self.name):
with ops.name_scope(name):
return constant_op.constant([], dtype=dtypes.int32)
|
def get_event_shape(self):
'`TensorShape` available at graph construction time.\n\n Same meaning as `event_shape`. May be only partially defined.\n\n Returns:\n `TensorShape` object.\n '
return self._get_event_shape
| -1,408,605,194,796,173,800
|
`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
`TensorShape` object.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
get_event_shape
|
enrewen1/tf
|
python
|
def get_event_shape(self):
'`TensorShape` available at graph construction time.\n\n Same meaning as `event_shape`. May be only partially defined.\n\n Returns:\n `TensorShape` object.\n '
return self._get_event_shape
|
def mean(self, name='mean'):
'Mean of each batch member.'
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return (self._alpha / self._beta)
| 2,590,676,959,716,852,000
|
Mean of each batch member.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
mean
|
enrewen1/tf
|
python
|
def mean(self, name='mean'):
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return (self._alpha / self._beta)
|
def mode(self, name='mode'):
'Mode of each batch member.\n\n The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,\n and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception\n will be raised rather than returning `NaN`.\n\n Args:\n name: A name to give this op.\n\n Returns:\n The mode for every batch member, a `Tensor` with same `dtype` as self.\n '
alpha = self._alpha
beta = self._beta
with ops.name_scope(self.name):
with ops.name_scope(name, values=[alpha, beta]):
mode_if_defined = ((alpha - 1.0) / beta)
if self.allow_nan_stats:
alpha_ge_1 = (alpha >= 1.0)
nan = (np.nan * self._ones())
return math_ops.select(alpha_ge_1, mode_if_defined, nan)
else:
one = constant_op.constant(1.0, dtype=self.dtype)
return control_flow_ops.with_dependencies([check_ops.assert_less(one, alpha, message='mode not defined for components of alpha <= 1')], mode_if_defined)
| -3,134,000,186,075,014,700
|
Mode of each batch member.
The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,
and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception
will be raised rather than returning `NaN`.
Args:
name: A name to give this op.
Returns:
The mode for every batch member, a `Tensor` with same `dtype` as self.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
mode
|
enrewen1/tf
|
python
|
def mode(self, name='mode'):
'Mode of each batch member.\n\n The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,\n and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception\n will be raised rather than returning `NaN`.\n\n Args:\n name: A name to give this op.\n\n Returns:\n The mode for every batch member, a `Tensor` with same `dtype` as self.\n '
alpha = self._alpha
beta = self._beta
with ops.name_scope(self.name):
with ops.name_scope(name, values=[alpha, beta]):
mode_if_defined = ((alpha - 1.0) / beta)
if self.allow_nan_stats:
alpha_ge_1 = (alpha >= 1.0)
nan = (np.nan * self._ones())
return math_ops.select(alpha_ge_1, mode_if_defined, nan)
else:
one = constant_op.constant(1.0, dtype=self.dtype)
return control_flow_ops.with_dependencies([check_ops.assert_less(one, alpha, message='mode not defined for components of alpha <= 1')], mode_if_defined)
|
def variance(self, name='variance'):
'Variance of each batch member.'
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return (self._alpha / math_ops.square(self._beta))
| 3,112,165,319,384,282,600
|
Variance of each batch member.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
variance
|
enrewen1/tf
|
python
|
def variance(self, name='variance'):
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return (self._alpha / math_ops.square(self._beta))
|
def std(self, name='std'):
'Standard deviation of this distribution.'
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return (math_ops.sqrt(self._alpha) / self._beta)
| -6,884,819,005,825,626,000
|
Standard deviation of this distribution.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
std
|
enrewen1/tf
|
python
|
def std(self, name='std'):
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta]):
return (math_ops.sqrt(self._alpha) / self._beta)
|
def log_prob(self, x, name='log_prob'):
'Log prob of observations in `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.\n\n Raises:\n TypeError: if `x` and `alpha` are different dtypes.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
alpha = self._alpha
beta = self._beta
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies(([check_ops.assert_positive(x)] if self.validate_args else []), x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
return ((((alpha * math_ops.log(beta)) + ((alpha - 1) * math_ops.log(x))) - (beta * x)) - math_ops.lgamma(self._alpha))
| -2,444,142,182,391,518,700
|
Log prob of observations in `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
Raises:
TypeError: if `x` and `alpha` are different dtypes.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
log_prob
|
enrewen1/tf
|
python
|
def log_prob(self, x, name='log_prob'):
'Log prob of observations in `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.\n\n Raises:\n TypeError: if `x` and `alpha` are different dtypes.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
alpha = self._alpha
beta = self._beta
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies(([check_ops.assert_positive(x)] if self.validate_args else []), x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
return ((((alpha * math_ops.log(beta)) + ((alpha - 1) * math_ops.log(x))) - (beta * x)) - math_ops.lgamma(self._alpha))
|
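The closed form returned above can be sanity-checked outside TensorFlow; a standalone SciPy snippet (SciPy parameterizes the gamma by shape a and scale = 1/beta):

import numpy as np
from scipy import stats, special

alpha, beta, x = 3.0, 2.0, 1.5
manual = alpha * np.log(beta) + (alpha - 1.0) * np.log(x) - beta * x - special.gammaln(alpha)
reference = stats.gamma.logpdf(x, alpha, scale=1.0 / beta)
assert np.isclose(manual, reference)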
def prob(self, x, name='prob'):
'Pdf of observations in `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n prob: tensor of dtype `dtype`, the PDFs of `x`\n\n Raises:\n TypeError: if `x` and `alpha` are different dtypes.\n '
return super(Gamma, self).prob(x, name)
| 3,286,626,708,954,102,000
|
Pdf of observations in `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the PDFs of `x`
Raises:
TypeError: if `x` and `alpha` are different dtypes.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
prob
|
enrewen1/tf
|
python
|
def prob(self, x, name='prob'):
'Pdf of observations in `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n prob: tensor of dtype `dtype`, the PDFs of `x`\n\n Raises:\n TypeError: if `x` and `alpha` are different dtypes.\n '
return super(Gamma, self).prob(x, name)
|
def log_cdf(self, x, name='log_cdf'):
'Log CDF of observations `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies(([check_ops.assert_positive(x)] if self.validate_args else []), x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
return math_ops.log(math_ops.igamma(self._alpha, (self._beta * x)))
| -2,546,260,761,848,574,000
|
Log CDF of observations `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
log_cdf
|
enrewen1/tf
|
python
|
def log_cdf(self, x, name='log_cdf'):
'Log CDF of observations `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies(([check_ops.assert_positive(x)] if self.validate_args else []), x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
return math_ops.log(math_ops.igamma(self._alpha, (self._beta * x)))
|
def cdf(self, x, name='cdf'):
'CDF of observations `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n cdf: tensor of dtype `dtype`, the CDFs of `x`.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
return math_ops.igamma(self._alpha, (self._beta * x))
| -6,876,127,372,610,292,000
|
CDF of observations `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
cdf
|
enrewen1/tf
|
python
|
def cdf(self, x, name='cdf'):
'CDF of observations `x` under these Gamma distribution(s).\n\n Args:\n x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.\n name: The name to give this op.\n\n Returns:\n cdf: tensor of dtype `dtype`, the CDFs of `x`.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self._alpha, self._beta, x]):
return math_ops.igamma(self._alpha, (self._beta * x))
|
def entropy(self, name='entropy'):
'The entropy of Gamma distribution(s).\n\n This is defined to be\n\n ```\n entropy = alpha - log(beta) + log(Gamma(alpha))\n + (1-alpha)digamma(alpha)\n ```\n\n where digamma(alpha) is the digamma function.\n\n Args:\n name: The name to give this op.\n\n Returns:\n entropy: tensor of dtype `dtype`, the entropy.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self.alpha, self._beta]):
alpha = self._alpha
beta = self._beta
return (((alpha - math_ops.log(beta)) + math_ops.lgamma(alpha)) + ((1 - alpha) * math_ops.digamma(alpha)))
| 9,167,662,546,117,315,000
|
The entropy of Gamma distribution(s).
This is defined to be
```
entropy = alpha - log(beta) + log(Gamma(alpha))
+ (1-alpha)digamma(alpha)
```
where digamma(alpha) is the digamma function.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
entropy
|
enrewen1/tf
|
python
|
def entropy(self, name='entropy'):
'The entropy of Gamma distribution(s).\n\n This is defined to be\n\n ```\n entropy = alpha - log(beta) + log(Gamma(alpha))\n + (1-alpha)digamma(alpha)\n ```\n\n where digamma(alpha) is the digamma function.\n\n Args:\n name: The name to give this op.\n\n Returns:\n entropy: tensor of dtype `dtype`, the entropy.\n '
with ops.name_scope(self.name):
with ops.name_scope(name, values=[self.alpha, self._beta]):
alpha = self._alpha
beta = self._beta
return (((alpha - math_ops.log(beta)) + math_ops.lgamma(alpha)) + ((1 - alpha) * math_ops.digamma(alpha)))
|
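The same kind of standalone SciPy check for the entropy formula above (shape alpha, rate beta, i.e. scale 1/beta in SciPy's convention):

import numpy as np
from scipy import stats, special

alpha, beta = 2.5, 4.0
manual = alpha - np.log(beta) + special.gammaln(alpha) + (1.0 - alpha) * special.digamma(alpha)
reference = stats.gamma.entropy(alpha, scale=1.0 / beta)
assert np.isclose(manual, reference)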
def sample_n(self, n, seed=None, name='sample_n'):
'Draws `n` samples from the Gamma distribution(s).\n\n See the doc for tf.random_gamma for further detail.\n\n Args:\n n: Python integer, the number of observations to sample from each\n distribution.\n seed: Python integer, the random seed for this operation.\n name: Optional name for the operation.\n\n Returns:\n samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`\n with values of type `self.dtype`.\n '
with ops.name_scope(self.name, values=[n, self.alpha, self._beta]):
return random_ops.random_gamma([n], self.alpha, beta=self._beta, dtype=self.dtype, seed=seed, name=name)
| 6,028,801,741,464,078,000
|
Draws `n` samples from the Gamma distribution(s).
See the doc for tf.random_gamma for further detail.
Args:
n: Python integer, the number of observations to sample from each
distribution.
seed: Python integer, the random seed for this operation.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
with values of type `self.dtype`.
|
tensorflow/contrib/distributions/python/ops/gamma.py
|
sample_n
|
enrewen1/tf
|
python
|
def sample_n(self, n, seed=None, name='sample_n'):
'Draws `n` samples from the Gamma distribution(s).\n\n See the doc for tf.random_gamma for further detail.\n\n Args:\n n: Python integer, the number of observations to sample from each\n distribution.\n seed: Python integer, the random seed for this operation.\n name: Optional name for the operation.\n\n Returns:\n samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`\n with values of type `self.dtype`.\n '
with ops.name_scope(self.name, values=[n, self.alpha, self._beta]):
return random_ops.random_gamma([n], self.alpha, beta=self._beta, dtype=self.dtype, seed=seed, name=name)
|
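For reference only, the equivalent draw in NumPy; its sampler uses the shape/scale convention, so the rate beta enters as scale = 1/beta (this is not the op's implementation):

import numpy as np

alpha, beta, n = 3.0, 2.0, 100_000
rng = np.random.default_rng(seed=0)
samples = rng.gamma(shape=alpha, scale=1.0 / beta, size=n)
print(samples.mean(), alpha / beta)  # sample mean should sit close to the analytic mean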
@abstractmethod
def has_valid_padding(self, ciphertext: bytes) -> bool:
'\n Override this method and send off the ciphertext to check for valid padding.\n\n :param bytes ciphertext: The ciphertext to check, send this to your padding oracle.\n :rtype: True for valid padding, False otherwise.\n '
raise PaddownException('Not implemented')
| -3,526,876,157,925,465,000
|
Override this method and send off the ciphertext to check for valid padding.
:param bytes ciphertext: The ciphertext to check, send this to your padding oracle.
:rtype: True for valid padding, False otherwise.
|
paddown.py
|
has_valid_padding
|
MarvinKweyu/PadDown
|
python
|
@abstractmethod
def has_valid_padding(self, ciphertext: bytes) -> bool:
'\n Override this method and send off the ciphertext to check for valid padding.\n\n :param bytes ciphertext: The ciphertext to check, send this to your padding oracle.\n :rtype: True for valid padding, False otherwise.\n '
raise PaddownException('Not implemented')
|
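A hedged sketch of what an override can look like: a purely local "oracle" that decrypts with a known key and reports whether PKCS#7 unpadding succeeds, using the cryptography package. In the repository one would subclass the abstract base defined in paddown.py and point this method at the real target instead; the class and key handling below are illustrative only.

import os
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

class LocalPaddingOracle:
    def __init__(self, key: bytes):
        self.key = key

    def has_valid_padding(self, ciphertext: bytes) -> bool:
        # Treat the first 16 bytes as the IV and the rest as CBC ciphertext blocks.
        iv, body = ciphertext[:16], ciphertext[16:]
        decryptor = Cipher(algorithms.AES(self.key), modes.CBC(iv)).decryptor()
        plaintext = decryptor.update(body) + decryptor.finalize()
        try:
            unpadder = padding.PKCS7(128).unpadder()
            unpadder.update(plaintext) + unpadder.finalize()
            return True
        except ValueError:
            return False

oracle = LocalPaddingOracle(key=os.urandom(16))
print(oracle.has_valid_padding(os.urandom(32)))  # random bytes almost never unpad cleanly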
def boosted_trees_calculate_best_gains_per_feature(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None):
'Calculates gains for each feature and returns the best possible split information for the feature.\n\n The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\n It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\n In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\n The length of output lists are all of the same length, `num_features`.\n The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.\n\n Args:\n node_id_range: A `Tensor` of type `int32`.\n A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).\n stats_summary_list: A list of at least 1 `Tensor` objects with type `float32`.\n A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.\n l1: A `Tensor` of type `float32`.\n l1 regularization factor on leaf weights, per instance based.\n l2: A `Tensor` of type `float32`.\n l2 regularization factor on leaf weights, per instance based.\n tree_complexity: A `Tensor` of type `float32`.\n adjustment to the gain, per leaf based.\n min_node_weight: A `Tensor` of type `float32`.\n mininum avg of hessians in a node before required for the node to be considered for splitting.\n max_splits: An `int` that is `>= 1`.\n the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list).\n\n node_ids_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.\n gains_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n thresholds_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.\n left_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n right_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (not isinstance(stats_summary_list, (list, tuple))):
raise TypeError(("Expected list for 'stats_summary_list' argument to 'boosted_trees_calculate_best_gains_per_feature' Op, not %r." % stats_summary_list))
_attr_num_features = len(stats_summary_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesCalculateBestGainsPerFeature', node_id_range=node_id_range, stats_summary_list=stats_summary_list, l1=l1, l2=l2, tree_complexity=tree_complexity, min_node_weight=min_node_weight, max_splits=max_splits, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('max_splits', _op.get_attr('max_splits'), 'num_features', _op.get_attr('num_features'))
_execute.record_gradient('BoostedTreesCalculateBestGainsPerFeature', _inputs_flat, _attrs, _result, name)
_result = ([_result[:_attr_num_features]] + _result[_attr_num_features:])
_result = ((_result[:1] + [_result[1:(1 + _attr_num_features)]]) + _result[(1 + _attr_num_features):])
_result = ((_result[:2] + [_result[2:(2 + _attr_num_features)]]) + _result[(2 + _attr_num_features):])
_result = ((_result[:3] + [_result[3:(3 + _attr_num_features)]]) + _result[(3 + _attr_num_features):])
_result = (_result[:4] + [_result[4:]])
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesCalculateBestGainsPerFeature', name, _ctx._post_execution_callbacks, node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, 'max_splits', max_splits)
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits=max_splits, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| -1,721,589,663,908,158,700
|
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The output lists all have the same length, `num_features`.
The output shapes are compatible in a way that the first dimension of all tensors of all lists is the same and equal to the number of possible split nodes for each feature.
Args:
node_id_range: A `Tensor` of type `int32`.
A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
stats_summary_list: A list of at least 1 `Tensor` objects with type `float32`.
A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
l1: A `Tensor` of type `float32`.
l1 regularization factor on leaf weights, per instance based.
l2: A `Tensor` of type `float32`.
l2 regularization factor on leaf weights, per instance based.
tree_complexity: A `Tensor` of type `float32`.
adjustment to the gain, per leaf based.
min_node_weight: A `Tensor` of type `float32`.
minimum avg of hessians in a node required before the node is considered for splitting.
max_splits: An `int` that is `>= 1`.
the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list).
node_ids_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.
gains_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.
thresholds_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.
left_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.
right_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_calculate_best_gains_per_feature
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_calculate_best_gains_per_feature(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None):
'Calculates gains for each feature and returns the best possible split information for the feature.\n\n The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\n It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\n In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\n The length of output lists are all of the same length, `num_features`.\n The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.\n\n Args:\n node_id_range: A `Tensor` of type `int32`.\n A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).\n stats_summary_list: A list of at least 1 `Tensor` objects with type `float32`.\n A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.\n l1: A `Tensor` of type `float32`.\n l1 regularization factor on leaf weights, per instance based.\n l2: A `Tensor` of type `float32`.\n l2 regularization factor on leaf weights, per instance based.\n tree_complexity: A `Tensor` of type `float32`.\n adjustment to the gain, per leaf based.\n min_node_weight: A `Tensor` of type `float32`.\n mininum avg of hessians in a node before required for the node to be considered for splitting.\n max_splits: An `int` that is `>= 1`.\n the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list).\n\n node_ids_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.\n gains_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n thresholds_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`.\n left_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n right_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (not isinstance(stats_summary_list, (list, tuple))):
raise TypeError(("Expected list for 'stats_summary_list' argument to 'boosted_trees_calculate_best_gains_per_feature' Op, not %r." % stats_summary_list))
_attr_num_features = len(stats_summary_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesCalculateBestGainsPerFeature', node_id_range=node_id_range, stats_summary_list=stats_summary_list, l1=l1, l2=l2, tree_complexity=tree_complexity, min_node_weight=min_node_weight, max_splits=max_splits, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('max_splits', _op.get_attr('max_splits'), 'num_features', _op.get_attr('num_features'))
_execute.record_gradient('BoostedTreesCalculateBestGainsPerFeature', _inputs_flat, _attrs, _result, name)
_result = ([_result[:_attr_num_features]] + _result[_attr_num_features:])
_result = ((_result[:1] + [_result[1:(1 + _attr_num_features)]]) + _result[(1 + _attr_num_features):])
_result = ((_result[:2] + [_result[2:(2 + _attr_num_features)]]) + _result[(2 + _attr_num_features):])
_result = ((_result[:3] + [_result[3:(3 + _attr_num_features)]]) + _result[(3 + _attr_num_features):])
_result = (_result[:4] + [_result[4:]])
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesCalculateBestGainsPerFeature', name, _ctx._post_execution_callbacks, node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, 'max_splits', max_splits)
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits=max_splits, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
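The record above documents `BoostedTreesCalculateBestGainsPerFeature`. A minimal, hypothetical usage sketch (not part of the source record) follows; it assumes TF 1.x graph mode, that the module is importable as `tensorflow.python.ops.gen_boosted_trees_ops` (the path listed in the record), and made-up toy values whose shapes follow the docstring.

# Hypothetical sketch: one feature, stats_summary shape [max_splits, bucket, 2].
from tensorflow.python.ops import gen_boosted_trees_ops

max_splits = 3
stats_summary = [[[0.1, 0.2], [0.3, 0.4]],
                 [[0.5, 0.6], [0.7, 0.8]],
                 [[0.0, 0.0], [0.0, 0.0]]]
outputs = gen_boosted_trees_ops.boosted_trees_calculate_best_gains_per_feature(
    node_id_range=[0, 2],              # iterate node ids 0 and 1 (end exclusive)
    stats_summary_list=[stats_summary],
    l1=0.0, l2=1.0,
    tree_complexity=0.0,
    min_node_weight=0.0,
    max_splits=max_splits)
# outputs is a namedtuple of per-feature lists: node_ids_list, gains_list,
# thresholds_list, left_node_contribs_list, right_node_contribs_list.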
def boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_calculate_best_gains_per_feature\n '
_ctx = (ctx if ctx else _context.context())
if (not isinstance(stats_summary_list, (list, tuple))):
raise TypeError(("Expected list for 'stats_summary_list' argument to 'boosted_trees_calculate_best_gains_per_feature' Op, not %r." % stats_summary_list))
_attr_num_features = len(stats_summary_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32)
stats_summary_list = _ops.convert_n_to_tensor(stats_summary_list, _dtypes.float32)
l1 = _ops.convert_to_tensor(l1, _dtypes.float32)
l2 = _ops.convert_to_tensor(l2, _dtypes.float32)
tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32)
min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32)
_inputs_flat = (([node_id_range] + list(stats_summary_list)) + [l1, l2, tree_complexity, min_node_weight])
_attrs = ('max_splits', max_splits, 'num_features', _attr_num_features)
_result = _execute.execute(b'BoostedTreesCalculateBestGainsPerFeature', ((((_attr_num_features + _attr_num_features) + _attr_num_features) + _attr_num_features) + _attr_num_features), inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesCalculateBestGainsPerFeature', _inputs_flat, _attrs, _result, name)
_result = ([_result[:_attr_num_features]] + _result[_attr_num_features:])
_result = ((_result[:1] + [_result[1:(1 + _attr_num_features)]]) + _result[(1 + _attr_num_features):])
_result = ((_result[:2] + [_result[2:(2 + _attr_num_features)]]) + _result[(2 + _attr_num_features):])
_result = ((_result[:3] + [_result[3:(3 + _attr_num_features)]]) + _result[(3 + _attr_num_features):])
_result = (_result[:4] + [_result[4:]])
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
| -7,513,891,186,051,138,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_calculate_best_gains_per_feature
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_calculate_best_gains_per_feature_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight, max_splits, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_calculate_best_gains_per_feature\n '
_ctx = (ctx if ctx else _context.context())
if (not isinstance(stats_summary_list, (list, tuple))):
raise TypeError(("Expected list for 'stats_summary_list' argument to 'boosted_trees_calculate_best_gains_per_feature' Op, not %r." % stats_summary_list))
_attr_num_features = len(stats_summary_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32)
stats_summary_list = _ops.convert_n_to_tensor(stats_summary_list, _dtypes.float32)
l1 = _ops.convert_to_tensor(l1, _dtypes.float32)
l2 = _ops.convert_to_tensor(l2, _dtypes.float32)
tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32)
min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32)
_inputs_flat = (([node_id_range] + list(stats_summary_list)) + [l1, l2, tree_complexity, min_node_weight])
_attrs = ('max_splits', max_splits, 'num_features', _attr_num_features)
_result = _execute.execute(b'BoostedTreesCalculateBestGainsPerFeature', ((((_attr_num_features + _attr_num_features) + _attr_num_features) + _attr_num_features) + _attr_num_features), inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesCalculateBestGainsPerFeature', _inputs_flat, _attrs, _result, name)
_result = ([_result[:_attr_num_features]] + _result[_attr_num_features:])
_result = ((_result[:1] + [_result[1:(1 + _attr_num_features)]]) + _result[(1 + _attr_num_features):])
_result = ((_result[:2] + [_result[2:(2 + _attr_num_features)]]) + _result[(2 + _attr_num_features):])
_result = ((_result[:3] + [_result[3:(3 + _attr_num_features)]]) + _result[(3 + _attr_num_features):])
_result = (_result[:4] + [_result[4:]])
_result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result)
return _result
|
def boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):
'Creates a tree ensemble model and returns a handle to it.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble resource to be created.\n stamp_token: A `Tensor` of type `int64`.\n Token to use as the initial value of the resource stamp.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n Serialized proto of the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesCreateEnsemble', tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token, tree_ensemble_serialized=tree_ensemble_serialized, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesCreateEnsemble', name, _ctx._post_execution_callbacks, tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
return _result
except _core._FallbackException:
return boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| 2,120,951,892,225,411,000
|
Creates a tree ensemble model and returns a handle to it.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble resource to be created.
stamp_token: A `Tensor` of type `int64`.
Token to use as the initial value of the resource stamp.
tree_ensemble_serialized: A `Tensor` of type `string`.
Serialized proto of the tree ensemble.
name: A name for the operation (optional).
Returns:
The created Operation.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_create_ensemble
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):
'Creates a tree ensemble model and returns a handle to it.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble resource to be created.\n stamp_token: A `Tensor` of type `int64`.\n Token to use as the initial value of the resource stamp.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n Serialized proto of the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesCreateEnsemble', tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token, tree_ensemble_serialized=tree_ensemble_serialized, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesCreateEnsemble', name, _ctx._post_execution_callbacks, tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
return _result
except _core._FallbackException:
return boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
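The record above documents `BoostedTreesCreateEnsemble`. Below is a hypothetical sketch (not from the source): it assumes TF 1.x graph mode, the import path shown in the record, and that an empty serialized proto yields an empty ensemble; the resource name is made up.

# Hypothetical sketch: create an (empty) ensemble resource.
from tensorflow.python.ops import gen_boosted_trees_ops

handle = gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
    shared_name='my_ensemble')            # hypothetical resource name
create_op = gen_boosted_trees_ops.boosted_trees_create_ensemble(
    tree_ensemble_handle=handle,
    stamp_token=0,                        # initial stamp value
    tree_ensemble_serialized='')          # assumption: empty proto -> empty ensemble
# In graph mode create_op is an Operation to be run once (e.g. inside a Session).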
def boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_create_ensemble\n '
_ctx = (ctx if ctx else _context.context())
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)
_inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]
_attrs = None
_result = _execute.execute(b'BoostedTreesCreateEnsemble', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
| -7,537,204,663,431,566,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_create_ensemble
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_create_ensemble_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_create_ensemble\n '
_ctx = (ctx if ctx else _context.context())
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)
_inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]
_attrs = None
_result = _execute.execute(b'BoostedTreesCreateEnsemble', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
|
def boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):
'Deserializes a serialized tree ensemble config and replaces current tree\n\n ensemble.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n stamp_token: A `Tensor` of type `int64`.\n Token to use as the new value of the resource stamp.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n Serialized proto of the ensemble.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesDeserializeEnsemble', tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token, tree_ensemble_serialized=tree_ensemble_serialized, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesDeserializeEnsemble', name, _ctx._post_execution_callbacks, tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
return _result
except _core._FallbackException:
return boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| -8,351,597,545,228,423,000
|
Deserializes a serialized tree ensemble config and replaces current tree
ensemble.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble.
stamp_token: A `Tensor` of type `int64`.
Token to use as the new value of the resource stamp.
tree_ensemble_serialized: A `Tensor` of type `string`.
Serialized proto of the ensemble.
name: A name for the operation (optional).
Returns:
The created Operation.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_deserialize_ensemble
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None):
'Deserializes a serialized tree ensemble config and replaces current tree\n\n ensemble.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n stamp_token: A `Tensor` of type `int64`.\n Token to use as the new value of the resource stamp.\n tree_ensemble_serialized: A `Tensor` of type `string`.\n Serialized proto of the ensemble.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesDeserializeEnsemble', tree_ensemble_handle=tree_ensemble_handle, stamp_token=stamp_token, tree_ensemble_serialized=tree_ensemble_serialized, name=name)
return _op
_result = None
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesDeserializeEnsemble', name, _ctx._post_execution_callbacks, tree_ensemble_handle, stamp_token, tree_ensemble_serialized)
return _result
except _core._FallbackException:
return boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
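The record above documents `BoostedTreesDeserializeEnsemble`. A hypothetical sketch (not from the source) follows; the handle name and the serialized proto are placeholders, and the import path is the one listed in the record.

# Hypothetical sketch: replace the current ensemble with a serialized one.
from tensorflow.python.ops import gen_boosted_trees_ops

handle = gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
    shared_name='restored_ensemble')      # hypothetical resource name
restore_op = gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
    tree_ensemble_handle=handle,
    stamp_token=1,                        # becomes the new resource stamp
    tree_ensemble_serialized='')          # placeholder for a serialized proto string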
def boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_deserialize_ensemble\n '
_ctx = (ctx if ctx else _context.context())
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)
_inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]
_attrs = None
_result = _execute.execute(b'BoostedTreesDeserializeEnsemble', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
| -7,604,356,604,721,799,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_deserialize_ensemble
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_deserialize_ensemble_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, tree_ensemble_serialized, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_deserialize_ensemble\n '
_ctx = (ctx if ctx else _context.context())
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string)
_inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized]
_attrs = None
_result = _execute.execute(b'BoostedTreesDeserializeEnsemble', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_result = None
return _result
|
def boosted_trees_ensemble_resource_handle_op(container='', shared_name='', name=None):
'Creates a handle to a BoostedTreesEnsembleResource\n\n Args:\n container: An optional `string`. Defaults to `""`.\n shared_name: An optional `string`. Defaults to `""`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (container is None):
container = ''
container = _execute.make_str(container, 'container')
if (shared_name is None):
shared_name = ''
shared_name = _execute.make_str(shared_name, 'shared_name')
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesEnsembleResourceHandleOp', container=container, shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('container', _op.get_attr('container'), 'shared_name', _op.get_attr('shared_name'))
_execute.record_gradient('BoostedTreesEnsembleResourceHandleOp', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesEnsembleResourceHandleOp', name, _ctx._post_execution_callbacks, 'container', container, 'shared_name', shared_name)
return _result
except _core._FallbackException:
return boosted_trees_ensemble_resource_handle_op_eager_fallback(container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| -1,176,348,563,963,926,300
|
Creates a handle to a BoostedTreesEnsembleResource
Args:
container: An optional `string`. Defaults to `""`.
shared_name: An optional `string`. Defaults to `""`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_ensemble_resource_handle_op
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_ensemble_resource_handle_op(container='', shared_name='', name=None):
    'Creates a handle to a BoostedTreesEnsembleResource\n\n Args:\n container: An optional `string`. Defaults to `""`.\n shared_name: An optional `string`. Defaults to `""`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (container is None):
      container = ''
container = _execute.make_str(container, 'container')
if (shared_name is None):
      shared_name = ''
shared_name = _execute.make_str(shared_name, 'shared_name')
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesEnsembleResourceHandleOp', container=container, shared_name=shared_name, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('container', _op.get_attr('container'), 'shared_name', _op.get_attr('shared_name'))
_execute.record_gradient('BoostedTreesEnsembleResourceHandleOp', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesEnsembleResourceHandleOp', name, _ctx._post_execution_callbacks, 'container', container, 'shared_name', shared_name)
return _result
except _core._FallbackException:
return boosted_trees_ensemble_resource_handle_op_eager_fallback(container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
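The record above documents `BoostedTreesEnsembleResourceHandleOp`. A hypothetical sketch (not from the source) follows, assuming standard TF resource-sharing semantics for `shared_name`; the names are made up.

# Hypothetical sketch: two handles referring to the same underlying resource.
from tensorflow.python.ops import gen_boosted_trees_ops

h1 = gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
    container='', shared_name='shared_ensemble')
h2 = gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
    container='', shared_name='shared_ensemble')   # assumed to alias the same resource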
def boosted_trees_ensemble_resource_handle_op_eager_fallback(container='', shared_name='', name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_ensemble_resource_handle_op\n '
_ctx = (ctx if ctx else _context.context())
if (container is None):
container = ''
container = _execute.make_str(container, 'container')
if (shared_name is None):
shared_name = ''
shared_name = _execute.make_str(shared_name, 'shared_name')
_inputs_flat = []
_attrs = ('container', container, 'shared_name', shared_name)
_result = _execute.execute(b'BoostedTreesEnsembleResourceHandleOp', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesEnsembleResourceHandleOp', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
| -9,014,903,550,404,713,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_ensemble_resource_handle_op
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_ensemble_resource_handle_op_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_ensemble_resource_handle_op_eager_fallback(container='', shared_name='', name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_ensemble_resource_handle_op\n '
_ctx = (ctx if ctx else _context.context())
if (container is None):
    container = ''
container = _execute.make_str(container, 'container')
if (shared_name is None):
    shared_name = ''
shared_name = _execute.make_str(shared_name, 'shared_name')
_inputs_flat = []
_attrs = ('container', container, 'shared_name', shared_name)
_result = _execute.execute(b'BoostedTreesEnsembleResourceHandleOp', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesEnsembleResourceHandleOp', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
|
def boosted_trees_get_ensemble_states(tree_ensemble_handle, name=None):
'Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, last_layer_nodes_range).\n\n stamp_token: A `Tensor` of type `int64`.\n num_trees: A `Tensor` of type `int32`.\n num_finalized_trees: A `Tensor` of type `int32`.\n num_attempted_layers: A `Tensor` of type `int32`.\n last_layer_nodes_range: A `Tensor` of type `int32`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesGetEnsembleStates', tree_ensemble_handle=tree_ensemble_handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient('BoostedTreesGetEnsembleStates', _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesGetEnsembleStates', name, _ctx._post_execution_callbacks, tree_ensemble_handle)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| 7,128,226,942,901,964,000
|
Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, last_layer_nodes_range).
stamp_token: A `Tensor` of type `int64`.
num_trees: A `Tensor` of type `int32`.
num_finalized_trees: A `Tensor` of type `int32`.
num_attempted_layers: A `Tensor` of type `int32`.
last_layer_nodes_range: A `Tensor` of type `int32`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_get_ensemble_states
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_get_ensemble_states(tree_ensemble_handle, name=None):
'Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.\n\n Args:\n tree_ensemble_handle: A `Tensor` of type `resource`.\n Handle to the tree ensemble.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, last_layer_nodes_range).\n\n stamp_token: A `Tensor` of type `int64`.\n num_trees: A `Tensor` of type `int32`.\n num_finalized_trees: A `Tensor` of type `int32`.\n num_attempted_layers: A `Tensor` of type `int32`.\n last_layer_nodes_range: A `Tensor` of type `int32`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesGetEnsembleStates', tree_ensemble_handle=tree_ensemble_handle, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient('BoostedTreesGetEnsembleStates', _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesGetEnsembleStates', name, _ctx._post_execution_callbacks, tree_ensemble_handle)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
except _core._FallbackException:
return boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
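The record above documents `BoostedTreesGetEnsembleStates`. A hypothetical sketch (not from the source) follows; it assumes the ensemble resource has already been created (see the earlier sketches) and that the namedtuple fields match the names listed in the docstring.

# Hypothetical sketch: query growing statistics of an existing ensemble.
from tensorflow.python.ops import gen_boosted_trees_ops

handle = gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
    shared_name='my_ensemble')            # hypothetical resource name
states = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(handle)
# states.stamp_token, states.num_trees, states.num_finalized_trees,
# states.num_attempted_layers, states.last_layer_nodes_range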
def boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_get_ensemble_states\n '
_ctx = (ctx if ctx else _context.context())
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
_inputs_flat = [tree_ensemble_handle]
_attrs = None
_result = _execute.execute(b'BoostedTreesGetEnsembleStates', 5, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesGetEnsembleStates', _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
| 5,859,933,895,684,794,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_get_ensemble_states
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_get_ensemble_states_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_get_ensemble_states\n '
_ctx = (ctx if ctx else _context.context())
tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
_inputs_flat = [tree_ensemble_handle]
_attrs = None
_result = _execute.execute(b'BoostedTreesGetEnsembleStates', 5, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesGetEnsembleStates', _inputs_flat, _attrs, _result, name)
_result = _BoostedTreesGetEnsembleStatesOutput._make(_result)
return _result
|
def boosted_trees_make_stats_summary(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None):
'Makes the summary of accumulated stats for the batch.\n\n The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.\n\n Args:\n node_ids: A `Tensor` of type `int32`.\n int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.\n gradients: A `Tensor` of type `float32`.\n float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.\n hessians: A `Tensor` of type `float32`.\n float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.\n bucketized_features_list: A list of at least 1 `Tensor` objects with type `int32`.\n int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).\n max_splits: An `int` that is `>= 1`.\n int; the maximum number of splits possible in the whole tree.\n num_buckets: An `int` that is `>= 1`.\n int; equals to the maximum possible value of bucketized feature.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (not isinstance(bucketized_features_list, (list, tuple))):
raise TypeError(("Expected list for 'bucketized_features_list' argument to 'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list))
_attr_num_features = len(bucketized_features_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
num_buckets = _execute.make_int(num_buckets, 'num_buckets')
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesMakeStatsSummary', node_ids=node_ids, gradients=gradients, hessians=hessians, bucketized_features_list=bucketized_features_list, max_splits=max_splits, num_buckets=num_buckets, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('max_splits', _op.get_attr('max_splits'), 'num_buckets', _op.get_attr('num_buckets'), 'num_features', _op.get_attr('num_features'))
_execute.record_gradient('BoostedTreesMakeStatsSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesMakeStatsSummary', name, _ctx._post_execution_callbacks, node_ids, gradients, hessians, bucketized_features_list, 'max_splits', max_splits, 'num_buckets', num_buckets)
return _result
except _core._FallbackException:
return boosted_trees_make_stats_summary_eager_fallback(node_ids, gradients, hessians, bucketized_features_list, max_splits=max_splits, num_buckets=num_buckets, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| -8,921,132,410,623,024,000
|
Makes the summary of accumulated stats for the batch.
The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
Args:
node_ids: A `Tensor` of type `int32`.
int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
gradients: A `Tensor` of type `float32`.
float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
hessians: A `Tensor` of type `float32`.
float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
bucketized_features_list: A list of at least 1 `Tensor` objects with type `int32`.
int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
max_splits: An `int` that is `>= 1`.
int; the maximum number of splits possible in the whole tree.
num_buckets: An `int` that is `>= 1`.
int; equals to the maximum possible value of bucketized feature.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_make_stats_summary
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_make_stats_summary(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None):
'Makes the summary of accumulated stats for the batch.\n\n The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.\n\n Args:\n node_ids: A `Tensor` of type `int32`.\n int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.\n gradients: A `Tensor` of type `float32`.\n float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.\n hessians: A `Tensor` of type `float32`.\n float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.\n bucketized_features_list: A list of at least 1 `Tensor` objects with type `int32`.\n int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).\n max_splits: An `int` that is `>= 1`.\n int; the maximum number of splits possible in the whole tree.\n num_buckets: An `int` that is `>= 1`.\n int; equals to the maximum possible value of bucketized feature.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32`.\n '
_ctx = _context._context
if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
if (not isinstance(bucketized_features_list, (list, tuple))):
raise TypeError(("Expected list for 'bucketized_features_list' argument to 'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list))
_attr_num_features = len(bucketized_features_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
num_buckets = _execute.make_int(num_buckets, 'num_buckets')
(_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesMakeStatsSummary', node_ids=node_ids, gradients=gradients, hessians=hessians, bucketized_features_list=bucketized_features_list, max_splits=max_splits, num_buckets=num_buckets, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('max_splits', _op.get_attr('max_splits'), 'num_buckets', _op.get_attr('num_buckets'), 'num_features', _op.get_attr('num_features'))
_execute.record_gradient('BoostedTreesMakeStatsSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesMakeStatsSummary', name, _ctx._post_execution_callbacks, node_ids, gradients, hessians, bucketized_features_list, 'max_splits', max_splits, 'num_buckets', num_buckets)
return _result
except _core._FallbackException:
return boosted_trees_make_stats_summary_eager_fallback(node_ids, gradients, hessians, bucketized_features_list, max_splits=max_splits, num_buckets=num_buckets, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
|
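The record above documents `BoostedTreesMakeStatsSummary`. A hypothetical sketch (not from the source) follows, using a toy batch of 4 examples and a single bucketized feature; shapes follow the docstring, the values are made up, and the output-shape comment is an assumption (the docstring only states `float32`).

# Hypothetical sketch: accumulate per-node, per-bucket gradient/hessian stats.
from tensorflow.python.ops import gen_boosted_trees_ops

node_ids = [0, 0, 1, 1]                    # node each example falls into
gradients = [[0.1], [0.2], [0.3], [0.4]]   # shape [#examples, 1]
hessians = [[1.0], [1.0], [1.0], [1.0]]    # shape [#examples, 1]
feature = [0, 1, 1, 0]                     # bucket id per example
summary = gen_boosted_trees_ops.boosted_trees_make_stats_summary(
    node_ids=node_ids,
    gradients=gradients,
    hessians=hessians,
    bucketized_features_list=[feature],
    max_splits=3,
    num_buckets=2)
# Assumption: summary stacks per-feature stats, roughly [num_features, max_splits,
# num_buckets, 2], compatible with stats_summary_list in the best-gains op above.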
def boosted_trees_make_stats_summary_eager_fallback(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_make_stats_summary\n '
_ctx = (ctx if ctx else _context.context())
if (not isinstance(bucketized_features_list, (list, tuple))):
raise TypeError(("Expected list for 'bucketized_features_list' argument to 'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list))
_attr_num_features = len(bucketized_features_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
num_buckets = _execute.make_int(num_buckets, 'num_buckets')
node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32)
gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
bucketized_features_list = _ops.convert_n_to_tensor(bucketized_features_list, _dtypes.int32)
_inputs_flat = ([node_ids, gradients, hessians] + list(bucketized_features_list))
_attrs = ('max_splits', max_splits, 'num_buckets', num_buckets, 'num_features', _attr_num_features)
_result = _execute.execute(b'BoostedTreesMakeStatsSummary', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesMakeStatsSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
| -6,318,355,599,477,688,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_make_stats_summary
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_make_stats_summary_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_make_stats_summary_eager_fallback(node_ids, gradients, hessians, bucketized_features_list, max_splits, num_buckets, name=None, ctx=None):
'This is the slowpath function for Eager mode.\n This is for function boosted_trees_make_stats_summary\n '
_ctx = (ctx if ctx else _context.context())
if (not isinstance(bucketized_features_list, (list, tuple))):
raise TypeError(("Expected list for 'bucketized_features_list' argument to 'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list))
_attr_num_features = len(bucketized_features_list)
max_splits = _execute.make_int(max_splits, 'max_splits')
num_buckets = _execute.make_int(num_buckets, 'num_buckets')
node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32)
gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)
hessians = _ops.convert_to_tensor(hessians, _dtypes.float32)
bucketized_features_list = _ops.convert_n_to_tensor(bucketized_features_list, _dtypes.int32)
_inputs_flat = ([node_ids, gradients, hessians] + list(bucketized_features_list))
_attrs = ('max_splits', max_splits, 'num_buckets', num_buckets, 'num_features', _attr_num_features)
_result = _execute.execute(b'BoostedTreesMakeStatsSummary', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('BoostedTreesMakeStatsSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
|