hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
142c57102f42f9d4e92a394fc2208b8994abf6b8 | 155 | py | Python | 2020/inner_functions.py | srikanteswartalluri/pyutils | bf8d56ac9e9b0786861c08ef32eae49b021f20a3 | [
"0BSD"
] | null | null | null | 2020/inner_functions.py | srikanteswartalluri/pyutils | bf8d56ac9e9b0786861c08ef32eae49b021f20a3 | [
"0BSD"
] | null | null | null | 2020/inner_functions.py | srikanteswartalluri/pyutils | bf8d56ac9e9b0786861c08ef32eae49b021f20a3 | [
"0BSD"
] | null | null | null | def outer_util(l):
print(l)
def stay_tuned():
print("I am inside inner")
stay_tuned()
print("I am back to outer")
outer_util([1, 3, 4])
| 17.222222 | 32 | 0.606452 |
29120306298a7f9571aa5d053f13e1ce72ca2ac2 | 2,623 | py | Python | helper/markov_equiprob.py | cphysics/simulation | 6fc2056c77a021105a6851809e2bacdcc0148ba3 | [
"MIT"
] | 3 | 2020-12-02T19:34:02.000Z | 2022-03-17T03:12:07.000Z | helper/markov_equiprob.py | cphysics/simulation | 6fc2056c77a021105a6851809e2bacdcc0148ba3 | [
"MIT"
] | null | null | null | helper/markov_equiprob.py | cphysics/simulation | 6fc2056c77a021105a6851809e2bacdcc0148ba3 | [
"MIT"
] | 17 | 2020-09-17T12:32:46.000Z | 2021-11-06T03:25:15.000Z | import random
def markov_disks_box(L, delta, sigma):
condition = True
while condition == True:
a = random.choice(L)
b = [a[0] + random.uniform(-delta, delta), a[1] + random.uniform(-delta, delta)]
min_dist = min((b[0] - c[0]) ** 2 + (b[1] - c[1]) ** 2 for c in L if c != a)
box_cond = min(b[0], b[1]) < sigma or max(b[0], b[1]) > 1.0 - sigma
if not (box_cond or min_dist < 4.0 * sigma ** 2):
a[:] = b
condition = False
break
return L
#inputs of the markov_disks_box function:
#initial positions of the disks to startup the Markov-chain
L = [[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]]
delta = 0.1
sigma = 0.15 #radius
n_steps = 1000000
del_xy = 0.05 #"uncertainty"
#Define the "marked" configurations:
conf_a = ((0.30, 0.30), (0.30, 0.70), (0.70, 0.30), (0.70,0.70))
conf_b = ((0.20, 0.20), (0.20, 0.80), (0.75, 0.25), (0.75,0.75))
conf_c = ((0.30, 0.20), (0.30, 0.80), (0.70, 0.20), (0.70,0.70))
configurations = [conf_a, conf_b, conf_c] #list the configurations
hits = {conf_a: 0, conf_b: 0, conf_c: 0} #initialise the number of times each marked configuration occurs
for run in range(n_steps):
x_vec = direct_disks_box(4, sigma) #generates a random sample by direct sampling
for conf in configurations: #run a loop iterating over the given 3 configurations
#condition that a randomly generated configuration L is the same as a, b or c up to uncertainty of del_xy
condition_hit = True
for b in conf: #run a loop iterating over the 4 disk coordinates in a specific configuration
#If the max(x distance and y distance between a disk in L and a disk in conf_a,b,c)
#is less than the given uncertainty del_xy, then we treat the two disks as in the same location.
#Note that the "any two disks" condition is realised by minimising over all 4 disks in a
#randomly sampled configuration L.
condition_b = min(max(abs(a[0] - b[0]), abs(a[1] - b[1])) for a in x_vec) < del_xy
#The following logical variable is 1 only if there exists 4 disk pairs are within del_xy range.
#If at least any one of the disks does not have a pair within del_xy, then it is 0.
condition_hit *= condition_b #multiplies condition_b's (for all 4 disks)
#If the current L and a, b or c are the same up to uncertainty del_xy, then increase:
if condition_hit:
hits[conf] += 1
for conf in configurations:
print conf, hits[conf] #Print the configurations and the number of times they occured. | 50.442308 | 109 | 0.636294 |
3142651428d6f3dd3ae78cd1d5976777efd6ce0e | 1,680 | py | Python | argus/migrations/0005_auto_20170609_2353.py | dehu4ka/lna | f5ee176bdb5c7507b76fba5ae651ce333b71c3db | [
"MIT"
] | null | null | null | argus/migrations/0005_auto_20170609_2353.py | dehu4ka/lna | f5ee176bdb5c7507b76fba5ae651ce333b71c3db | [
"MIT"
] | null | null | null | argus/migrations/0005_auto_20170609_2353.py | dehu4ka/lna | f5ee176bdb5c7507b76fba5ae651ce333b71c3db | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-09 18:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('argus', '0004_auto_20170609_2345'),
]
operations = [
migrations.AlterField(
model_name='argusadsl',
name='address',
field=models.CharField(default='N/A', max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='city',
field=models.CharField(default='N/A', max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='fio',
field=models.CharField(default='N/A', max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='hostname',
field=models.CharField(default='None', max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='inet_login',
field=models.CharField(db_index=True, max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='iptv_login',
field=models.CharField(db_index=True, max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='room',
field=models.CharField(default='N/A', max_length=512),
),
migrations.AlterField(
model_name='argusadsl',
name='xdsl_slot',
field=models.CharField(default='N/A', max_length=512),
),
]
| 30 | 67 | 0.560119 |
ff40206d99347bd096dcc0e0a0c784795cd7422a | 26,320 | py | Python | databrowse/support/renderer_support.py | limatix/Databrowse | af33bc6cca930e59acc3762beeec2409d8fd8634 | [
"BSD-3-Clause"
] | 3 | 2016-09-20T07:04:09.000Z | 2018-07-17T17:31:21.000Z | databrowse/support/renderer_support.py | limatix/Databrowse | af33bc6cca930e59acc3762beeec2409d8fd8634 | [
"BSD-3-Clause"
] | 19 | 2016-10-25T07:05:28.000Z | 2018-08-07T23:18:16.000Z | databrowse/support/renderer_support.py | limatix/Databrowse | af33bc6cca930e59acc3762beeec2409d8fd8634 | [
"BSD-3-Clause"
] | 2 | 2016-10-28T00:12:42.000Z | 2016-10-28T00:18:03.000Z | #!/usr/bin/env python
###############################################################################
## Databrowse: An Extensible Data Management Platform ##
## Copyright (C) 2012-2016 Iowa State University Research Foundation, Inc. ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are ##
## met: ##
## 1. Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## 2. Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## 3. Neither the name of the copyright holder nor the names of its ##
## contributors may be used to endorse or promote products derived from ##
## this software without specific prior written permission. ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ##
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED ##
## TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A ##
## PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER ##
## OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ##
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ##
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ##
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ##
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
## ##
## This material is based on work supported by the Air Force Research ##
## Laboratory under Contract #FA8650-10-D-5210, Task Order #023 and ##
## performed at Iowa State University. ##
## ##
## DISTRIBUTION A. Approved for public release: distribution unlimited; ##
## 19 Aug 2016; 88ABW-2016-4051. ##
###############################################################################
""" support/renderer_support.py - Encapsulation Class for Renderer Plugins """
from lxml import etree
from errno import EEXIST
from stat import *
import sys
import os
import string
import random
import copy
import fnmatch
class renderer_class(object):
""" Renderer Plugin Support - Encapsulation Class for Renderer Plugins """
_relpath = None
_fullpath = None
_web_support = None
_handler_support = None
_caller = None
_handlers = None
_content_mode = None
_style_mode = None
_dynamic_style = None
_default_content_mode = "title"
_default_style_mode = "list"
_default_recursion_depth = 2
_disable_load_style = False
nsmap = {}
class RendererException(Exception):
pass
def __init__(self, relpath, fullpath, web_support, handler_support, caller, handlers, content_mode=None, style_mode=None, recursion_depth=None):
""" Default Initialization Function """
# Set all of our argument variables
#print "In RendererException.__init__".center(100, '=')
#print "Setting Class Variables"
self._relpath = relpath
self._fullpath = fullpath
self._web_support = web_support
self._handler_support = handler_support
self._caller = caller
self._handlers = handlers
if content_mode is None:
self._content_mode = self._default_content_mode
pass
else:
self._content_mode = content_mode
pass
if style_mode is None:
self._style_mode = self._default_style_mode
pass
else:
self._style_mode = style_mode
pass
if recursion_depth is None:
self._recursion_depth = self._default_recursion_depth
pass
else:
self._recursion_depth = recursion_depth
pass
#print "Class Variables Set - Here's a Summary"
#print "self._relpath = " + repr(self._relpath)
#print "self._fullpath = " + repr(self._fullpath)
#print "self._web_support = " + repr(self._web_support)
#print "self._web_support.req.filename = " + repr(self._web_support.req.filename)
#print "self._web_support.req.dirname = " + repr(self._web_support.req.dirname)
#print "self._web_support.req.unparsed_uri = " + repr(self._web_support.req.unparsed_uri)
#print "self._handler_support = " + repr(self._handler_support)
#print "self._caller = " + repr(self._caller)
#print "self._content_mode = " + repr(self._content_mode)
#print "self._style_mode = " + repr(self._style_mode)
#print "self._recursion_depth = " + repr(self._recursion_depth)
self.nsmap = {}
self.nsmap['db'] = 'http://thermal.cnde.iastate.edu/databrowse'
# Try to Load Style
if not self._disable_load_style:
#print "About to call self.loadStyle()"
self.loadStyle()
#print "About to call etree.register_namespace"
self.nsmap[self._namespace_local] = self._namespace_uri
#etree.register_namespace(self._namespace_local, self._namespace_uri)
pass
pass
def getContent(self):
''' Default getContent '''
return None
def getSize(self, fullpath=None):
""" Get Size of A File - Returns size of current file if none specified """
if fullpath is None:
fullpath = self._fullpath
pass
st = os.stat(fullpath)
return st[ST_SIZE]
def getUserFriendlySize(self, fullpath=None, mode="alternative", custom=None):
return self.ConvertUserFriendlySize(self.getSize(fullpath), mode, custom)
def ConvertUserFriendlySize(self, bytes, mode="alternative", custom=None, rounding=None):
"""Human-readable file size. """
if custom is not None:
formatstrings = custom
pass
elif mode == "traditional":
formatstrings = [
(1024 ** 5, 'P'),
(1024 ** 4, 'T'),
(1024 ** 3, 'G'),
(1024 ** 2, 'M'),
(1024 ** 1, 'K'),
(1024 ** 0, 'B'),
]
elif mode == "alternative":
formatstrings = [
(1024 ** 5, ' PB'),
(1024 ** 4, ' TB'),
(1024 ** 3, ' GB'),
(1024 ** 2, ' MB'),
(1024 ** 1, ' KB'),
(1024 ** 0, (' byte', ' bytes')),
]
elif mode == "bitrate":
formatstrings = [
(1024 ** 5, ' Pbps'),
(1024 ** 4, ' Tbps'),
(1024 ** 3, ' Gbps'),
(1024 ** 2, ' Mbps'),
(1024 ** 1, ' Kbps'),
(1024 ** 0, ' bps'),
]
elif mode == "frequency":
formatstrings = [
(1000 ** 5, ' PHz'),
(1000 ** 4, ' THz'),
(1000 ** 3, ' GHz'),
(1000 ** 2, ' MHz'),
(1000 ** 1, ' KHz'),
(1000 ** 0, ' Hz'),
]
elif mode == "time":
formatstrings = [
(4 ** 4, (' week', ' weeks')),
(7 ** 3, (' day', ' days')),
(24 ** 2, (' hr', ' hrs')),
(60 ** 1, ' min'),
(60 ** 0, ' sec'),
]
elif mode == "verbose":
formatstrings = [
(1024 ** 5, (' petabyte', ' petabytes')),
(1024 ** 4, (' terabyte', ' terabytes')),
(1024 ** 3, (' gigabyte', ' gigabytes')),
(1024 ** 2, (' megabyte', ' megabytes')),
(1024 ** 1, (' kilobyte', ' kilobytes')),
(1024 ** 0, (' byte', ' bytes')),
]
elif mode == "iec":
formatstrings = [
(1024 ** 5, 'Pi'),
(1024 ** 4, 'Ti'),
(1024 ** 3, 'Gi'),
(1024 ** 2, 'Mi'),
(1024 ** 1, 'Ki'),
(1024 ** 0, ''),
]
elif mode == "si":
formatstrings = [
(1000 ** 5, 'P'),
(1000 ** 4, 'T'),
(1000 ** 3, 'G'),
(1000 ** 2, 'M'),
(1000 ** 1, 'K'),
(1000 ** 0, 'B'),
]
else:
formatstrings = [
(1024 ** 5, ' PB'),
(1024 ** 4, ' TB'),
(1024 ** 3, ' GB'),
(1024 ** 2, ' MB'),
(1024 ** 1, ' KB'),
(1024 ** 0, (' byte', ' bytes')),
]
for factor, suffix in formatstrings:
if bytes >= factor:
break
amount = float(bytes/factor)
if isinstance(suffix, tuple):
singular, multiple = suffix
if amount == 1:
suffix = singular
else:
suffix = multiple
if rounding is not None:
amount = round(amount, rounding)
return str(amount) + suffix
def ConvertUserFriendlyPermissions(self, p):
ts = {
0o0140000: 'ssocket',
0o0120000: 'llink',
0o0100000: '-file',
0o0060000: 'bblock',
0o0040000: 'ddir',
0o0020000: 'cchar',
0o0010000: 'pfifo'
}
t = p & 0o0170000
permstr = ts[t][0] if t in ts else 'u'
permstr += 'r' if p & 0x0100 else '-'
permstr += 'w' if p & 0x0080 else '-'
permstr += 's' if p & 0x0800 else 'x' if p & 0x0040 else 'S' if p & 0x0800 else '-'
permstr += 'r' if p & 0x0020 else '-'
permstr += 'w' if p & 0x0010 else '-'
permstr += 's' if p & 0x0400 else 'x' if p & 0x0008 else 'S' if p & 0x0400 else '-'
permstr += 'r' if p & 0x0004 else '-'
permstr += 'w' if p & 0x0002 else '-'
permstr += 's' if p & 0x0200 else 'x' if p & 0x0001 else 'S' if p & 0x0200 else '-'
return permstr
def isRaw(self):
#print "isRaw being called"
if self._content_mode == "raw":
return True
else:
return False
def isGit(self):
if self._web_support.req.agent.startswith("git"):
return True
else:
return False
def getStyleMode(self):
#print "getStyleMode being called"
return self._style_mode
def getContentMode(self):
#print "getContentMode being called"
return self._content_mode
def getURL(self, relpath, **kwargs):
""" Return Full URL to a Relative Path """
#print "getURL being called"
# We need to tack in handler if handler is overridden
if self._handlers[-1] != self.__class__.__name__ and "handler" not in kwargs:
kwargs["handler"] = self.__class__.__name__
pass
elif "handler" in kwargs and kwargs["handler"] is None:
del kwargs["handler"]
pass
# Add flag for hidden files if needed
if "showhiddenfiles" in self._web_support.req.form and "showhiddenfiles" not in kwargs:
kwargs["showhiddenfiles"] = ""
elif "showhiddenfiles" in kwargs and kwargs["showhiddenfiles"] is None:
del kwargs["showhiddenfiles"]
# Build the URL
if self._web_support.seo_urls is True:
url = self._web_support.siteurl + relpath
if len(kwargs) > 0:
url = url + '?'
z = 1
pass
for i in kwargs:
if z == 1:
url = url + i + '=' + str(kwargs[i])
z = 2
pass
else:
url = url + '&' + i + '=' + str(kwargs[i])
pass
pass
pass
else:
url = self._web_support.siteurl + '/?path=' + relpath
for i in kwargs:
url = url + '&' + i + '=' + str(kwargs[i])
pass
pass
return url
def getURLToParent(self, relpath, **kwargs):
#print "getURLToParent being called"
if relpath == "/":
return self.getURL(relpath, **kwargs)
pass
else:
relpath = os.path.normpath(relpath + '/../')
return self.getURL(relpath, **kwargs)
pass
pass
def getDirectoryList(self, fullpath, sort=None, order="asc"):
""" Build a Sorted List of Files with Appropriate Files Removed """
#print "getDirectoryList being called"
(hiddenlist, shownlist) = self._handler_support.GetHiddenFileList()
reallist = os.listdir(fullpath)
if "showhiddenfiles" in self._web_support.req.form:
returnlist = reallist
else:
removelist = copy.copy(reallist)
for item in hiddenlist:
removelist = [n for n in removelist if not fnmatch.fnmatch(n, item[1])]
pass
addlist = []
for item in shownlist:
addlist = [n for n in reallist if fnmatch.fnmatch(n, item[1])]
pass
returnlist = list(set(removelist + addlist))
exec("returnlist.sort(%s%s)" % ("reverse=True" if order == "desc" else "reverse=False", ",key=%s" % sort if sort is not None else ",key=str.lower"))
returndirlist = [f for f in returnlist if os.path.isdir(os.path.join(fullpath, f))]
returnfilelist = [f for f in returnlist if os.path.isfile(os.path.join(fullpath, f))]
returnlist = returndirlist
returnlist.extend(returnfilelist)
return returnlist
pass
class CacheFileHandler(file):
""" Overrride File Close Class to Reassign Timestamp """
timestamp = None
def __init__(self, filename, mode='r', timestamp=None):
self.timestamp = timestamp
super(renderer_class.CacheFileHandler, self).__init__(filename, mode)
def close(self):
super(renderer_class.CacheFileHandler, self).close()
if self.mode not in ['r', 'rb'] and self.timestamp is not None:
st = os.stat(self.name)
atime = st[ST_ATIME]
os.utime(self.name, (atime, self.timestamp))
else:
pass
pass
def getCacheFileHandler(self, mode='r', tag=None, extension=None):
""" Return File Handler For Cache File """
filename = self.getCacheFileName(tag, extension)
st = os.stat(self._fullpath)
timestamp = st[ST_MTIME]
if mode not in ['r', 'rb']:
self.PrepareCacheDir()
if not os.access(filename, os.W_OK) and os.path.exists(filename):
raise self.RendererException("Unable to Open Cache File for Writing: " + filename)
else:
if not os.access(filename, os.R_OK):
raise self.RendererException("Unable to Open Cache File for Reading: " + filename)
return self.CacheFileHandler(filename, mode, timestamp)
def PrepareCacheDir(self):
cachedirname = self.getCacheDirName()
if not os.path.exists(cachedirname):
try:
os.makedirs(cachedirname)
except OSError as err:
if err.errno == EEXIST: # Handle the Race Condition
pass
else:
raise
pass
def CacheFileExists(self, tag=None, extension=None):
""" Return Boolean after Verifying the Existance of a Cache File """
if "ignorecache" in self._web_support.req.form:
return False
filename = self.getCacheFileName(tag, extension)
if os.access(filename, os.R_OK) and os.path.exists(filename):
basestat = os.stat(self._fullpath)
cachestat = os.stat(filename)
if basestat[ST_MTIME] > cachestat[ST_MTIME]:
return False
else:
return True
else:
return False
def getCacheDirName(self):
return os.path.abspath(os.path.dirname(self._fullpath) + "/.databrowse/cache/" + self.__class__.__name__ + "/")
def getCacheFileName(self, tag=None, extension=None):
""" Get the Name of a Cache File Given a Tag and Extension """
basefilename = os.path.splitext(os.path.basename(self._fullpath))
basedirname = self.getCacheDirName()
filename = basefilename[0]
if tag is not None:
filename = filename + "_" + tag
if extension is not None:
filename = filename + "." + extension
else:
filename = filename + basefilename[1]
return os.path.join(basedirname, filename)
def loadMenu(self):
""" Load Menu Items for all current handlers """
newmenu = etree.Element('{http://thermal.cnde.iastate.edu/databrowse}navbar')
isDirectory = os.path.isdir(self._fullpath)
for handler in reversed(self._handlers):
dirlist = [os.path.splitext(item)[0][4:] for item in os.listdir(os.path.abspath(os.path.dirname(sys.modules['databrowse.plugins.' + handler].__file__) + '/')) if item.lower().startswith("dbs_")]
additionalitems = []
if isDirectory:
if os.path.exists(os.path.join(self._fullpath, '.databrowse', 'stylesheets', handler)):
additionalitems = [os.path.splitext(item)[0][4:] for item in os.listdir(os.path.join(self._fullpath, '.databrowse', 'stylesheets', handler)) if item.lower().startswith("dbs_")]
else:
if os.path.exists(os.path.join(os.path.dirname(self._fullpath), '.databrowse', 'stylesheets', handler)):
additionalitems = [os.path.splitext(item)[0][4:] for item in os.listdir(os.path.join(os.path.dirname(self._fullpath), '.databrowse', 'stylesheets', handler)) if item.lower().startswith("dbs_")]
dirlist = dirlist + additionalitems
navelem = etree.SubElement(newmenu, "{http://thermal.cnde.iastate.edu/databrowse}navelem")
title = etree.SubElement(navelem, "{http://www.w3.org/1999/xhtml}a")
title.text = " ".join([i[0].title()+i[1:] for i in handler[3:].split("_")])
navitems = etree.SubElement(navelem, "{http://thermal.cnde.iastate.edu/databrowse}navdir", alwaysopen="true")
for item in dirlist:
if item not in self._handler_support.hiddenstylesheets:
if not isDirectory and item not in self._handler_support.directorystylesheets:
link = self.getURL(self._relpath, handler=handler, style_mode=item)
if self._style_mode == item and self.__class__.__name__ == handler:
itemelem = etree.SubElement(navitems, "{http://thermal.cnde.iastate.edu/databrowse}navelem", selected="true")
else:
itemelem = etree.SubElement(navitems, "{http://thermal.cnde.iastate.edu/databrowse}navelem")
menuitem = etree.SubElement(itemelem, "{http://www.w3.org/1999/xhtml}a", href=link)
menuitem.text = " ".join([i[0].title()+i[1:] for i in item.split("_")])
pass
elif isDirectory:
link = self.getURL(self._relpath, handler=handler, style_mode=item)
if self._style_mode == item and self.__class__.__name__ == handler:
itemelem = etree.SubElement(navitems, "{http://thermal.cnde.iastate.edu/databrowse}navelem", selected="true")
else:
itemelem = etree.SubElement(navitems, "{http://thermal.cnde.iastate.edu/databrowse}navelem")
menuitem = etree.SubElement(itemelem, "{http://www.w3.org/1999/xhtml}a", href=link)
menuitem.text = " ".join([i[0].title()+i[1:] for i in item.split("_")])
pass
else:
pass
pass
self._web_support.menu.AddMenu(newmenu)
pass
def loadStyle(self):
""" Safe Function Wrapper To Prevent Errors When Stylesheet Doesn't Exist """
#print "loadStyle being called"
try:
#print "About to call loadStyleFunction"
self.loadStyleFunction()
pass
except self.RendererException:
#print "loadStyleFunction failed with error"
if self._caller in self._handler_support.directoryplugins:
pass
elif self._style_mode == self._default_style_mode:
raise
else:
self._style_mode = self._default_style_mode
self.loadStyleFunction()
pass
pass
def loadStyleFunction(self):
""" Look In Standard Places For the Appropriate Static Stylesheet """
# Get Variables Containing Search Locations Ready
#print "In loadStyleFunction"
#print "Path = " + self._fullpath
#print "Plugin = " + self.__class__.__name__
custompath = os.path.abspath((self._fullpath if os.path.isdir(self._fullpath) else os.path.dirname(self._fullpath)) +
'/.databrowse/stylesheets/' + self.__class__.__name__ + '/dbs_' + self._style_mode + '.xml')
defaultpath = os.path.abspath(os.path.dirname(sys.modules['databrowse.plugins.' + self.__class__.__name__].__file__) + '/dbs_' + self._style_mode + '.xml')
#print "Custom Search Path = " + custompath
#print "Default Search Path = " + defaultpath
# Look for Custom Stylesheets in a .databrowse folder relative to the current path
filename = custompath if os.path.exists(custompath) else None
#print "Looking For Custom File === Filename is now " + repr(filename)
# If we find one, see if its overriding the standard stylesheet and set a flag to remind us later
override = False
if filename is not None:
override = True if (os.path.exists(defaultpath) or hasattr(self, '_style_' + self._style_mode)) else False
pass
#print "Checking for Default Stylesheets === Override is now " + repr(override)
# Let's first check if we have already loaded the standard stylesheets
if filename is None:
#print "Filename is still empty so let's see if we have loaded the default already"
if self._web_support.style.IsStyleLoaded(self._namespace_uri) and override != True:
#print "We have loaded already === IsStyleLoaded is %s and override is %s" % (repr(self._web_support.style.IsStyleLoaded(self._namespace_uri)), repr(override))
return
else:
# If not, let's look for normal stylesheets
#print "Not loaded already === IsStyleLoaded is %s and override is %s" % (repr(self._web_support.style.IsStyleLoaded(self._namespace_uri)), repr(override))
filename = defaultpath if os.path.exists(defaultpath) else None
pass
# Let's check for a stylesheet in the current file
if filename is None:
#print "Filename is still none = looking for variable"
if hasattr(self, '_style_' + self._style_mode):
stylestring = getattr(self, '_style_' + self._style_mode)
pass
else:
# Unable to Find Stylesheet Anywhere - Return Error
#print "Unable to find stylesheet"
raise self.RendererException("Unable To Locate Stylesheet for Style Mode %s in %s" % (self._style_mode, self.__class__.__name__))
else:
# Lets load up whatever stylesheet we found
f = open(filename, 'r')
stylestring = f.read()
f.close()
pass
#print "Stylesheet Loaded Successfully:"
#print stylestring
# If we set the flag earlier, we need to change the namespace
if override is True:
#print "Override is True = Lets Modify Our Stylesheet"
randomid = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10))
#print "Random ID is " + randomid
newnamespace = self._namespace_uri + randomid
newlocalns = self._namespace_local + randomid
#print "New namespace is " + newnamespace
newnamedtemplates = self.__class__.__name__ + '-' + randomid + '-'
#print "Named templates are now prefixed " + newnamedtemplates
stylestring = stylestring.replace(self._namespace_uri, newnamespace)
stylestring = stylestring.replace(self._namespace_local + ":", newlocalns + ":")
stylestring = stylestring.replace("xmlns:" + self._namespace_local, "xmlns:" + newlocalns)
#print "Namespace Changed:"
#print stylestring
stylestring = stylestring.replace(self.__class__.__name__ + '-', newnamedtemplates)
#print "Named Templates Updated:"
#print stylestring
self._namespace_uri = newnamespace
self._namespace_local = newlocalns
pass
#print "Adding Style"
self._web_support.style.AddStyle(self._namespace_uri, stylestring)
pass
| 44.309764 | 213 | 0.549582 |
18607162db83cf7bf76d8c3099f747d3be47138e | 49,059 | py | Python | predictor.py | Min-Sheng/medicaldetectiontoolkit | 2b24a87a0e3dfa9ccbcda4a35c82437f3ff11d22 | [
"Apache-2.0"
] | null | null | null | predictor.py | Min-Sheng/medicaldetectiontoolkit | 2b24a87a0e3dfa9ccbcda4a35c82437f3ff11d22 | [
"Apache-2.0"
] | null | null | null | predictor.py | Min-Sheng/medicaldetectiontoolkit | 2b24a87a0e3dfa9ccbcda4a35c82437f3ff11d22 | [
"Apache-2.0"
] | 1 | 2022-02-09T13:24:09.000Z | 2022-02-09T13:24:09.000Z | #!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import torch
from scipy.stats import norm
from collections import OrderedDict
from multiprocessing import Pool
import pickle
from copy import deepcopy
import pandas as pd
import utils.exp_utils as utils
from plotting import plot_batch_prediction
class Predictor:
"""
Prediction pipeline:
- receives a patched patient image (n_patches, c, y, x, (z)) from patient data loader.
- forwards patches through model in chunks of batch_size. (method: batch_tiling_forward)
- unmolds predictions (boxes and segmentations) to original patient coordinates. (method: spatial_tiling_forward)
Ensembling (mode == 'test'):
- for inference, forwards 4 mirrored versions of image to through model and unmolds predictions afterwards
accordingly (method: data_aug_forward)
- for inference, loads multiple parameter-sets of the trained model corresponding to different epochs. for each
parameter-set loops over entire test set, runs prediction pipeline for each patient. (method: predict_test_set)
Consolidation of predictions:
- consolidates a patient's predictions (boxes, segmentations) collected over patches, data_aug- and temporal ensembling,
performs clustering and weighted averaging (external function: apply_wbc_to_patient) to obtain consistent outptus.
- for 2D networks, consolidates box predictions to 3D cubes via clustering (adaption of non-maximum surpression).
(external function: merge_2D_to_3D_preds_per_patient)
Ground truth handling:
- dissmisses any ground truth boxes returned by the model (happens in validation mode, patch-based groundtruth)
- if provided by data loader, adds 3D ground truth to the final predictions to be passed to the evaluator.
"""
def __init__(self, cf, net, logger, mode):
self.cf = cf
self.logger = logger
# mode is 'val' for patient-based validation/monitoring and 'test' for inference.
self.mode = mode
# model instance. In validation mode, contains parameters of current epoch.
self.net = net
# rank of current epoch loaded (for temporal averaging). this info is added to each prediction,
# for correct weighting during consolidation.
self.rank_ix = '0'
# number of ensembled models. used to calculate the number of expected predictions per position
# during consolidation of predictions. Default is 1 (no ensembling, e.g. in validation).
self.n_ens = 1
if self.mode == 'test':
try:
self.epoch_ranking = np.load(os.path.join(self.cf.fold_dir, 'epoch_ranking.npy'))[:cf.test_n_epochs]
except:
raise RuntimeError('no epoch ranking file in fold directory. '
'seems like you are trying to run testing without prior training...')
self.n_ens = cf.test_n_epochs
if self.cf.test_aug:
self.n_ens *= 4
self.example_plot_dir = os.path.join(cf.test_dir, "example_plots")
os.makedirs(self.example_plot_dir, exist_ok=True)
def predict_patient(self, batch):
    """
    Predict a single patient.

    Called either directly via a loop over the validation set in exec.py (mode == 'val')
    or from self.predict_test_set (mode == 'test').
    In val mode: adds 3D ground-truth info to predictions and runs consolidation and
    2D-to-3D merging of predictions.
    In test mode: returns raw predictions (ground-truth addition, consolidation, and
    2D-to-3D merging are done in self.predict_test_set, because patient predictions
    across several epochs might need to be collected first, in case of temporal ensembling).

    :param batch: dict produced by the data loader; must contain 'data' and 'pid';
        contains 'patch_crop_coords' when the patient is provided in patches.
    :return: results_dict: stores the results for one patient. dictionary with keys:
             - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                        one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                        (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
             - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
             - losses (only in validation mode)
    """
    # \r + end="" keeps the progress line updating in place on the console.
    print('\revaluating patient {} for fold {} '.format(batch['pid'], self.cf.fold), end="", flush=True)
    # True if patient is provided in patches and predictions need to be tiled.
    self.patched_patient = 'patch_crop_coords' in batch.keys()
    # forward batch through prediction pipeline (incl. optional test-time augmentation).
    results_dict = self.data_aug_forward(batch)
    if self.mode == 'val':
        # attach 3D ground-truth boxes so downstream evaluation can match detections.
        for b in range(batch['patient_bb_target'].shape[0]):
            for t in range(len(batch['patient_bb_target'][b])):
                results_dict['boxes'][b].append({'box_coords': batch['patient_bb_target'][b][t],
                                                 'box_label': batch['patient_roi_labels'][b][t],
                                                 'box_type': 'gt'})
        if self.patched_patient:
            # consolidate overlapping patch predictions via weighted box clustering.
            wcs_input = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.wcs_iou, self.n_ens]
            results_dict['boxes'] = apply_wbc_to_patient(wcs_input)[0]
        if self.cf.merge_2D_to_3D_preds:
            # model predicted per-slice 2D boxes but evaluation runs in 3D: merge to cubes.
            merge_dims_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.merge_3D_iou]
            results_dict['boxes'] = merge_2D_to_3D_preds_per_patient(merge_dims_inputs)[0]
    return results_dict
def predict_test_set(self, batch_gen, return_results=True):
    """
    wrapper around test method, which loads multiple (or one) epoch parameters (temporal ensembling), loops through
    the test set and collects predictions per patient. Also flattens the results per patient and epoch
    and adds optional ground truth boxes for evaluation. Saves out the raw result list for later analysis and
    optionally consolidates and returns predictions immediately.

    :param batch_gen: dict with a 'test' generator yielding patient batches and 'n_test' (number of test patients).
    :param return_results: if True, consolidates predictions (wbc, optional 2D-to-3D merging) and returns them.
    :return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
             - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                        one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                        (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
             - 'seg_preds': not implemented yet. todo for evaluation of instance/semantic segmentation.
    """
    dict_of_patient_results = OrderedDict()
    # get paths of all parameter sets to be loaded for temporal ensembling. (or just one for no temp. ensembling).
    weight_paths = [os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(epoch), 'params.pth') for epoch in
                    self.epoch_ranking]
    # at most one example plot per epoch rank (0 if there are no test patients).
    n_test_plots = min(batch_gen['n_test'], 1)
    for rank_ix, weight_path in enumerate(weight_paths):
        self.logger.info(('tmp ensembling over rank_ix:{} epoch:{}'.format(rank_ix, weight_path)))
        # load this epoch's parameters and switch to inference mode.
        self.net.load_state_dict(torch.load(weight_path))
        self.net.eval()
        self.rank_ix = str(rank_ix)  # get string of current rank for unique patch ids.
        # randomly pick which test batches get an example plot for this rank.
        plot_batches = np.random.choice(np.arange(batch_gen['n_test']), size=n_test_plots, replace=False)
        with torch.no_grad():
            for i in range(batch_gen['n_test']):
                batch = next(batch_gen['test'])

                # store batch info in patient entry of results dict (only once, on the first rank).
                if rank_ix == 0:
                    dict_of_patient_results[batch['pid']] = {}
                    dict_of_patient_results[batch['pid']]['results_dicts'] = []
                    dict_of_patient_results[batch['pid']]['patient_bb_target'] = batch['patient_bb_target']
                    dict_of_patient_results[batch['pid']]['patient_roi_labels'] = batch['patient_roi_labels']

                # call prediction pipeline and store results in dict.
                results_dict = self.predict_patient(batch)
                dict_of_patient_results[batch['pid']]['results_dicts'].append({"boxes": results_dict['boxes']})

                if i in plot_batches and not self.patched_patient:
                    # view qualitative results of random test case
                    # plotting for patched patients is too expensive, thus not done. Change at will.
                    try:
                        out_file = os.path.join(self.example_plot_dir,
                                                'batch_example_test_{}_rank_{}.png'.format(self.cf.fold,
                                                                                           rank_ix))
                        results_for_plotting = deepcopy(results_dict)
                        # seg preds of test augs are included separately. for viewing, only show aug 0 (merging
                        # would need multiple changes, incl in every model).
                        if results_for_plotting["seg_preds"].shape[1] > 1:
                            results_for_plotting["seg_preds"] = results_dict['seg_preds'][:, [0]]
                        for bix in range(batch["seg"].shape[0]):  # batch dim should be 1
                            for tix in range(len(batch['bb_target'][bix])):
                                results_for_plotting['boxes'][bix].append({'box_coords': batch['bb_target'][bix][tix],
                                                                           'box_label': batch['class_target'][bix][tix],
                                                                           'box_type': 'gt'})
                        # plot in a separate process so inference is not blocked by rendering.
                        utils.split_off_process(plot_batch_prediction, batch, results_for_plotting, self.cf,
                                                outfile=out_file, suptitle="Test plot:\nunmerged TTA overlayed.")
                    except Exception as e:
                        # plotting is best-effort; never let it abort the test run.
                        self.logger.info("WARNING: error in plotting example test batch: {}".format(e))

    self.logger.info('finished predicting test set. starting post-processing of predictions.')
    results_per_patient = []

    # loop over patients again to flatten results across epoch predictions.
    # if provided, add ground truth boxes for evaluation.
    for pid, p_dict in dict_of_patient_results.items():

        tmp_ens_list = p_dict['results_dicts']
        results_dict = {}
        # collect all boxes/seg_preds of same batch_instance over temporal instances.
        b_size = len(tmp_ens_list[0]["boxes"])
        results_dict['boxes'] = [[item for rank_dict in tmp_ens_list for item in rank_dict["boxes"][batch_instance]]
                                 for batch_instance in range(b_size)]

        # TODO return for instance segmentation:
        # results_dict['seg_preds'] = np.mean(results_dict['seg_preds'], 1)[:, None]
        # results_dict['seg_preds'] = np.array([[item for d in tmp_ens_list for item in d['seg_preds'][batch_instance]]
        #                                      for batch_instance in range(len(tmp_ens_list[0]['boxes']))])

        # add 3D ground truth boxes for evaluation.
        for b in range(p_dict['patient_bb_target'].shape[0]):
            for t in range(len(p_dict['patient_bb_target'][b])):
                results_dict['boxes'][b].append({'box_coords': p_dict['patient_bb_target'][b][t],
                                                 'box_label': p_dict['patient_roi_labels'][b][t],
                                                 'box_type': 'gt'})
        results_per_patient.append([results_dict, pid])

    # save out raw predictions.
    out_string = 'raw_pred_boxes_hold_out_list' if self.cf.hold_out_test_set else 'raw_pred_boxes_list'
    with open(os.path.join(self.cf.fold_dir, '{}.pickle'.format(out_string)), 'wb') as handle:
        pickle.dump(results_per_patient, handle)

    if return_results:
        final_patient_box_results = [(res_dict["boxes"], pid) for res_dict, pid in results_per_patient]
        # consolidate predictions.
        self.logger.info('applying wcs to test set predictions with iou = {} and n_ens = {}.'.format(
            self.cf.wcs_iou, self.n_ens))
        # parallelize consolidation over patients; one work item per patient.
        pool = Pool(processes=6)
        mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.wcs_iou, self.n_ens] for ii in final_patient_box_results]
        final_patient_box_results = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
        pool.close()
        pool.join()
        # merge 2D boxes to 3D cubes. (if model predicts 2D but evaluation is run in 3D)
        if self.cf.merge_2D_to_3D_preds:
            self.logger.info('applying 2Dto3D merging to test set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
            pool = Pool(processes=6)
            mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in final_patient_box_results]
            final_patient_box_results = pool.map(merge_2D_to_3D_preds_per_patient, mp_inputs, chunksize=1)
            pool.close()
            pool.join()
        # final_patient_box_results holds [avg_boxes, pid] if wbc
        # write consolidated boxes back into the per-patient result dicts (pids must line up).
        for ix in range(len(results_per_patient)):
            assert results_per_patient[ix][1] == final_patient_box_results[ix][1], "should be same pid"
            results_per_patient[ix][0]["boxes"] = final_patient_box_results[ix][0]
        return results_per_patient
def load_saved_predictions(self, apply_wbc=False):
    """
    loads raw predictions saved by self.predict_test_set. consolidates and merges 2D boxes to 3D cubes for evaluation.
    (if model predicts 2D but evaluation is run in 3D)

    :param apply_wbc: if True, consolidate loaded predictions with weighted box clustering before returning.
    :return: (optionally) results_list: list over patient results. each entry is a dict with keys:
             - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                        one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
                        (if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
             - 'seg_preds': not implemented yet. todo for evaluation of instance/semantic segmentation.
    """
    # load predictions for a single test-set fold.
    results_file = 'raw_pred_boxes_hold_out_list.pickle' if self.cf.hold_out_test_set else 'raw_pred_boxes_list.pickle'
    if not self.cf.hold_out_test_set or not self.cf.ensemble_folds:
        with open(os.path.join(self.cf.fold_dir, results_file), 'rb') as handle:
            results_list = pickle.load(handle)
        box_results_list = [(res_dict["boxes"], pid) for res_dict, pid in results_list]
        # effective ensemble size: test-time augmentation multiplies predictions per position by 4.
        da_factor = 4 if self.cf.test_aug else 1
        n_ens = self.cf.test_n_epochs * da_factor
        self.logger.info('loaded raw test set predictions with n_patients = {} and n_ens = {}'.format(
            len(results_list), n_ens))

    # if hold out test set was perdicted, aggregate predictions of all trained models
    # corresponding to all CV-folds and flatten them.
    else:
        self.logger.info("loading saved predictions of hold-out test set and ensembling over folds.")
        fold_dirs = sorted([os.path.join(self.cf.exp_dir, f) for f in os.listdir(self.cf.exp_dir) if
                            os.path.isdir(os.path.join(self.cf.exp_dir, f)) and f.startswith("fold")])

        results_list = []
        folds_loaded = 0
        for fold in range(self.cf.n_cv_splits):
            fold_dir = os.path.join(self.cf.exp_dir, 'fold_{}'.format(fold))
            if fold_dir in fold_dirs:
                with open(os.path.join(fold_dir, results_file), 'rb') as handle:
                    fold_list = pickle.load(handle)
                    results_list += fold_list
                    folds_loaded += 1
            else:
                # tolerate partially trained experiments: missing folds are skipped, not fatal.
                self.logger.info("Skipping fold {} since no saved predictions found.".format(fold))
        box_results_list = []
        for res_dict, pid in results_list: #without filtering gt out:
            box_results_list.append((res_dict['boxes'], pid))

        da_factor = 4 if self.cf.test_aug else 1
        # hold-out ensembling additionally multiplies by the number of folds actually loaded.
        n_ens = self.cf.test_n_epochs * da_factor * folds_loaded

    # consolidate predictions.
    if apply_wbc:
        self.logger.info('applying wcs to test set predictions with iou = {} and n_ens = {}.'.format(
            self.cf.wcs_iou, n_ens))
        pool = Pool(processes=6)
        mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.wcs_iou, n_ens] for ii in box_results_list]
        box_results_list = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
        pool.close()
        pool.join()

    # merge 2D box predictions to 3D cubes (if model predicts 2D but evaluation is run in 3D)
    if self.cf.merge_2D_to_3D_preds:
        self.logger.info(
            'applying 2Dto3D merging to test set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
        pool = Pool(processes=6)
        mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in box_results_list]
        box_results_list = pool.map(merge_2D_to_3D_preds_per_patient, mp_inputs, chunksize=1)
        pool.close()
        pool.join()

    # write consolidated boxes back into the loaded result dicts; pids must stay aligned.
    for ix in range(len(results_list)):
        assert np.all(
            results_list[ix][1] == box_results_list[ix][1]), "pid mismatch between loaded and aggregated results"
        results_list[ix][0]["boxes"] = box_results_list[ix][0]

    return results_list  # holds (results_dict, pid)
def data_aug_forward(self, batch):
    """
    in val_mode: passes batch through to spatial_tiling method without data_aug.
    in test_mode: if cf.test_aug is set in configs, createst 4 mirrored versions of the input image,
    passes all of them to the next processing step (spatial_tiling method) and re-transforms returned predictions
    to original image version.

    :param batch: dict from the data loader; 'data' is mirrored in place and restored before returning.
    :return. results_dict: stores the results for one patient. dictionary with keys:
             - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                        one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
                        and a dummy batch dimension of 1 for 3D predictions.
             - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
             - losses (only in validation mode)
    """
    # self.patched_patient is set by predict_patient before this method is called.
    patch_crops = batch['patch_crop_coords'] if self.patched_patient else None
    # aug 0: predictions on the un-mirrored image.
    results_list = [self.spatial_tiling_forward(batch, patch_crops)]
    org_img_shape = batch['original_img_shape']

    if self.mode == 'test' and self.cf.test_aug:

        if self.patched_patient:
            # apply mirror transformations to patch-crop coordinates, for correct tiling in spatial_tiling method.
            mirrored_patch_crops = get_mirrored_patch_crops(patch_crops, batch['original_img_shape'])
        else:
            mirrored_patch_crops = [None] * 3

        # keep an unmirrored copy; batch['data'] is overwritten per augmentation below.
        img = np.copy(batch['data'])

        # first mirroring: y-axis.
        batch['data'] = np.flip(img, axis=2).copy()
        chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[0], n_aug='1')
        # re-transform coordinates.
        for ix in range(len(chunk_dict['boxes'])):
            for boxix in range(len(chunk_dict['boxes'][ix])):
                coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
                # y1/y2 swap and reflect around the image's y-extent (org_img_shape[2]).
                coords[0] = org_img_shape[2] - chunk_dict['boxes'][ix][boxix]['box_coords'][2]
                coords[2] = org_img_shape[2] - chunk_dict['boxes'][ix][boxix]['box_coords'][0]
                assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords'].copy()]
                assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords'].copy()]
                chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
        # re-transform segmentation predictions.
        chunk_dict['seg_preds'] = np.flip(chunk_dict['seg_preds'], axis=2)
        results_list.append(chunk_dict)

        # second mirroring: x-axis.
        batch['data'] = np.flip(img, axis=3).copy()
        chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[1], n_aug='2')
        # re-transform coordinates.
        for ix in range(len(chunk_dict['boxes'])):
            for boxix in range(len(chunk_dict['boxes'][ix])):
                coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
                # x1/x2 swap and reflect around the image's x-extent (org_img_shape[3]).
                coords[1] = org_img_shape[3] - chunk_dict['boxes'][ix][boxix]['box_coords'][3]
                coords[3] = org_img_shape[3] - chunk_dict['boxes'][ix][boxix]['box_coords'][1]
                assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords'].copy()]
                assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords'].copy()]
                chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
        # re-transform segmentation predictions.
        chunk_dict['seg_preds'] = np.flip(chunk_dict['seg_preds'], axis=3)
        results_list.append(chunk_dict)

        # third mirroring: y-axis and x-axis.
        batch['data'] = np.flip(np.flip(img, axis=2), axis=3).copy()
        chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[2], n_aug='3')
        # re-transform coordinates.
        for ix in range(len(chunk_dict['boxes'])):
            for boxix in range(len(chunk_dict['boxes'][ix])):
                coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
                # reflect both axes: combination of the two transformations above.
                coords[0] = org_img_shape[2] - chunk_dict['boxes'][ix][boxix]['box_coords'][2]
                coords[2] = org_img_shape[2] - chunk_dict['boxes'][ix][boxix]['box_coords'][0]
                coords[1] = org_img_shape[3] - chunk_dict['boxes'][ix][boxix]['box_coords'][3]
                coords[3] = org_img_shape[3] - chunk_dict['boxes'][ix][boxix]['box_coords'][1]
                assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords'].copy()]
                assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords'].copy()]
                chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
        # re-transform segmentation predictions.
        chunk_dict['seg_preds'] = np.flip(np.flip(chunk_dict['seg_preds'], axis=2), axis=3).copy()
        results_list.append(chunk_dict)

        # restore the original (unmirrored) image in the batch dict.
        batch['data'] = img

    # aggregate all boxes/seg_preds per batch element from data_aug predictions.
    results_dict = {}
    results_dict['boxes'] = [[item for d in results_list for item in d['boxes'][batch_instance]]
                             for batch_instance in range(org_img_shape[0])]
    # stacks per-aug seg predictions along a new per-element axis.
    results_dict['seg_preds'] = np.array([[item for d in results_list for item in d['seg_preds'][batch_instance]]
                                          for batch_instance in range(org_img_shape[0])])
    if self.mode == 'val':
        try:
            # losses come from aug 0 only (no TTA in validation).
            results_dict['torch_loss'] = results_list[0]['torch_loss']
            results_dict['class_loss'] = results_list[0]['class_loss']
        except KeyError:
            # losses are not necessarily monitored.
            pass
    return results_dict
def spatial_tiling_forward(self, batch, patch_crops=None, n_aug='0'):
    """
    forwards batch to batch_tiling_forward method and receives and returns a dictionary with results.
    if patch-based prediction, the results received from batch_tiling_forward will be on a per-patch-basis.
    this method uses the provided patch_crops to re-transform all predictions to whole-image coordinates.
    Patch-origin information of all box-predictions will be needed for consolidation, hence it is stored as
    'patch_id', which is a unique string for each patch (also takes current data aug and temporal epoch instances
    into account). all box predictions get additional information about the amount overlapping patches at the
    respective position (used for consolidation).

    :param batch: dict from the data loader.
    :param patch_crops: list of patch crop coordinates, or None for whole-image prediction.
    :param n_aug: string id of the current test-time augmentation (part of 'patch_id').
    :return. results_dict: stores the results for one patient. dictionary with keys:
             - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                        one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
                        and a dummy batch dimension of 1 for 3D predictions.
             - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
             - losses (only in validation mode)
    """
    if patch_crops is not None:

        patches_dict = self.batch_tiling_forward(batch)

        results_dict = {'boxes': [[] for _ in range(batch['original_img_shape'][0])]}
        # instanciate segemntation output array. Will contain averages over patch predictions.
        # float16 to halve memory for the whole-image accumulation buffer.
        out_seg_preds = np.zeros(batch['original_img_shape'], dtype=np.float16)[:, 0][:, None]
        # counts patch instances per pixel-position.
        patch_overlap_map = np.zeros_like(out_seg_preds, dtype='uint8')

        #unmold segmentation outputs. loop over patches.
        for pix, pc in enumerate(patch_crops):
            if self.cf.dim == 3:
                # 3D crop coords: (y1, y2, x1, x2, z1, z2).
                out_seg_preds[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += patches_dict['seg_preds'][pix][None]
                patch_overlap_map[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += 1
            else:
                # 2D: pc[4]:pc[5] selects the slice range along the batch (z) dimension.
                out_seg_preds[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += patches_dict['seg_preds'][pix]
                patch_overlap_map[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += 1

        # take mean in overlapping areas.
        out_seg_preds[patch_overlap_map > 0] /= patch_overlap_map[patch_overlap_map > 0]
        results_dict['seg_preds'] = out_seg_preds

        # unmold box outputs. loop over patches.
        for pix, pc in enumerate(patch_crops):
            patch_boxes = patches_dict['boxes'][pix]

            for box in patch_boxes:

                # add unique patch id for consolidation of predictions.
                box['patch_id'] = self.rank_ix + '_' + n_aug + '_' + str(pix)

                # boxes from the edges of a patch have a lower prediction quality, than the ones at patch-centers.
                # hence they will be downweighted for consolidation, using the 'box_patch_center_factor', which is
                # obtained by a normal distribution over positions in the patch and average over spatial dimensions.
                # Also the info 'box_n_overlaps' is stored for consolidation, which depicts the amount over
                # overlapping patches at the box's position.
                c = box['box_coords']
                box_centers = [(c[ii] + c[ii + 2]) / 2 for ii in range(2)]
                if self.cf.dim == 3:
                    box_centers.append((c[4] + c[5]) / 2)
                # NOTE: 'pc' inside this comprehension is the per-dimension half patch size,
                # shadowing the crop-coordinate 'pc' of the enclosing loop.
                box['box_patch_center_factor'] = np.mean(
                    [norm.pdf(bc, loc=pc, scale=pc * 0.8) * np.sqrt(2 * np.pi) * pc * 0.8 for bc, pc in
                     zip(box_centers, np.array(self.cf.patch_size) / 2)])
                if self.cf.dim == 3:
                    # shift box coords from patch frame to whole-image frame.
                    c += np.array([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])
                    # floor lower bounds / ceil upper bounds to get integer index ranges.
                    int_c = [int(np.floor(ii)) if ix%2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
                    box['box_n_overlaps'] = np.mean(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]])
                    results_dict['boxes'][0].append(box)
                else:
                    c += np.array([pc[0], pc[2], pc[0], pc[2]])
                    int_c = [int(np.floor(ii)) if ix % 2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
                    box['box_n_overlaps'] = np.mean(patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])
                    # 2D boxes are appended to their originating slice's batch element.
                    results_dict['boxes'][pc[4]].append(box)

        if self.mode == 'val':
            try:
                results_dict['torch_loss'] = patches_dict['torch_loss']
                results_dict['class_loss'] = patches_dict['class_loss']
            except KeyError:
                # losses are not necessarily monitored.
                pass
    # if predictions are not patch-based:
    # add patch-origin info to boxes (entire image is the same patch with overlap=1) and return results.
    else:
        results_dict = self.batch_tiling_forward(batch)
        for b in results_dict['boxes']:
            for box in b:
                box['box_patch_center_factor'] = 1
                box['box_n_overlaps'] = 1
                box['patch_id'] = self.rank_ix + '_' + n_aug

    return results_dict
def batch_tiling_forward(self, batch):
    """
    Call the actual network forward method. In patch-based prediction, the batch dimension
    might be overloaded with n_patches >> batch_size, which would exceed GPU memory; in that
    case the batch is processed in chunks of batch_size. Validation mode calls the train
    method to monitor losses (returned ground-truth objects are discarded); test mode calls
    the test forward method, no ground truth required / involved.

    :return. results_dict: stores the results for one patient. dictionary with keys:
             - 'boxes': list over batch elements. each element is a list over boxes, where each box is
                        one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
                        and a dummy batch dimension of 1 for 3D predictions.
             - 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
             - losses (only in validation mode)
    """
    img = batch['data']
    n_elements = img.shape[0]

    # small enough: a single forward pass handles the whole batch.
    if n_elements <= self.cf.batch_size:
        if self.mode == 'val':
            # call training method to monitor losses.
            results_dict = self.net.train_forward(batch, is_validation=True)
            # discard returned ground-truth boxes (also training info boxes).
            results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
        else:
            results_dict = self.net.test_forward(batch, return_masks=self.cf.return_masks_in_test)
        return results_dict

    # otherwise forward in chunks of batch_size elements and re-assemble.
    chunk_dicts = []
    for start in range(0, n_elements, self.cf.batch_size):
        chunk_ixs = np.arange(start, min(start + self.cf.batch_size, n_elements))
        # slice only the array entries that share the overloaded batch dimension.
        chunk = {key: val[chunk_ixs] for key, val in batch.items()
                 if (isinstance(val, np.ndarray) and val.shape[0] == n_elements)}
        if self.mode == 'val':
            chunk_dicts.append(self.net.train_forward(chunk, is_validation=True))
        else:
            chunk_dicts.append(self.net.test_forward(chunk, return_masks=self.cf.return_masks_in_test))

    # flatten out batch elements from chunks ([chunk, chunk] -> [b, b, b, b, ...]).
    results_dict = {}
    results_dict['boxes'] = [element for d in chunk_dicts for element in d['boxes']]
    results_dict['seg_preds'] = np.array([element for d in chunk_dicts for element in d['seg_preds']])

    if self.mode == 'val':
        try:
            # estimate metrics by mean over batch chunks; most similar to training metrics.
            results_dict['torch_loss'] = torch.mean(torch.cat([d['torch_loss'] for d in chunk_dicts]))
            results_dict['class_loss'] = np.mean([d['class_loss'] for d in chunk_dicts])
        except KeyError:
            # losses are not necessarily monitored.
            pass
        # discard returned ground-truth boxes (also training info boxes).
        results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]

    return results_dict
def apply_wbc_to_patient(inputs):
    """
    Wrapper around prediction-box consolidation: weighted cluster scoring (wcs).
    Processes a single patient: loops over batch elements in the patient results
    (1 in 3D, slices in 2D) and foreground classes, aggregates detections per class
    via weighted_box_clustering and stores them in a new list. Ground-truth boxes
    are passed through unchanged.

    :param inputs: [patient_boxes, pid, class_dict, wcs_iou, n_ens] packed in one list
        (single-argument form for use with multiprocessing Pool.map).
    :return. patient_results_list: list over batch elements. each element is a list over boxes, where each
             box is one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D
             predictions, and a dummy batch dimension of 1 for 3D predictions.
    :return. pid: string. patient id.
    """
    patient_boxes, pid, class_dict, wcs_iou, n_ens = inputs
    consolidated = []

    for element_boxes in patient_boxes:
        new_element = []
        for cl in class_dict.keys():
            # gather this class's detections for the current batch element.
            dets = [box for box in element_boxes
                    if box['box_type'] == 'det' and box['box_pred_class_id'] == cl]
            if len(dets) > 0:
                coords = np.array([d['box_coords'] for d in dets])
                det_scores = np.array([d['box_score'] for d in dets])
                center_facts = np.array([d['box_patch_center_factor'] for d in dets])
                n_overlaps = np.array([d['box_n_overlaps'] for d in dets])
                patch_ids = np.array([d['patch_id'] for d in dets])
                keep_scores, keep_coords = weighted_box_clustering(
                    np.concatenate((coords, det_scores[:, None], center_facts[:, None],
                                    n_overlaps[:, None]), axis=1), patch_ids, wcs_iou, n_ens)
                new_element.extend({'box_type': 'det', 'box_coords': kc,
                                    'box_score': ks, 'box_pred_class_id': cl}
                                   for ks, kc in zip(keep_scores, keep_coords))
        # ground-truth boxes are carried over to the consolidated output untouched.
        new_element.extend(box for box in element_boxes if box['box_type'] == 'gt')
        consolidated.append(new_element)

    return [consolidated, pid]
def merge_2D_to_3D_preds_per_patient(inputs):
    """
    Wrapper around the 2D-to-3D merging operation. Processes a single patient: takes 2D
    patient results (slices in batch dimension) and returns 3D patient results (dummy
    batch dimension of 1). Applies an adaptation of Non-Maximum Suppression
    (detailed methodology is described in nms_2to3D).

    :param inputs: [slice_results, pid, class_dict, merge_3D_iou] packed in one list
        (single-argument form for use with multiprocessing Pool.map).
    :return. results_dict_boxes: list over batch elements (1 in 3D). each element is a list over boxes,
             where each box is one dictionary: [[box_0, ...], [box_n,...]].
    :return. pid: string. patient id.
    """
    slice_results, pid, class_dict, merge_3D_iou = inputs
    merged = []

    for cl in class_dict.keys():
        # collect box predictions over the batch dimension (slices), remembering each box's slice.
        cl_boxes = []
        cl_slice_ids = []
        for slice_ix, slice_boxes in enumerate(slice_results):
            dets = [(ix, box) for ix, box in enumerate(slice_boxes)
                    if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
            cl_boxes += dets
            cl_slice_ids += [slice_ix] * len(dets)

        coords = np.array([box['box_coords'] for _, box in cl_boxes])
        det_scores = np.array([box['box_score'] for _, box in cl_boxes])
        cl_slice_ids = np.array(cl_slice_ids)

        if 0 in det_scores.shape:
            keep_ix, keep_z = [], []
        else:
            keep_ix, keep_z = nms_2to3D(
                np.concatenate((coords, det_scores[:, None], cl_slice_ids[:, None]), axis=1), merge_3D_iou)

        # store kept predictions and append their z-extent to the 2D coordinates.
        for kix, kz in zip(keep_ix, keep_z):
            merged.append({'box_type': 'det', 'box_coords': list(coords[kix]) + kz,
                           'box_score': det_scores[kix], 'box_pred_class_id': cl})

    gt_boxes = [box for slice_boxes in slice_results for box in slice_boxes if box['box_type'] == 'gt']
    if len(gt_boxes) > 0:
        assert np.all([len(box["box_coords"]) == 6 for box in gt_boxes]), "expanded preds to 3D but GT is 2D."
    merged += gt_boxes

    # add dummy batch dimension 1 for 3D.
    return [[merged], pid]
def weighted_box_clustering(dets, box_patch_id, thresh, n_ens):
    """
    Consolidates overlapping predictions resulting from patch overlaps, test data augmentations
    and temporal ensembling.
    Clusters predictions together with iou > thresh (like in NMS). Output score and coordinates for one
    cluster are the averages weighted by individual patch center factors (how trustworthy this candidate
    is, measured by how centered its position in the patch is) and the size of the corresponding box.
    The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position
    (1 prediction per unique patch). Missing predictions at a cluster position are defined as the number
    of unique patches in the cluster which did not contribute any boxes.

    :param dets: (n_dets, (y1, x1, y2, x2, (z1), (z2), score, box_pc_fact, box_n_ovs))
    :param box_patch_id: (n_dets,) unique id of the patch that produced each box.
    :param thresh: threshold for iou matching.
    :param n_ens: number of models that are ensembled. (-> number of expected predictions per position)
    :return: keep_scores: (n_keep) new scores of boxes to be kept.
    :return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept.
    """
    # 7 columns = 4 box coords + score + pc_fact + n_ovs -> 2D; 9 columns -> 3D.
    dim = 2 if dets.shape[1] == 7 else 3
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    scores = dets[:, -3]
    box_pc_facts = dets[:, -2]
    box_n_ovs = dets[:, -1]

    areas = (y2 - y1 + 1) * (x2 - x1 + 1)

    if dim == 3:
        z1 = dets[:, 4]
        z2 = dets[:, 5]
        areas *= (z2 - z1 + 1)

    # order is the sorted index. maps order to index: o[1] = 24 means rank 1 is box 24.
    order = scores.argsort()[::-1]

    keep_scores = []
    keep_coords = []

    while order.size > 0:
        i = order[0]  # highest scoring element
        xx1 = np.maximum(x1[i], x1[order])
        yy1 = np.maximum(y1[i], y1[order])
        xx2 = np.minimum(x2[i], x2[order])
        yy2 = np.minimum(y2[i], y2[order])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h

        if dim == 3:
            zz1 = np.maximum(z1[i], z1[order])
            zz2 = np.minimum(z2[i], z2[order])
            d = np.maximum(0.0, zz2 - zz1 + 1)
            inter *= d

        # overlap between currently highest scoring box and all remaining boxes.
        ovr = inter / (areas[i] + areas[order] - inter)

        # get all the predictions that match the current box to build one cluster.
        matches = np.argwhere(ovr > thresh)

        match_n_ovs = box_n_ovs[order[matches]]
        match_pc_facts = box_pc_facts[order[matches]]
        match_patch_id = box_patch_id[order[matches]]
        match_ov_facts = ovr[matches]
        match_areas = areas[order[matches]]
        match_scores = scores[order[matches]]

        # weight all scores in cluster by patch factors, overlap and size.
        match_score_weights = match_ov_facts * match_areas * match_pc_facts
        match_scores *= match_score_weights

        # for the weighted average, scores have to be divided by the number of total expected preds at the position
        # of the current cluster. 1 prediction per patch is expected. therefore, the number of ensembled models is
        # multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be
        # in areas of different overlaps).
        n_expected_preds = n_ens * np.mean(match_n_ovs)

        # the number of missing predictions is obtained as the number of patches,
        # which did not contribute any prediction to the current cluster.
        n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0]))

        # missing preds are given the mean weighting
        # (expected prediction is the mean over all predictions in cluster).
        denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights)

        # compute weighted average score for the cluster.
        avg_score = np.sum(match_scores) / denom

        # compute weighted average of coordinates for the cluster. now only take existing
        # predictions into account.
        avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores),
                      np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores),
                      np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores),
                      np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)]
        if dim == 3:
            avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores))
            avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores))

        # some clusters might have very low scores due to high amounts of missing predictions.
        # filter those out with a conservative threshold, to speed up evaluation.
        if avg_score > 0.01:
            keep_scores.append(avg_score)
            keep_coords.append(avg_coords)

        # get index of all elements that were not matched and discard all others.
        inds = np.where(ovr <= thresh)[0]
        order = order[inds]

    return keep_scores, keep_coords
def nms_2to3D(dets, thresh):
    """
    Merges 2D boxes to 3D cubes. Therefore, boxes of all slices are projected into one slice. An adaptation of Non-maximum suppression
    is applied, where clusters are found (like in NMS) with an extra constraint, that suppressed boxes have to have 'connected'
    z-coordinates w.r.t the core slice (cluster center, highest scoring box). 'connected' z-coordinates are determined
    as the z-coordinates with predictions until the first coordinate, where no prediction was found.

    example: a cluster of predictions was found overlap > iou thresh in xy (like NMS). The z-coordinate of the highest
    scoring box is 50. Other predictions have 23, 46, 48, 49, 51, 52, 53, 56, 57.
    Only the coordinates connected with 50 are clustered to one cube: 48, 49, 51, 52, 53. (46 not because nothing was
    found in 47, so 47 is a 'hole', which interrupts the connection). Only the boxes corresponding to these coordinates
    are suppressed. All others are kept for building of further clusters.

    This algorithm works better with a certain min_confidence of predictions, because low confidence (e.g. noisy/cluttery)
    predictions can break the relatively strong assumption of defining cubes' z-boundaries at the first 'hole' in the cluster.

    :param dets: (n_detections, (y1, x1, y2, x2, scores, slice_id)
    :param thresh: iou matching threshold (like in NMS).
    :return: keep: (n_keep) 1D tensor of indices to be kept.
    :return: keep_z: (n_keep, [z1, z2]) z-coordinates to be added to boxes, which are kept in order to form cubes.
    """
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    scores = dets[:, -2]
    slice_id = dets[:, -1]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    keep_z = []
    while order.size > 0:  # order is the sorted index. maps order to index o[1] = 24 (rank1, ix 24)
        i = order[0]  # pop highest scoring element
        # pairwise xy-IoU of the current box against all remaining boxes
        xx1 = np.maximum(x1[i], x1[order])
        yy1 = np.maximum(y1[i], y1[order])
        xx2 = np.minimum(x2[i], x2[order])
        yy2 = np.minimum(y2[i], y2[order])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order] - inter)
        matches = np.argwhere(ovr > thresh)  # get all the elements that match the current box and have a lower score
        slice_ids = slice_id[order[matches]]
        core_slice = slice_id[int(i)]
        # 'wholes' (holes): slice indices inside the cluster's z-range with no
        # matching prediction; the first hole in each direction bounds the
        # z-connected part of the cluster.
        upper_wholes = [ii for ii in np.arange(core_slice, np.max(slice_ids)) if ii not in slice_ids]
        lower_wholes = [ii for ii in np.arange(np.min(slice_ids), core_slice) if ii not in slice_ids]
        max_valid_slice_id = np.min(upper_wholes) if len(upper_wholes) > 0 else np.max(slice_ids)
        min_valid_slice_id = np.max(lower_wholes) if len(lower_wholes) > 0 else np.min(slice_ids)
        # keep only the matches whose slice is z-connected to the core slice
        z_matches = matches[(slice_ids <= max_valid_slice_id) & (slice_ids >= min_valid_slice_id)]
        # pad the cube by one slice on each side of the connected range
        z1 = np.min(slice_id[order[z_matches]]) - 1
        z2 = np.max(slice_id[order[z_matches]]) + 1
        keep.append(i)
        keep_z.append([z1, z2])
        # suppress only the z-connected matches; the rest may seed new clusters
        order = np.delete(order, z_matches, axis=0)
    return keep, keep_z
def get_mirrored_patch_crops(patch_crops, org_img_shape):
    """Mirror patch-crop coordinates along y, x, and both axes.

    Works for 2D crops ([y1, y2, x1, x2]) and 3D crops with two trailing
    z-coordinates; z-coordinates are never mirrored.

    :param patch_crops: list of crop coordinate lists.
    :param org_img_shape: shape of the patient volume used as world coordinates
        (index 2 is the y extent, index 3 the x extent).
    :return: list of length 3 with the y-, x-, and xy-mirrored crop lists.
    """
    y_extent = org_img_shape[2]
    x_extent = org_img_shape[3]

    def flip_y(crop):
        # mirror the first coordinate pair; trailing coordinates pass through
        return [y_extent - crop[1], y_extent - crop[0]] + list(crop[2:])

    def flip_x(crop):
        # mirror the second coordinate pair; leading/trailing pass through
        return list(crop[:2]) + [x_extent - crop[3], x_extent - crop[2]] + list(crop[4:])

    return [
        [flip_y(crop) for crop in patch_crops],
        [flip_x(crop) for crop in patch_crops],
        [flip_x(flip_y(crop)) for crop in patch_crops],
    ]
| 55.371332 | 137 | 0.602805 |
038b2e5473d1a26776a50eb83498478505d6268d | 3,850 | py | Python | tests/test_handler_hausdorff_distance.py | tatuanb/monai_V1 | 41e492b61c78bb3c303f38b03fe9fdc74a3c2e96 | [
"Apache-2.0"
] | 1 | 2021-05-06T15:08:26.000Z | 2021-05-06T15:08:26.000Z | tests/test_handler_hausdorff_distance.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 2 | 2020-11-13T23:15:00.000Z | 2020-11-16T14:54:08.000Z | tests/test_handler_hausdorff_distance.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 1 | 2021-11-18T22:37:40.000Z | 2021-11-18T22:37:40.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import HausdorffDistance
def create_spherical_seg_3d(
    radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
    """
    Return a 3D image with a sphere inside. Voxel values will be
    1 inside the sphere, and 0 elsewhere.

    Args:
        radius: radius of sphere (in terms of number of voxels, can be partial)
        centre: location of sphere centre.
        im_shape: shape of image to create

    See also:
        :py:meth:`~create_test_image_3d`
    """
    image = np.zeros(im_shape, dtype=np.int32)
    # Open grids of signed voxel offsets from the centre along each axis.
    offsets = np.ogrid[
        -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
    ]
    # Voxels whose squared distance from the centre is within radius².
    inside = sum(off * off for off in offsets) <= radius * radius
    image[inside] = 1
    image[~inside] = 0
    return image
# Sphere of radius 20 centred at (20, 20, 20), expanded to (N=1, C=1, 99, 99, 99).
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
# prediction offset 10 voxels from the ground truth along the first axis
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
# prediction identical to the ground truth
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
# empty prediction vs. non-empty ground truth
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
# both prediction and ground truth empty
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerHausdorffDistance(unittest.TestCase):
    """Unit tests for the ignite-based HausdorffDistance metric handler."""

    # TODO test multi node Hausdorff Distance
    def test_compute(self):
        """Distances should aggregate over successive update() calls."""
        hd_metric = HausdorffDistance(include_background=True)

        def _val_func(engine, batch):
            # no-op step function; the engine exists only to host the metric
            pass

        engine = Engine(_val_func)
        hd_metric.attach(engine, "hausdorff_distance")
        # spheres offset by 10 voxels -> expected distance 10
        y_pred, y = TEST_SAMPLE_1
        hd_metric.update([y_pred, y])
        self.assertEqual(hd_metric.compute(), 10)
        # identical pred/gt contributes 0; presumably the running mean -> 5
        y_pred, y = TEST_SAMPLE_2
        hd_metric.update([y_pred, y])
        self.assertEqual(hd_metric.compute(), 5)
        # an empty prediction makes the distance infinite
        y_pred, y = TEST_SAMPLE_3
        hd_metric.update([y_pred, y])
        self.assertEqual(hd_metric.compute(), float("inf"))
        y_pred, y = TEST_SAMPLE_4
        hd_metric.update([y_pred, y])
        self.assertEqual(hd_metric.compute(), float("inf"))

    def test_shape_mismatch(self):
        """Mismatched prediction/ground-truth shapes must raise."""
        hd_metric = HausdorffDistance(include_background=True)
        with self.assertRaises((AssertionError, ValueError)):
            y_pred = TEST_SAMPLE_1[0]
            y = torch.ones((1, 1, 10, 10, 10))
            hd_metric.update([y_pred, y])

    def test_reduction(self):
        """'mean_channel' reduction should yield one value per channel."""
        hd_metric = HausdorffDistance(include_background=True, reduction="mean_channel")

        def _val_func(engine, batch):
            # no-op step function; the engine exists only to host the metric
            pass

        engine = Engine(_val_func)
        hd_metric.attach(engine, "hausdorff_distance")
        y_pred, y = TEST_SAMPLE_1
        hd_metric.update([y_pred, y])
        y_pred, y = TEST_SAMPLE_2
        hd_metric.update([y_pred, y])
        torch.testing.assert_allclose(hd_metric.compute().float(), torch.tensor([10.0, 0.0]))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 35 | 120 | 0.683377 |
7fe8369b76005cc6046340ffec7dc890dd67e097 | 5,625 | py | Python | StarCipher/StarCipher.py | starhound/StarCipher | f07faccfb6389b78494a5f87b8f3c4443f526d02 | [
"MIT"
] | 3 | 2019-09-16T21:02:42.000Z | 2021-01-24T17:08:23.000Z | StarCipher/StarCipher.py | starhound/StarCipher | f07faccfb6389b78494a5f87b8f3c4443f526d02 | [
"MIT"
] | null | null | null | StarCipher/StarCipher.py | starhound/StarCipher | f07faccfb6389b78494a5f87b8f3c4443f526d02 | [
"MIT"
] | 2 | 2019-09-16T08:56:12.000Z | 2021-01-15T04:45:46.000Z | # StarCipher (Written and Maintained by Wesley Reid - http://starhound.com)
import py.pycrypt
import pyperclip
import secretpy.cmdecorators as md
from secretpy import Atbash
from secretpy import Trifid
from secretpy import CryptMachine
from secretpy import Zigzag
from secretpy import alphabet
from py.pycrypt import reverse_cipher
from py.pycrypt import rot13_cipher
out = ''  # NOTE(review): this module-level 'out' is never read; functions use their own locals
SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'  # cipher alphabet: A-Z then a-z
MAX_KEY_SIZE = 24  # upper bound for numeric keys accepted by getKey()
def getType():
    """Prompt until a valid cipher type is chosen; return its menu number ('1'-'6').

    Bug fix: the previous accepted-word list ('xor', 'affine', 'viginere', ...)
    did not match the printed menu, and returning a cipher *name* broke
    determineType(), which only dispatches on the digits '1'-'6'. Names are
    now mapped to their menu digit before returning.
    """
    # Cipher names shown in the menu, mapped to the digits determineType() expects.
    name_to_digit = {
        'ceasar': '1',
        'zig-zag': '2',
        'zigzag': '2',
        'trifid': '3',
        'reverse': '4',
        'atbash': '5',
        'rot13': '6',
    }
    while True:
        print("""\nSelect cipher type:
        (1) Ceasar
        (2) Zig-Zag
        (3) Trifid
        (4) Reverse
        (5) Atbash
        (6) Rot13
        """)
        mode = input().lower()
        if mode in ('1', '2', '3', '4', '5', '6'):
            return mode
        if mode in name_to_digit:
            return name_to_digit[mode]
        print('Please select value 1 through 6 for a cipher type.')
def getMode():
    """Ask whether to encrypt or decrypt; return the lowercased valid answer."""
    valid_answers = 'encrypt e decrypt d'.split()
    while True:
        print('Do you wish to (e)ncrypt or (d)ecrypt a message?')
        answer = input().lower()
        if answer in valid_answers:
            return answer
        print('Enter either "encrypt" or "e" or "decrypt" or "d".')
def getMessage():
    """Read and return the message to be translated."""
    print('\nEnter your message:')
    text = input()
    return text
def getKey():
    """Prompt until a whole number in [1, MAX_KEY_SIZE] is entered and return it.

    Bug fix: a non-numeric entry used to crash with ValueError on
    int(input()); it is now caught and the user is simply re-prompted.
    """
    while True:
        print('\nEnter the key number (1-%s)' % (MAX_KEY_SIZE))
        try:
            key = int(input())
        except ValueError:
            continue  # non-numeric input: re-show the prompt instead of crashing
        if 1 <= key <= MAX_KEY_SIZE:
            return key
def hasNumbers(inputString):
    """Return True if inputString contains at least one digit character."""
    for character in inputString:
        if character.isdigit():
            return True
    return False
#Some ciphers from secretpy expect string/letter based keys
def getKeyString():
    """Prompt until a non-empty, digit-free key string is entered; return it."""
    while True:
        print('\nEnter the key value (String Form):')
        key = input()
        if hasNumbers(key):
            print('\nKey must be a string value.')
        elif key:
            return key
        else:
            print("\nPlease enter a key with at least one letter length.")
def ceasarTranslatedMessage(mode, message, key, symbols=None):
    """Caesar-shift ``message`` by ``key`` positions within an alphabet.

    Generalized: the alphabet is now an optional parameter (backward
    compatible, defaulting to the module-level SYMBOLS), and wrap-around
    uses a modulo so keys of any magnitude translate correctly.

    :param mode: any string starting with 'd' decrypts; otherwise encrypts.
    :param message: text to translate; characters outside the alphabet are
        copied through unchanged.
    :param key: shift amount.
    :param symbols: alphabet to shift within; defaults to SYMBOLS.
    :return: the translated string.
    """
    if symbols is None:
        symbols = SYMBOLS
    if mode[0] == 'd':
        key = -key
    translated = []
    for symbol in message:
        symbolIndex = symbols.find(symbol)
        if symbolIndex == -1:  # Symbol not found in the alphabet.
            translated.append(symbol)
        else:
            translated.append(symbols[(symbolIndex + key) % len(symbols)])
    return ''.join(translated)
#Completed
def ceasarCipher():
    """Interactively run the Caesar cipher and return the translated text."""
    mode = getMode()
    message = getMessage()
    key = getKey()
    print('\nYour translated text is:')
    result = ceasarTranslatedMessage(mode, message, key)
    print(result)
    return result
#Completed
def zigzagCipher():
    """Interactively run the Zigzag (rail fence) cipher via secretpy; return the result."""
    mode = getMode()
    message = getMessage()
    key = getKey()
    cipher = Zigzag()
    print('\nYour translated text is:')
    if mode == 'e' or mode == 'encrypt':
        out = cipher.encrypt(message, key)
        print(out)
    else:
        # NOTE(review): decrypt is given SYMBOLS as an explicit alphabet while
        # encrypt is not -- confirm against the secretpy Zigzag API that both
        # calls operate on the same alphabet.
        out = cipher.decrypt(message, key, SYMBOLS)
        print(out)
    return out
#Complete
def reverseCipher():
    """Interactively run the reverse cipher and return the result.

    Reversal is its own inverse, so the original encrypt/decrypt branches
    performed the identical call (py.pycrypt.reverse_cipher is the same
    function as the imported reverse_cipher); they are collapsed into one.
    The mode prompt is kept so the user flow matches the other ciphers.
    """
    getMode()  # answer does not change the operation
    message = getMessage()
    print('\nYour translated text is:')
    out = reverse_cipher(message)
    print(out)
    return out
#Completed
def trifidCipher():
    """Interactively run the Trifid cipher via secretpy's CryptMachine; return the result."""
    mode = getMode()
    message = getMessage()
    # NOTE(review): the comment above getKeyString() says some secretpy ciphers
    # expect string/letter keys, yet Trifid is driven here with the numeric key
    # from getKey() -- confirm against the secretpy Trifid API whether an
    # integer key is valid or getKeyString() should be used instead.
    key = getKey()
    machine = CryptMachine(Trifid(), key)
    print('\nYour translated text is:')
    if mode == 'e' or mode == 'encrypt':
        out = machine.encrypt(message)
        print(out)
    else:
        out = machine.decrypt(message)
        print(out)
    return out
#TODO: key input
def atbashCipher():
    """Run the Atbash cipher interactively (uppercased, spaces stripped); return the result."""
    mode = getMode()
    message = getMessage()
    machine = md.NoSpaces(md.UpperCase(CryptMachine(Atbash())))
    print('\nYour translated text is:')
    if mode in ('e', 'encrypt'):
        result = machine.encrypt(message)
    else:
        result = machine.decrypt(message)
    print(result)
    return result
#Completed
def rot13Cipher():
    """Interactively run ROT13 and return the result.

    ROT13 is its own inverse, so the original encrypt/decrypt branches made
    the identical call (py.pycrypt.rot13_cipher is the same function as the
    imported rot13_cipher); they are collapsed into one. The mode prompt is
    kept for a consistent user flow.
    """
    getMode()  # answer does not change the operation
    message = getMessage()
    print('\nYour translated text is:')
    out = rot13_cipher(message)
    print(out)
    return out
def copyPrompt(message):
    """Offer to copy ``message`` to the system clipboard.

    The unused ``spam = pyperclip.paste()`` read-back was removed.
    """
    print("\nCopy output to clipboard? (y/n)")
    answer = input().lower()
    if answer in ('y', 'yes'):
        pyperclip.copy(message)
        print('Text copied to clipboard.')
def determineType():
    """Dispatch to the cipher chosen by the user; offer to copy the output.

    Bug fix: ``out`` was previously unbound (NameError at ``if out:``) when
    getType() returned a value no branch handled; it is now initialised to
    None and the chain uses elif. The local ``type`` was renamed so it no
    longer shadows the builtin.
    """
    choice = getType()
    out = None  # guards against NameError when no branch matches
    if choice == '1':
        out = ceasarCipher()
    elif choice == '2':
        out = zigzagCipher()
    elif choice == '3':
        out = trifidCipher()
    elif choice == '4':
        out = reverseCipher()
    elif choice == '5':
        out = atbashCipher()
    elif choice == '6':
        out = rot13Cipher()
    if out:
        copyPrompt(out)
def main():
    """Program entry point: run cipher sessions until the user declines a restart."""
    while True:
        # The welcome banner is shown again on every restart, matching the
        # original recursive flow.
        print("\nWelcome to the StarCipher v1.0")
        determineType()
        restart = input("\nDo you want to restart the program? (y/n) > ")
        if restart == "y" or restart == "yes":
            print('\nRestarting...')
        else:
            print("\nThe program will be closed.")
            break
main() | 27.043269 | 83 | 0.558933 |
8c41e02808229de35fe9c889149f42676a2821ee | 972 | py | Python | setup.py | altoyield/python-beanieclient | 448b8dd328054eaf32dd7d0bdff700e603b5c27d | [
"Apache-2.0"
] | null | null | null | setup.py | altoyield/python-beanieclient | 448b8dd328054eaf32dd7d0bdff700e603b5c27d | [
"Apache-2.0"
] | null | null | null | setup.py | altoyield/python-beanieclient | 448b8dd328054eaf32dd7d0bdff700e603b5c27d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Beanie ERP API
An API specification for interacting with the Beanie ERP system # noqa: E501
OpenAPI spec version: 0.8
Contact: dev@bean.ie
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "beanie-api"
VERSION = "0.8.1"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Beanie ERP API",
author_email="dev@bean.ie",
url="https://bean.ie/beanie-api",
keywords=["Swagger", "Beanie ERP API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
An API specification for interacting with the Beanie ERP system # noqa: E501
"""
)
| 23.707317 | 81 | 0.680041 |
6d623370b4b071249f1e204969c9e6122c713aad | 5,772 | py | Python | lib/utils/filelock.py | modulus-sa/ganeti | 592c0e945cc2c7b0013f813ea8c9d8ec0d5bab98 | [
"BSD-2-Clause"
] | 396 | 2015-01-22T11:44:32.000Z | 2022-03-31T14:14:29.000Z | lib/utils/filelock.py | modulus-sa/ganeti | 592c0e945cc2c7b0013f813ea8c9d8ec0d5bab98 | [
"BSD-2-Clause"
] | 1,550 | 2015-04-05T09:53:50.000Z | 2022-03-28T17:42:20.000Z | lib/utils/filelock.py | modulus-sa/ganeti | 592c0e945cc2c7b0013f813ea8c9d8ec0d5bab98 | [
"BSD-2-Clause"
] | 119 | 2015-01-06T21:37:15.000Z | 2022-03-07T06:36:26.000Z | #
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for file-based locks.
"""
import fcntl
import errno
import os
import logging
from ganeti import errors
from ganeti.utils import retry
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  @raise errors.LockError: if the file is already locked by another process

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError as err:
    if err.errno != errno.EAGAIN:
      raise
    raise errors.LockError("File already locked")
class FileLock(object):
  """Utility class for file locks.

  Wraps a POSIX flock(2) lock held on an open file object and offers
  exclusive/shared/unlock operations with optional blocking and a
  retry-based timeout.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    _flags = os.O_RDWR | os.O_CREAT
    return cls(os.fdopen(os.open(filename, _flags, 0o664), "w+"), filename)

  def __del__(self):
    # Release the lock (by closing the file) when the object is collected.
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # The hasattr guard protects against __del__ running on a partially
    # constructed instance where "fd" was never assigned.
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
        non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        # NOTE(review): the (0.1, 1.2, 1.0) tuple is assumed to be the retry
        # delay schedule for retry.Retry -- confirm against ganeti.utils.retry.
        retry.Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
                    args=(self.fd, flag, timeout))
      except retry.RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    # Single flock attempt; with a timeout in effect, EAGAIN becomes
    # RetryAgain so retry.Retry keeps trying until the deadline.
    try:
      fcntl.flock(fd, flag)
    except IOError as err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise retry.RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
| 30.539683 | 78 | 0.682259 |
0dee57a434e4bbccce19541392a8863fb61bd3cf | 2,516 | py | Python | pylib/serialcomm.py | jheidel/ece4760-lab5 | c1ab262e40c22ee77e54067fe6dbfa2b7ee78c89 | [
"MIT"
] | 4 | 2016-02-05T08:04:17.000Z | 2021-08-30T15:44:46.000Z | pylib/serialcomm.py | jheidel/ece4760-lab5 | c1ab262e40c22ee77e54067fe6dbfa2b7ee78c89 | [
"MIT"
] | null | null | null | pylib/serialcomm.py | jheidel/ece4760-lab5 | c1ab262e40c22ee77e54067fe6dbfa2b7ee78c89 | [
"MIT"
] | 4 | 2016-02-05T08:04:18.000Z | 2019-06-04T15:48:49.000Z | from threading import Thread, Event, Condition
from multiprocessing import Process
from ctypes import *
from pylib.log import with_logging
from time import sleep, time
try:
clib = CDLL("clib/clib.so")
except:
print "C shared library missing!"
print "Did you forget to \"make\"?"
sys.exit(1)
@with_logging
class SerialComm(Thread):
    """Thread wrapper around the C serial library that drives the laser scanner."""

    def __init__(self, port):
        """Open the serial port and initialise the scanner at a default 800 PPS.

        :param port: serial device path handed to the C library
        :raises Exception: if the C library reports a non-zero status code
        """
        Thread.__init__(self)
        self.log.info("Starting serial communications on port %s" % port)
        ret = clib.init_serial(port)
        if ret != 0:
            raise Exception("SERIAL LIBRARY RETURNED %d" % ret)
        clib.scanner_init()
        self.kill = Event()
        self.c = Condition()
        self.frame = None
        self.set_pps(800)  # default PPS

    def set_point(self, x, y, blank):
        """
        Set point for the laser (x,y) with blank indicating whether the
        laser is blanked or not
        """
        ret = clib.serial_new_point(x, y, blank)
        if ret != 0:
            raise Exception("SERIAL LIBRARY RETURNED %d" % ret)

    def set_ilda_point(self, x, y, blank):
        """Convert a point from signed ILDA space to laser DAC space and send it."""
        # Bug fix: 'math' was referenced here but never imported anywhere in
        # this module, so every call raised NameError. Imported locally to
        # keep the fix self-contained.
        import math

        def map_pt(p):
            # Arcsine warp from [-2^15, 2^15) ILDA coordinates to [0, 4096).
            return int((((math.asin((float(p) / 2**15)) / math.asin(1.0)) + 1) / 2) * 4096)
        self.set_point(map_pt(x), map_pt(y), blank)

    def stop(self):
        """Stop scanning, wait two point periods for it to settle, then blank."""
        clib.scanner_stop()
        sleep(2 * 1.0 / self.pps)
        self.set_point(0, 0, True)

    def set_frame(self, frame):
        """
        Sets a new ilda frame for the laser projector to scan through
        None indicates that scanning should stop and the laser should be blanked
        """
        if frame is not None:
            if frame != self.frame:  # avoid re-uploading an unchanged frame
                self.frame = frame
                clib.new_point_set(frame.get_length())
                for i, pt in enumerate(frame.get_mapped_points()):
                    clib.set_point_by_index(i, pt["x"], pt["y"], pt["blank"])
                clib.activate_point_set()
        else:
            self.set_point(0, 0, True)

    def set_pps(self, pps):
        """
        Sets a new scanning speed for the ilda frame
        """
        self.pps = pps
        self.log.info("Settings laser to %d PPS" % pps)
        clib.scanner_set_pps(pps)

    def run(self):
        # Thread entry point; scanning is driven from the C library, so the
        # Python thread body has nothing to do.
        pass
| 30.313253 | 91 | 0.564388 |
a506d292c8099e0edfa24a839f59adbb50a38f79 | 951 | py | Python | docs/examples/compute/profitbricks/create_volume.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 1,435 | 2015-01-07T05:32:51.000Z | 2022-03-25T19:39:34.000Z | docs/examples/compute/profitbricks/create_volume.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 1,158 | 2015-01-04T18:08:42.000Z | 2022-03-24T14:34:57.000Z | docs/examples/compute/profitbricks/create_volume.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 832 | 2015-01-05T09:20:21.000Z | 2022-03-24T19:22:19.000Z | import os
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.PROFIT_BRICKS)
# Get ProfitBricks credentials from environment variables
pb_username = os.environ.get('PROFITBRICKS_USERNAME')
pb_password = os.environ.get('PROFITBRICKS_PASSWORD')
driver = cls(pb_username, pb_password)
# Select the datacenter(s) located in the 'us/las' region.
datacenters = driver.ex_list_datacenters()
location = driver.ex_describe_location(ex_location_id='us/las')
datacenter = [dc for dc in datacenters if dc.extra['location'] == location.id]
# Pick the first HDD image available in the same region.
images = driver.list_images(image_type='HDD')
image = [img for img in images if img.extra['location'] == location.id][0]
# Create a new SSD volume. Set `ex_type='HDD'` to create a HDD volume.
ssd_volume = driver.create_volume(
    name='Example SSD volume',
    size=100,  # volume size (presumably GB -- verify against the driver docs)
    image=image,
    ex_type='SSD',
    ex_datacenter=datacenter[0],
    ex_password='PuTSoMeSTRONGPaSsWoRdHeRe2017'
)
print(ssd_volume)
| 31.7 | 78 | 0.768665 |
13d858dfd7b4a7ec6acb817bfb6f091a9a221a22 | 12,375 | py | Python | nltk/tag/perceptron.py | RedShiftCompany/nltk | 3bf1254da2567418e6168021a2b9f29bec582ab3 | [
"Apache-2.0"
] | null | null | null | nltk/tag/perceptron.py | RedShiftCompany/nltk | 3bf1254da2567418e6168021a2b9f29bec582ab3 | [
"Apache-2.0"
] | null | null | null | nltk/tag/perceptron.py | RedShiftCompany/nltk | 3bf1254da2567418e6168021a2b9f29bec582ab3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This module is a port of the Textblob Averaged Perceptron Tagger
# Author: Matthew Honnibal <honnibal+gh@gmail.com>,
# Long Duong <longdt219@gmail.com> (NLTK port)
# URL: <https://github.com/sloria/textblob-aptagger>
# <http://nltk.org/>
# Copyright 2013 Matthew Honnibal
# NLTK modifications Copyright 2015 The NLTK Project
#
# This module is provided under the terms of the MIT License.
import random
from collections import defaultdict
import pickle
import logging
from nltk.tag.api import TaggerI
from nltk.data import find, load
try:
import numpy as np
except ImportError:
pass
PICKLE = "averaged_perceptron_tagger.pickle"
class AveragedPerceptron(object):
    """An averaged perceptron, as implemented by Matthew Honnibal.

    Each feature holds one weight per class; prediction is an argmax over
    summed feature weights, and training averages weights over all updates.

    See more implementation details here:
        https://explosion.ai/blog/part-of-speech-pos-tagger-in-python

    Idiom fix: ``== True`` comparisons on boolean flags were removed.
    """

    def __init__(self):
        # Each feature gets its own weight vector, so weights is a dict-of-dicts
        self.weights = {}
        self.classes = set()
        # The accumulated values, for the averaging. These will be keyed by
        # feature/clas tuples
        self._totals = defaultdict(int)
        # The last time the feature was changed, for the averaging. Also
        # keyed by feature/clas tuples
        # (tstamps is short for timestamps)
        self._tstamps = defaultdict(int)
        # Number of instances seen
        self.i = 0

    def _softmax(self, scores):
        """Return the softmax distribution over the given class scores."""
        s = np.fromiter(scores.values(), dtype=float)
        exps = np.exp(s)
        return exps / np.sum(exps)

    def predict(self, features, return_conf=False):
        """Dot-product the features and current weights and return the best label.

        :param features: mapping of feature name -> value
        :param return_conf: when True, also return the softmax confidence of
            the winning label; otherwise the returned confidence is None
        :return: (best_label, confidence_or_None)
        """
        scores = defaultdict(float)
        for feat, value in features.items():
            if feat not in self.weights or value == 0:
                continue
            weights = self.weights[feat]
            for label, weight in weights.items():
                scores[label] += value * weight
        # Do a secondary alphabetic sort, for stability
        best_label = max(self.classes, key=lambda label: (scores[label], label))
        # compute the confidence only when requested
        conf = max(self._softmax(scores)) if return_conf else None
        return best_label, conf

    def update(self, truth, guess, features):
        """Update the feature weights after one training example.

        :param truth: gold label
        :param guess: predicted label
        :param features: iterable of feature names that fired
        """
        def upd_feat(c, f, w, v):
            # Accumulate the weight held since the last change, for averaging.
            param = (f, c)
            self._totals[param] += (self.i - self._tstamps[param]) * w
            self._tstamps[param] = self.i
            self.weights[f][c] = w + v

        self.i += 1
        if truth == guess:
            return None
        for f in features:
            weights = self.weights.setdefault(f, {})
            upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)

    def average_weights(self):
        """Average weights from all iterations (call once, after training)."""
        for feat, weights in self.weights.items():
            new_feat_weights = {}
            for clas, weight in weights.items():
                param = (feat, clas)
                total = self._totals[param]
                # Include the span since the last update at the final weight.
                total += (self.i - self._tstamps[param]) * weight
                averaged = round(total / self.i, 3)
                if averaged:
                    new_feat_weights[clas] = averaged
            self.weights[feat] = new_feat_weights

    def save(self, path):
        """Save the pickled model weights."""
        with open(path, "wb") as fout:
            return pickle.dump(dict(self.weights), fout)

    def load(self, path):
        """Load the pickled model weights."""
        self.weights = load(path)
class PerceptronTagger(TaggerI):
"""
Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
See more implementation details here:
https://explosion.ai/blog/part-of-speech-pos-tagger-in-python
>>> from nltk.tag.perceptron import PerceptronTagger
Train the model
>>> tagger = PerceptronTagger(load=False)
>>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')],
... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]])
>>> tagger.tag(['today','is','a','beautiful','day'])
[('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')]
Use the pretrain model (the default constructor)
>>> pretrain = PerceptronTagger()
>>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split())
[('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')]
>>> pretrain.tag("The red cat".split())
[('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')]
"""
START = ["-START-", "-START2-"]
END = ["-END-", "-END2-"]
def __init__(self, load=True):
"""
:param load: Load the pickled model upon instantiation.
"""
self.model = AveragedPerceptron()
self.tagdict = {}
self.classes = set()
if load:
AP_MODEL_LOC = "file:" + str(
find("taggers/averaged_perceptron_tagger/" + PICKLE)
)
self.load(AP_MODEL_LOC)
def tag(self, tokens, return_conf=False, use_tagdict=True):
"""
Tag tokenized sentences.
:params tokens: list of word
:type tokens: list(str)
"""
prev, prev2 = self.START
output = []
context = self.START + [self.normalize(w) for w in tokens] + self.END
for i, word in enumerate(tokens):
tag, conf = (
(self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None)
)
if not tag:
features = self._get_features(i, word, context, prev, prev2)
tag, conf = self.model.predict(features, return_conf)
output.append((word, tag, conf) if return_conf == True else (word, tag))
prev2 = prev
prev = tag
return output
    def train(self, sentences, save_loc=None, nr_iter=5):
        """Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
        controls the number of Perceptron training iterations.

        :param sentences: A list or iterator of sentences, where each sentence
            is a list of (words, tags) tuples.
        :param save_loc: If not ``None``, saves a pickled model in this location.
        :param nr_iter: Number of training iterations.
        """
        # We'd like to allow ``sentences`` to be either a list or an iterator,
        # the latter being especially important for a large training dataset.
        # Because ``self._make_tagdict(sentences)`` runs regardless, we make
        # it populate ``self._sentences`` (a list) with all the sentences.
        # This saves the overheard of just iterating through ``sentences`` to
        # get the list by ``sentences = list(sentences)``.
        self._sentences = list()  # to be populated by self._make_tagdict...
        self._make_tagdict(sentences)
        self.model.classes = self.classes
        for iter_ in range(nr_iter):
            c = 0  # correct greedy guesses in this epoch
            n = 0  # total words seen in this epoch
            for sentence in self._sentences:
                words, tags = zip(*sentence)
                prev, prev2 = self.START
                context = self.START + [self.normalize(w) for w in words] + self.END
                for i, word in enumerate(words):
                    # unambiguous words (from tagdict) bypass the model entirely
                    guess = self.tagdict.get(word)
                    if not guess:
                        feats = self._get_features(i, word, context, prev, prev2)
                        guess, _ = self.model.predict(feats)
                        self.model.update(tags[i], guess, feats)
                    prev2 = prev
                    prev = guess
                    c += guess == tags[i]
                    n += 1
            random.shuffle(self._sentences)  # reshuffle between epochs
            # NOTE(review): _pc appears to be an accuracy-percentage helper
            # defined elsewhere in this module.
            logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
        # We don't need the training sentences anymore, and we don't want to
        # waste space on them when we pickle the trained tagger.
        self._sentences = None
        self.model.average_weights()
        # Pickle as a binary file
        if save_loc is not None:
            with open(save_loc, "wb") as fout:
                # changed protocol from -1 to 2 to make pickling Python 2 compatible
                pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2)
def load(self, loc):
"""
:param loc: Load a pickled model at location.
:type loc: str
"""
self.model.weights, self.tagdict, self.classes = load(loc)
self.model.classes = self.classes
def normalize(self, word):
"""
Normalization used in pre-processing.
- All words are lower cased
- Groups of digits of length 4 are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
"""
if "-" in word and word[0] != "-":
return "!HYPHEN"
elif word.isdigit() and len(word) == 4:
return "!YEAR"
elif word[0].isdigit():
return "!DIGITS"
else:
return word.lower()
def _get_features(self, i, word, context, prev, prev2):
"""Map tokens into a feature representation, implemented as a
{hashable: int} dict. If the features change, a new model must be
trained.
"""
def add(name, *args):
features[" ".join((name,) + tuple(args))] += 1
i += len(self.START)
features = defaultdict(int)
# It's useful to have a constant feature, which acts sort of like a prior
add("bias")
add("i suffix", word[-3:])
add("i pref1", word[0])
add("i-1 tag", prev)
add("i-2 tag", prev2)
add("i tag+i-2 tag", prev, prev2)
add("i word", context[i])
add("i-1 tag+i word", prev, context[i])
add("i-1 word", context[i - 1])
add("i-1 suffix", context[i - 1][-3:])
add("i-2 word", context[i - 2])
add("i+1 word", context[i + 1])
add("i+1 suffix", context[i + 1][-3:])
add("i+2 word", context[i + 2])
return features
def _make_tagdict(self, sentences):
"""
Make a tag dictionary for single-tag words.
:param sentences: A list of list of (word, tag) tuples.
"""
counts = defaultdict(lambda: defaultdict(int))
for sentence in sentences:
self._sentences.append(sentence)
for word, tag in sentence:
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
self.tagdict[word] = tag
def _pc(n, d):
return (n / d) * 100
def _load_data_conll_format(filename):
print("Read from file: ", filename)
with open(filename, "rb") as fin:
sentences = []
sentence = []
for line in fin.readlines():
line = line.strip()
# print line
if len(line) == 0:
sentences.append(sentence)
sentence = []
continue
tokens = line.split("\t")
word = tokens[1]
tag = tokens[4]
sentence.append((word, tag))
return sentences
def _get_pretrain_model():
    """Train and evaluate on the English ConLL data (WSJ part of the PTB).

    Train: sections 2-11; Test: section 23. The trained model is pickled
    to ``PICKLE`` and the test-set accuracy is printed.
    """
    tagger = PerceptronTagger()
    train_sentences = _load_data_conll_format("english_ptb_train.conll")
    test_sentences = _load_data_conll_format("english_ptb_test.conll")
    print("Size of training and testing (sentence)",
          len(train_sentences), len(test_sentences))
    # Train (which also saves the model), then score on the held-out set.
    tagger.train(train_sentences, PICKLE)
    print("Accuracy : ", tagger.evaluate(test_sentences))
if __name__ == "__main__":
    # Training entry point is disabled by default because it requires the
    # english_ptb_*.conll data files; uncomment to retrain the model.
    # _get_pretrain_model()
    pass
| 35.458453 | 148 | 0.567919 |
b26744e4deea27c27acaa3c66eb2e9e1aaeac771 | 40,432 | py | Python | legtool/tabs/gait_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | [
"Apache-2.0"
] | 10 | 2015-09-23T19:28:06.000Z | 2021-04-27T02:32:27.000Z | legtool/tabs/gait_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | [
"Apache-2.0"
] | null | null | null | legtool/tabs/gait_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | [
"Apache-2.0"
] | 9 | 2015-10-16T07:26:18.000Z | 2021-01-13T07:18:35.000Z | # Copyright 2014 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy
import time
import sys
import trollius as asyncio
from trollius import From, Task
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from ..tf import tf
from ..servo import selector
from ..gait import ripple
from . import convexhull
from . import settings
from .common import BoolContext
from . import graphics_scene
PLAYBACK_TIMEOUT_MS = 40
class GaitGeometryDisplay(object):
    """Renders a 2D projection of the robot's gait state in a Qt view.

    Draws the body, center-of-gravity, shoulder and leg positions, plus the
    support polygon of the legs currently in stance, all projected into a
    selectable reference frame and plane.
    """
    # Reference frame the display projects into.
    FRAME_ROBOT, FRAME_WORLD, FRAME_BODY = range(3)
    # Which pair of axes the 3D points are projected onto.
    PROJECTION_XY, PROJECTION_YZ, PROJECTION_XZ = range(3)

    def __init__(self, ui):
        self.graphics_scene = QtGui.QGraphicsScene()
        self.graphics_view = ui.gaitGeometryView
        self.graphics_view.setScene(self.graphics_scene)
        # Flip Y so the scene uses a conventional math orientation
        # (y increases upward) instead of Qt's screen coordinates.
        self.graphics_view.setTransform(QtGui.QTransform().scale(1, -1))
        self.config = None
        self.state = None
        self.frame = self.FRAME_ROBOT
        self.projection = self.PROJECTION_XY
        self.scale = 300.0  # half-extent of the visible area, in mm
        self.axes_item = graphics_scene.AxesItem(true_scale=True, grid_skip=2)
        self.axes_item.x_scale = self.scale
        self.axes_item.y_scale = self.scale
        self.axes_item.x_suffix = 'mm'
        self.axes_item.y_suffix = 'mm'
        self.graphics_scene.addItem(self.axes_item)
        # Translucent green polygon showing the static support region.
        self.support_poly = QtGui.QGraphicsPolygonItem()
        self.support_poly.setPen(QtGui.QColor(0, 0, 0, 0))
        self.support_poly.setBrush(QtGui.QBrush(QtGui.QColor(0, 128, 0, 15)))
        self.graphics_scene.addItem(self.support_poly)
        # Per-configuration items (body, cog, shoulders, legs); rebuilt by
        # set_gait_config.
        self.items = []

    def resize(self):
        """Refit the view so the full +/- scale region stays visible."""
        self.graphics_view.fitInView(-self.scale, -self.scale,
                                     2 * self.scale, 2 * self.scale,
                                     QtCore.Qt.KeepAspectRatio)

    def set_view(self, frame, projection, scale):
        """Change the reference frame, projection plane, and display scale."""
        self.frame = frame
        self.projection = projection
        self.scale = scale
        self.axes_item.x_scale = scale
        self.axes_item.y_scale = scale
        # Re-render under the new view parameters if we have a state.
        if self.config is not None and self.state is not None:
            self.set_state(self.state)
        self.axes_item.update()

    def set_gait_config(self, config):
        """Rebuild all scene items for a new gait configuration."""
        assert config is not None
        self.config = config
        for item in self.items:
            self.graphics_scene.removeItem(item)
        self.items = []
        # Things to render:
        #  * Body position
        #  * Body CoG
        #  * Shoulder positions
        #  * Leg positions
        #  * Stability polygon
        #
        # Nice to have: Coordinate axes and scale labels.
        body_poly = QtGui.QPolygonF([
                QtCore.QPointF(-10.0, -10.0),
                QtCore.QPointF(-10.0, 10.0),
                QtCore.QPointF(10.0, 10.0),
                QtCore.QPointF(10.0, -10.0)])
        self.body = self.graphics_scene.addPolygon(
            body_poly, QtGui.QPen(QtCore.Qt.black),
            QtGui.QBrush(QtCore.Qt.red))
        # ItemIgnoresTransformations keeps the markers a fixed pixel size
        # regardless of the view's zoom.
        self.body.setFlags(QtGui.QGraphicsItem.ItemIgnoresTransformations)
        self.items.append(self.body)
        self.cog = self.graphics_scene.addEllipse(
            -5, -5, 10, 10,
            QtGui.QPen(QtCore.Qt.black),
            QtGui.QBrush(QtCore.Qt.yellow))
        self.cog.setFlags(QtGui.QGraphicsItem.ItemIgnoresTransformations)
        self.items.append(self.cog)
        self.shoulders = {}
        self.legs = {}
        shoulder_poly = QtGui.QPolygonF([
                QtCore.QPointF(-10, 0),
                QtCore.QPointF(0, 10),
                QtCore.QPointF(10, 0),
                QtCore.QPointF(0, -10)])
        for leg_num, leg in config.mechanical.leg_config.iteritems():
            this_shoulder = self.graphics_scene.addPolygon(
                shoulder_poly, QtGui.QPen(QtCore.Qt.black),
                QtGui.QBrush(QtCore.Qt.blue))
            this_shoulder.setFlags(
                QtGui.QGraphicsItem.ItemIgnoresTransformations)
            self.shoulders[leg_num] = this_shoulder
            self.items.append(this_shoulder)
            this_leg = self.graphics_scene.addEllipse(
                -10, -10, 20, 20,
                QtGui.QPen(QtCore.Qt.black),
                QtGui.QBrush(QtCore.Qt.green))
            this_leg.setFlags(QtGui.QGraphicsItem.ItemIgnoresTransformations)
            self.legs[leg_num] = this_leg
            self.items.append(this_leg)

    def _project(self, point, frame):
        """Map *point* (in *frame*) into the display frame, then project
        it onto the selected 2D plane. Returns an (x, y) tuple."""
        target_frame = None
        if self.frame == self.FRAME_ROBOT:
            target_frame = self.state.robot_frame
        elif self.frame == self.FRAME_WORLD:
            target_frame = self.state.world_frame
        elif self.frame == self.FRAME_BODY:
            target_frame = self.state.body_frame
        target_point = target_frame.map_from_frame(frame, point)
        if self.projection == self.PROJECTION_XY:
            return (target_point.x, target_point.y)
        elif self.projection == self.PROJECTION_YZ:
            return (target_point.y, target_point.z)
        elif self.projection == self.PROJECTION_XZ:
            return (target_point.x, target_point.z)

    def set_state(self, state):
        """Update every scene item to reflect the given gait state."""
        assert self.config is not None
        self.state = state
        stance_points = []
        self.body.setPos(*self._project(tf.Point3D(), state.body_frame))
        self.cog.setPos(*self._project(tf.Point3D(), state.cog_frame))
        for leg_num, shoulder in self.shoulders.iteritems():
            if leg_num not in state.legs:
                continue
            shoulder.setPos(*self._project(tf.Point3D(),
                                           state.legs[leg_num].shoulder_frame))
        for leg_num, leg_item in self.legs.iteritems():
            if leg_num not in state.legs:
                continue
            leg = state.legs[leg_num]
            point = self._project(leg.point, leg.frame)
            leg_item.setPos(*point)
            # Color the leg by IK feasibility and gait mode:
            # red = unreachable, green = stance, yellow = swing.
            shoulder_point = leg.shoulder_frame.map_from_frame(
                leg.frame, leg.point)
            ik_result = leg.leg_ik.do_ik(shoulder_point)
            if ik_result is None:
                color = QtCore.Qt.red
            elif leg.mode == ripple.STANCE:
                color = QtCore.Qt.green
                stance_points.append(point)
            elif leg.mode == ripple.SWING:
                color = QtCore.Qt.yellow
            else:
                assert False, 'unknown leg mode %d' % leg.mode
            leg_item.setBrush(QtGui.QBrush(color))
        # A support polygon only exists with at least 3 stance legs.
        if len(stance_points) >= 3:
            self.support_poly.setVisible(True)
            hull = convexhull.convexHull(stance_points)
            poly = QtGui.QPolygonF([QtCore.QPointF(x, y) for x, y in hull])
            self.support_poly.setPolygon(poly)
        else:
            self.support_poly.setVisible(False)
        self.resize()
class GaitGraphDisplay(object):
    """Renders the gait graph: one horizontal band per leg over a full
    gait cycle, with swing phases drawn as black bars and a movable
    vertical cursor marking the current phase."""
    def __init__(self, ui):
        self.graphics_scene = QtGui.QGraphicsScene()
        self.graphics_view = ui.gaitGraphView
        self.graphics_view.setScene(self.graphics_scene)
        self.phase_line = None
        self.phase = 0.0

    def resize(self):
        self.fit_in_view()

    def fit_in_view(self):
        # Extra margin on the left leaves room for the leg-number labels.
        self.graphics_view.fitInView(QtCore.QRectF(-0.1, 0, 1.1, 1))

    def set_phase(self, phase):
        """Move the vertical phase cursor to *phase* (0.0 - 1.0)."""
        self.phase = phase
        if self.phase_line:
            pos = self.phase_line.pos()
            self.phase_line.setPos(phase, pos.y())

    def set_gait_graph(self, graph):
        """Rebuild the display from *graph* (mapping leg number to a
        sequence of (phase, mode) transitions)."""
        self.graphics_scene.clear()
        leg_numbers = sorted(graph.leg.keys())
        count = len(leg_numbers)
        if count == 0:
            return
        self.graphics_scene.addRect(0., 0., 1., 1.)
        self.phase_line = self.graphics_scene.addLine(0., 0., 0., 1.0)
        y_offset = 0.0
        y_size = 1.0 / count
        for leg_number in leg_numbers:
            label = self.graphics_scene.addSimpleText("%d" % leg_number)
            label.setPos(-0.08, y_offset + 0.1 * y_size)
            label.setFlag(QtGui.QGraphicsItem.ItemIgnoresTransformations)
            self.graphics_scene.addLine(0, y_offset, 1.0, y_offset)
            old_phase = 0.0
            old_swing = False
            # Walk the transition list; a SWING->STANCE edge closes out a
            # swing interval, which is drawn as a bar.
            for phase, mode in graph.leg[leg_number].sequence:
                if mode == ripple.STANCE and old_swing:
                    # Render a black bar for this swing phase.
                    self.graphics_scene.addRect(
                        old_phase, y_offset + 0.1 * y_size,
                        phase - old_phase, 0.8 * y_size,
                        QtGui.QPen(),
                        QtGui.QBrush(QtCore.Qt.black))
                old_phase = phase
                old_swing = True if mode == ripple.SWING else False
            y_offset += y_size
        self.fit_in_view()
class CommandWidget(object):
    """Interactive 2D command pad.

    Two selectable command attributes are mapped to the pad's X and Y
    axes. A background grid of cells is colored by feasibility of the
    corresponding command (computed asynchronously), and mouse input on
    the pad writes values back into the shared command object.
    """
    # Command attributes selectable on either pad axis; indices match
    # the axis combo boxes and ATTR_SUFFIXES/scales below.
    ATTR_NAMES = ['translate_x_mm_s',
                  'translate_y_mm_s',
                  'rotate_deg_s',
                  'body_x_mm',
                  'body_y_mm',
                  'body_z_mm',
                  'body_pitch_deg',
                  'body_roll_deg',
                  'body_yaw_deg',]
    # Display units for each entry of ATTR_NAMES.
    ATTR_SUFFIXES = ['mm/s',
                     'mm/s',
                     'deg/s',
                     'mm',
                     'mm',
                     'mm',
                     'deg',
                     'deg',
                     'deg',]

    def __init__(self, ui, command, command_change_callback):
        self.ui = ui
        self.parent_command = command
        self.command_change_callback = command_change_callback
        # Full-scale value for each attribute in ATTR_NAMES.
        self.scales = [ 400.0, 400.0, 50.0,
                        100.0, 100.0, 100.0,
                        45.0, 45.0, 45.0 ]
        # Serializes the asynchronous feasibility recomputation.
        self.update_lock = asyncio.Lock()
        self.config = None
        self.command = None
        self.graphics_scene = graphics_scene.GraphicsScene()
        self.graphics_scene.sceneMouseMoveEvent.connect(
            self.handle_mouse_move)
        self.graphics_scene.sceneMousePressEvent.connect(
            self.handle_mouse_press)
        self.graphics_view = self.ui.gaitCommandView
        # Flip Y so the scene uses math orientation, not screen pixels.
        self.graphics_view.setTransform(QtGui.QTransform().scale(1, -1))
        self.graphics_view.setScene(self.graphics_scene)
        # Guards against recursive scale-spin updates.
        self.in_scale_changed = BoolContext()
        for combo in [self.ui.commandXCombo,
                      self.ui.commandYCombo]:
            combo.currentIndexChanged.connect(self.handle_axis_change)
        for spin in [self.ui.commandXScaleSpin,
                     self.ui.commandYScaleSpin]:
            spin.valueChanged.connect(self.handle_scale_change)
        self.axes_item = graphics_scene.AxesItem()
        self.graphics_scene.addItem(self.axes_item)
        # Feasibility grid: one rect per cell, keyed by integer (x, y)
        # cell coordinates in [-grid_count+1, grid_count-1].
        self.grid_count = 10
        self.usable_rects = {}
        for x in range(-self.grid_count + 1, self.grid_count):
            for y in range(-self.grid_count + 1, self.grid_count):
                self.usable_rects[(x, y)] = \
                    self.graphics_scene.addRect(
                    (x - 0.5) / self.grid_count,
                    (y - 0.5) / self.grid_count,
                    1.0 / self.grid_count, 1.0 / self.grid_count)
        for rect in self.usable_rects.itervalues():
            rect.setPen(QtGui.QPen(QtCore.Qt.NoPen))
            rect.setZValue(-20)

    def resize(self):
        self.fit_in_view()

    def fit_in_view(self):
        self.graphics_view.fitInView(QtCore.QRectF(-1, -1, 2, 2))

    def read_settings(self, config):
        """Restore axis selections and per-attribute scales from *config*."""
        def set_combo(combo, name):
            settings.restore_combo(config, 'gaitconfig', combo, name)
        set_combo(self.ui.commandXCombo, 'command_x_axis')
        set_combo(self.ui.commandYCombo, 'command_y_axis')
        for index in range(len(self.scales)):
            name = 'command_axis_scale_%d' % index
            if config.has_option('gaitconfig', name):
                self.scales[index] = config.getfloat('gaitconfig', name)
        self.handle_axis_change()

    def write_settings(self, config):
        """Persist axis selections and per-attribute scales into *config*."""
        config.set('gaitconfig', 'command_x_axis',
                   self.ui.commandXCombo.currentText())
        config.set('gaitconfig', 'command_y_axis',
                   self.ui.commandYCombo.currentText())
        for index, value in enumerate(self.scales):
            config.set('gaitconfig', 'command_axis_scale_%d' % index, value)

    def handle_axis_change(self):
        """Sync spins/axes labels after an axis selection change and
        trigger a feasibility recomputation."""
        with self.in_scale_changed:
            self.ui.commandXScaleSpin.setValue(self.x_scale())
            self.ui.commandYScaleSpin.setValue(self.y_scale())
            self.axes_item.x_scale = self.x_scale()
            self.axes_item.y_scale = self.y_scale()
            self.axes_item.x_suffix = self.ATTR_SUFFIXES[self.x_axis()]
            self.axes_item.y_suffix = self.ATTR_SUFFIXES[self.y_axis()]
            self.axes_item.update()
            if self.config is not None:
                self.update_allowable(self.config, self.command)

    def handle_scale_change(self, value):
        """Propagate a scale-spin edit into self.scales (keeping both
        spins in lockstep when the same attribute is on both axes)."""
        if self.in_scale_changed.value:
            return
        with self.in_scale_changed:
            if self.x_axis() == self.y_axis():
                self.ui.commandXScaleSpin.setValue(value)
                self.ui.commandYScaleSpin.setValue(value)
            self.scales[self.x_axis()] = self.ui.commandXScaleSpin.value()
            self.scales[self.y_axis()] = self.ui.commandYScaleSpin.value()
            self.axes_item.x_scale = self.x_scale()
            self.axes_item.y_scale = self.y_scale()
            self.axes_item.update()
            if self.config is not None:
                self.update_allowable(self.config, self.command)

    def x_axis(self):
        # Index into ATTR_NAMES for the X axis.
        return self.ui.commandXCombo.currentIndex()

    def y_axis(self):
        # Index into ATTR_NAMES for the Y axis.
        return self.ui.commandYCombo.currentIndex()

    def x_scale(self):
        return self.scales[self.x_axis()]

    def y_scale(self):
        return self.scales[self.y_axis()]

    def handle_mouse_move(self, cursor):
        """Map a scene-coordinate cursor position into command values and
        notify the owner via command_change_callback."""
        x_value = cursor.x() * self.x_scale()
        y_value = cursor.y() * self.y_scale()
        if self.x_axis() == self.y_axis():
            # Same attribute on both axes: average the two readings.
            x_value = y_value = 0.5 * (x_value + y_value)
        setattr(self.parent_command,
                self.ATTR_NAMES[self.x_axis()], x_value)
        setattr(self.parent_command,
                self.ATTR_NAMES[self.y_axis()], y_value)
        self.command_change_callback()

    def handle_mouse_press(self, cursor):
        self.handle_mouse_move(cursor)

    def update_allowable(self, config, command):
        """Request an asynchronous refresh of the feasibility grid for a
        new (config, command) pair; dims the grid while pending."""
        self.next_config = config.copy()
        self.next_command = command.copy()
        # Fade the existing cells to show they are stale.
        for (x, y), rect in self.usable_rects.iteritems():
            old_brush = rect.brush()
            old_color = old_brush.color()
            rect.setBrush(QtGui.QBrush(QtGui.QColor(
                        old_color.red(),
                        old_color.green(),
                        old_color.blue(),
                        64)))
        Task(self._really_update_allowable())

    @asyncio.coroutine
    def _really_update_allowable(self):
        # Only one updater runs at a time; a second request just leaves
        # next_config set and returns, and the running updater picks it up.
        if self.update_lock.locked():
            return
        yield From(self.update_lock.acquire())
        try:
            while self.next_config is not None:
                yield From(self.do_update_allowable())
        finally:
            self.update_lock.release()

    @asyncio.coroutine
    def do_update_allowable(self):
        """Recolor every grid cell by whether the corresponding command is
        supported (green), modified to be supported (yellow), or not
        supported (red), yielding periodically to keep the UI live."""
        self.config = self.next_config
        self.command = self.next_command
        my_gait = ripple.RippleGait(self.next_config)
        my_command = self.next_command.copy()
        self.next_config = None
        self.next_command = None
        next_wait = time.time() + 0.5 * 0.001 * PLAYBACK_TIMEOUT_MS
        for (x, y), rect in self.usable_rects.iteritems():
            x_value = self.x_scale() * float(x) / self.grid_count
            y_value = self.y_scale() * float(y) / self.grid_count
            setattr(my_command, self.ATTR_NAMES[self.x_axis()], x_value)
            setattr(my_command, self.ATTR_NAMES[self.y_axis()], y_value)
            color = (0, 255, 0)
            try:
                my_gait.set_command(my_command)
                # The gait may accept a modified version of the command;
                # mark that case yellow rather than green.
                actual_command = my_gait.command
                actual_x_value = getattr(
                    actual_command, self.ATTR_NAMES[self.x_axis()])
                actual_y_value = getattr(
                    actual_command, self.ATTR_NAMES[self.y_axis()])
                if actual_x_value != x_value or actual_y_value != y_value:
                    color = (255, 255, 0)
            except ripple.NotSupported:
                color = (255, 0, 0)
            rect.setBrush(QtGui.QBrush(QtGui.QColor(*color)))
            if time.time() > next_wait:
                yield From(asyncio.sleep(0.5 * 0.001 * PLAYBACK_TIMEOUT_MS))
                # Abort early if a newer request arrived while we slept.
                if self.next_config is not None:
                    return
                next_wait = time.time() + 0.5 * 0.001 * PLAYBACK_TIMEOUT_MS
class GaitTab(object):
(PLAYBACK_IDLE,
PLAYBACK_SINGLE,
PLAYBACK_REPEAT,
PLAYBACK_SLOW_REPEAT) = range(4)
def __init__(self, ui, ikconfig_tab, servo_tab):
self.ui = ui
self.ikconfig_tab = ikconfig_tab
self.servo_tab = servo_tab
self.current_command = None
self.next_state = None
self.playback_mode = self.PLAYBACK_IDLE
self.in_gait_changed = BoolContext()
self.in_number_changed = BoolContext()
self.in_command_changed = BoolContext()
self.ripple_config = ripple.RippleConfig()
self.ripple = ripple.RippleGait(self.ripple_config)
self.command = ripple.Command()
self.command_widget = CommandWidget(
ui, self.command, self.handle_widget_set_command)
self.current_states = []
self.gait_graph_display = GaitGraphDisplay(self.ui)
self.gait_geometry_display = GaitGeometryDisplay(self.ui)
self.ui.gaitLegList.currentItemChanged.connect(self.handle_leg_change)
for spin in [self.ui.mountingLegXSpin,
self.ui.mountingLegYSpin,
self.ui.mountingLegZSpin]:
spin.valueChanged.connect(self.handle_leg_data_change)
for spin in [self.ui.bodyCogXSpin,
self.ui.bodyCogYSpin,
self.ui.bodyCogZSpin,
self.ui.idlePositionXSpin,
self.ui.idlePositionYSpin,
self.ui.idlePositionZSpin,
self.ui.maxCycleTimeSpin,
self.ui.liftHeightSpin,
self.ui.swingPercentSpin,
self.ui.positionMarginSpin,
self.ui.bodyZOffsetSpin,
self.ui.staticCenterSpin,
self.ui.staticStableSpin,
self.ui.staticMarginSpin]:
spin.valueChanged.connect(self.handle_gait_config_change)
self.ui.staticEnableCheck.toggled.connect(
self.handle_gait_config_change)
self.ui.legOrderEdit.editingFinished.connect(
self.handle_leg_order_editing_finished)
self.command_spins = [
(self.ui.commandXSpin, 'translate_x_mm_s'),
(self.ui.commandYSpin, 'translate_y_mm_s'),
(self.ui.commandRotSpin, 'rotate_deg_s'),
(self.ui.commandBodyXSpin, 'body_x_mm'),
(self.ui.commandBodyYSpin, 'body_y_mm'),
(self.ui.commandBodyZSpin, 'body_z_mm'),
(self.ui.commandPitchSpin, 'body_pitch_deg'),
(self.ui.commandRollSpin, 'body_roll_deg'),
(self.ui.commandYawSpin, 'body_yaw_deg'),
]
for spin, _ in self.command_spins:
spin.valueChanged.connect(self.handle_command_change)
self.ui.commandResetButton.clicked.connect(self.handle_command_reset)
for combo in [self.ui.geometryFrameCombo,
self.ui.geometryProjectionCombo]:
combo.currentIndexChanged.connect(self.handle_geometry_change)
self.ui.geometryScaleSpin.valueChanged.connect(
self.handle_geometry_change)
self.ui.tabWidget.currentChanged.connect(self.handle_current_changed)
self.phase_step = 2.0 / self.ui.playbackPhaseSlider.maximum()
self.ui.playbackBeginCombo.currentIndexChanged.connect(
self.handle_playback_config_change)
self.ui.playbackPhaseSlider.valueChanged.connect(
self.handle_playback_phase_change)
for button, state in [
(self.ui.playbackSingleButton, self.PLAYBACK_SINGLE),
(self.ui.playbackRepeatButton, self.PLAYBACK_REPEAT),
(self.ui.playbackSlowRepeatButton, self.PLAYBACK_SLOW_REPEAT)]:
button.toggled.connect(
functools.partial(self.handle_playback_state_change, state))
self.playback_timer = QtCore.QTimer()
self.playback_timer.timeout.connect(self.handle_playback_timer)
def resizeEvent(self, event):
if self.ui.tabWidget.currentIndex() == 2:
self.gait_graph_display.resize()
self.gait_geometry_display.resize()
self.command_widget.resize()
def get_float_configs(self):
return [(self.ui.geometryScaleSpin, 'geometry_scale_mm'),
(self.ui.commandXSpin, 'command_x_mm_s'),
(self.ui.commandYSpin, 'command_y_mm_s'),
(self.ui.commandRotSpin, 'command_rot_deg_s'),
(self.ui.commandBodyXSpin, 'command_body_x_mm'),
(self.ui.commandBodyYSpin, 'command_body_y_mm'),
(self.ui.commandBodyZSpin, 'command_body_z_mm'),
(self.ui.commandPitchSpin, 'command_body_pitch_deg'),
(self.ui.commandRollSpin, 'command_body_roll_deg'),
(self.ui.commandYawSpin, 'command_body_yaw_deg'),
]
def string_to_leg_config(self, value):
assert isinstance(value, str)
fields = [float(x) for x in value.split(',')]
assert len(fields) >= 3
result = ripple.LegConfig()
result.mount_x_mm = fields[0]
result.mount_y_mm = fields[1]
result.mount_z_mm = fields[2]
return result
def leg_config_to_string(self, leg_config):
assert isinstance(leg_config, ripple.LegConfig)
return '%.2f,%.2f,%.2f' % (
leg_config.mount_x_mm,
leg_config.mount_y_mm,
leg_config.mount_z_mm)
def read_settings(self, config):
if not config.has_section('gaitconfig'):
return
class IkGetter(object):
def __init__(self, parent):
self.parent = parent
def __getitem__(self, index):
return self.parent.ikconfig_tab.get_leg_ik(index)
self.ripple_config = \
ripple.RippleConfig.read_settings(config, 'gaitconfig',
IkGetter(self))
with self.in_command_changed:
for spin, name in self.get_float_configs():
if config.has_option('gaitconfig', name):
spin.setValue(config.getfloat('gaitconfig', name))
def set_combo(combo, name):
settings.restore_combo(config, 'gaitconfig', combo, name)
set_combo(self.ui.geometryFrameCombo, 'geometry_frame')
set_combo(self.ui.geometryProjectionCombo, 'geometry_projection')
self.command_widget.read_settings(config)
self.update_ui_from_config()
self.handle_leg_change(self.ui.gaitLegList.currentItem())
self.handle_gait_config_change()
self.handle_geometry_change()
self.handle_command_change()
def update_ui_from_config(self):
with self.in_gait_changed:
c = self.ripple_config
m = c.mechanical
legs = m.leg_config.values()
l = legs[0] if len(legs) > 0 else ripple.LegConfig()
spins = [
(self.ui.bodyCogXSpin, m.body_cog_x_mm),
(self.ui.bodyCogYSpin, m.body_cog_y_mm),
(self.ui.bodyCogZSpin, m.body_cog_z_mm),
(self.ui.idlePositionXSpin, l.idle_x_mm),
(self.ui.idlePositionYSpin, l.idle_y_mm),
(self.ui.idlePositionZSpin, l.idle_z_mm),
(self.ui.maxCycleTimeSpin, c.max_cycle_time_s),
(self.ui.liftHeightSpin, c.lift_height_mm),
(self.ui.swingPercentSpin, c.swing_percent),
(self.ui.positionMarginSpin, c.position_margin_percent),
(self.ui.bodyZOffsetSpin, c.body_z_offset_mm),
(self.ui.staticCenterSpin, c.static_center_factor),
(self.ui.staticStableSpin, c.static_stable_factor),
(self.ui.staticMarginSpin, c.static_margin_mm),
]
for spin, value in spins:
spin.setValue(value)
self.ui.staticEnableCheck.setChecked(c.statically_stable)
self.ui.legOrderEdit.setText(c.str_leg_order(c.leg_order))
self.handle_leg_change(self.ui.gaitLegList.currentItem())
def write_settings(self, config):
self.ripple_config.write_settings(config, 'gaitconfig')
for spin, name in self.get_float_configs():
config.set('gaitconfig', name, spin.value())
config.set('gaitconfig', 'geometry_frame',
self.ui.geometryFrameCombo.currentText())
config.set('gaitconfig', 'geometry_projection',
self.ui.geometryProjectionCombo.currentText())
self.command_widget.write_settings(config)
def stringify_leg_order(self, data):
return ripple.RippleConfig.str_leg_order(data)
def validate_leg_order(self, data):
"""Accept data that is human entered. Return a string
containing a valid leg ordering, which will consist of a comma
separate list of leg numbers, or tuples of leg numbers."""
entered_values = []
try:
entered_values = ripple.RippleConfig.parse_leg_order(data)
except:
pass
required_legs = self.ripple_config.mechanical.leg_config.keys()
used_legs = {}
actual_legs = []
for group in entered_values:
if isinstance(group, int):
x = group
if x in required_legs and not x in used_legs:
actual_legs.append(x)
used_legs[x] = True
else:
next_tuple = ()
for x in group:
if x in required_legs and not x in used_legs:
next_tuple += (x,)
used_legs[x] = True
if len(next_tuple) > 1:
actual_legs.append(next_tuple)
elif len(next_tuple) == 1:
actual_legs.append(next_tuple[0])
for x in required_legs:
if not x in used_legs:
actual_legs.append(x)
return self.stringify_leg_order(actual_legs)
def handle_current_changed(self, index=2):
if index != 2:
# Make sure we're not still playing.
self.ui.playbackSingleButton.setChecked(False)
self.ui.playbackRepeatButton.setChecked(False)
self.ui.playbackSlowRepeatButton.setChecked(False)
if self.servo_tab.controller:
Task(self.servo_tab.controller.enable_power(
selector.POWER_BRAKE))
return
if self.servo_tab.controller:
Task(self.servo_tab.controller.enable_power(
selector.POWER_ENABLE))
# Update the leg list widget.
available_legs = self.ikconfig_tab.get_all_legs()
enabled_legs = set(self.ikconfig_tab.get_enabled_legs())
for leg_num in available_legs:
leg_str = str(leg_num)
if not self.ui.gaitLegList.findItems(
leg_str, QtCore.Qt.MatchExactly):
self.ui.gaitLegList.addItem(leg_str)
items = self.ui.gaitLegList.findItems(
leg_str, QtCore.Qt.MatchExactly)
item = items[0]
if leg_num in enabled_legs:
item.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable)
else:
item.setFlags(0)
self.handle_leg_change(self.ui.gaitLegList.currentItem())
# Make sure that our configuration is fully up to date.
self.handle_gait_config_change()
self.command_widget.fit_in_view()
def handle_leg_change(self, current_item):
widgets = [self.ui.mountingLegXSpin,
self.ui.mountingLegYSpin,
self.ui.mountingLegZSpin]
if current_item is None:
[x.setEnabled(False) for x in widgets]
return
[x.setEnabled(True) for x in widgets]
with self.in_number_changed:
number = int(current_item.text())
leg_config = self.ripple_config.mechanical.leg_config.get(
number, ripple.LegConfig())
if leg_config.mount_x_mm is not None:
self.ui.mountingLegXSpin.setValue(leg_config.mount_x_mm)
if leg_config.mount_y_mm is not None:
self.ui.mountingLegYSpin.setValue(leg_config.mount_y_mm)
if leg_config.mount_z_mm is not None:
self.ui.mountingLegZSpin.setValue(leg_config.mount_z_mm)
self.handle_leg_data_change()
def handle_leg_data_change(self):
if self.in_number_changed.value:
return
number_item = self.ui.gaitLegList.currentItem()
if number_item is None:
return
number = int(number_item.text())
leg_config = self.ripple_config.mechanical.leg_config.get(
number, ripple.LegConfig())
leg_config.mount_x_mm = self.ui.mountingLegXSpin.value()
leg_config.mount_y_mm = self.ui.mountingLegYSpin.value()
leg_config.mount_z_mm = self.ui.mountingLegZSpin.value()
self.ripple_config.mechanical.leg_config[number] = leg_config
self.handle_gait_config_change()
def handle_gait_config_change(self):
if self.in_gait_changed.value:
return
# Put all of our GUI information into the RippleGait that
# needs to be.
mechanical = self.ripple_config.mechanical
for item_num in range(self.ui.gaitLegList.count()):
item = self.ui.gaitLegList.item(item_num)
leg_number = int(item.text())
if not (item.flags() & QtCore.Qt.ItemIsEnabled):
if leg_number in mechanical.leg_config:
del mechanical.leg_config[leg_number]
elif leg_number not in mechanical.leg_config:
mechanical.leg_config[leg_number] = ripple.LegConfig()
for leg_data in self.ripple_config.mechanical.leg_config.iteritems():
leg_number, leg_config = leg_data
leg_config.idle_x_mm = self.ui.idlePositionXSpin.value()
leg_config.idle_y_mm = self.ui.idlePositionYSpin.value()
leg_config.idle_z_mm = self.ui.idlePositionZSpin.value()
leg_config.leg_ik = self.ikconfig_tab.get_leg_ik(leg_number)
self.ripple_config.mechanical.body_cog_x_mm = \
self.ui.bodyCogXSpin.value()
self.ripple_config.mechanical.body_cog_y_mm = \
self.ui.bodyCogYSpin.value()
self.ripple_config.mechanical.body_cog_z_mm = \
self.ui.bodyCogZSpin.value()
self.ripple_config.max_cycle_time_s = self.ui.maxCycleTimeSpin.value()
self.ripple_config.lift_height_mm = self.ui.liftHeightSpin.value()
self.ripple_config.swing_percent = \
self.ui.swingPercentSpin.value()
self.ripple_config.position_margin_percent = \
self.ui.positionMarginSpin.value()
self.ripple_config.leg_order = \
ripple.RippleConfig.parse_leg_order(
self.validate_leg_order(self.ui.legOrderEdit.text()))
self.ripple_config.body_z_offset_mm = self.ui.bodyZOffsetSpin.value()
self.ripple_config.statically_stable = \
self.ui.staticEnableCheck.isChecked()
self.ripple_config.static_center_factor = \
self.ui.staticCenterSpin.value()
self.ripple_config.static_stable_factor = \
self.ui.staticStableSpin.value()
self.ripple_config.static_margin_mm = \
self.ui.staticMarginSpin.value()
self.ripple = ripple.RippleGait(self.ripple_config)
self.gait_geometry_display.set_gait_config(self.ripple_config)
self.update_gait_graph()
self.handle_playback_config_change()
self.update_allowable_commands()
def handle_leg_order_editing_finished(self):
self.ui.legOrderEdit.setText(self.validate_leg_order(
self.ui.legOrderEdit.text()))
self.handle_gait_config_change()
def update_gait_graph(self):
self.gait_graph_display.set_gait_graph(self.ripple.get_gait_graph())
def get_start_state(self):
begin_index = self.ui.playbackBeginCombo.currentIndex()
this_ripple = ripple.RippleGait(self.ripple_config)
if begin_index == 0: # Idle
begin_state = this_ripple.get_idle_state()
else:
begin_state = this_ripple.get_idle_state()
this_ripple.set_state(begin_state, self.command)
for x in range(int(1.0 / self.phase_step)):
begin_state = this_ripple.advance_phase(self.phase_step)
# When setting a state, we are required to be exactly
# zero. Verify that we are close enough to zero from a
# numerical perspective, then force it to be exactly zero.
assert abs(((begin_state.phase + 0.5) % 1.0) - 0.5) < 1e-4
begin_state.phase = 0.0
return begin_state
def handle_playback_config_change(self):
# Re-run the playback recording the state through an entire
# phase. Then make sure that the graphic state is current for
# the phase that is selected now.
try:
begin_state = self.get_start_state()
begin_state = self.ripple.set_state(begin_state, self.command)
except ripple.NotSupported:
# guess we can't change anything
self.ui.gaitOptionsBrowser.setText('command not possible')
return
self.current_states = (
[begin_state.copy()] +
[self.ripple.advance_phase(self.phase_step).copy()
for x in range(self.ui.playbackPhaseSlider.maximum())])
self.handle_playback_phase_change()
options = self.ripple.options
text = 'cycle_time: %.2fs\nservo_speed: %.1fdps' % (
options.cycle_time_s,
options.servo_speed_dps)
self.ui.gaitOptionsBrowser.setText(text)
def handle_playback_phase_change(self):
if self.playback_mode != self.PLAYBACK_IDLE:
return
self.update_phase(self.ui.playbackPhaseSlider.value() * self.phase_step)
def update_phase(self, phase):
# Update the current geometry rendering.
state = self.current_states[int(phase / self.phase_step)]
self.render_state(state)
def render_state(self, state):
# Render the phase line in the gait graph.
self.gait_graph_display.set_phase(state.phase % 1.0)
self.gait_geometry_display.set_state(state)
if self.servo_tab.controller:
try:
command = state.command_dict()
except ripple.NotSupported:
return
self.next_command = command
if (self.current_command is not None and
not self.current_command.done()):
return
self.current_command = Task(self.set_next_pose())
@asyncio.coroutine
def set_next_pose(self):
count = 0
while self.next_command is not None:
count += 1
command, self.next_command = self.next_command, None
yield From(self.servo_tab.controller.set_pose(
command,
pose_time=2 * 0.001 * PLAYBACK_TIMEOUT_MS))
def handle_geometry_change(self):
frame = [GaitGeometryDisplay.FRAME_ROBOT,
GaitGeometryDisplay.FRAME_WORLD,
GaitGeometryDisplay.FRAME_BODY][
self.ui.geometryFrameCombo.currentIndex()]
projection = [GaitGeometryDisplay.PROJECTION_XY,
GaitGeometryDisplay.PROJECTION_YZ,
GaitGeometryDisplay.PROJECTION_XZ][
self.ui.geometryProjectionCombo.currentIndex()]
self.gait_geometry_display.set_view(
frame, projection, self.ui.geometryScaleSpin.value())
def handle_command_change(self):
if self.in_command_changed.value:
return
with self.in_command_changed:
for spin, name in self.command_spins:
setattr(self.command, name, spin.value())
self.update_command()
self.update_allowable_commands()
    def update_allowable_commands(self):
        """Ask the command widget to refresh which command values are
        allowable for the current ripple config and command."""
        self.command_widget.update_allowable(self.ripple_config, self.command)
def handle_command_reset(self):
with self.in_command_changed:
for spin, _ in self.command_spins:
spin.setValue(0.0)
self.handle_command_change()
    def handle_playback_state_change(self, state, checked):
        """React to one of the playback mode buttons toggling.

        *state* is the PLAYBACK_* constant for the button that changed;
        *checked* is its new toggle state.
        """
        if not checked:
            # If nothing is checked, then stop playback.
            if (not self.ui.playbackSingleButton.isChecked() and
                    not self.ui.playbackRepeatButton.isChecked() and
                    not self.ui.playbackSlowRepeatButton.isChecked()):
                self.playback_timer.stop()
                self.playback_mode = self.PLAYBACK_IDLE

                self.handle_playback_config_change()
            return

        # Make sure everything else is unchecked.
        if state != self.PLAYBACK_SINGLE:
            self.ui.playbackSingleButton.setChecked(False)
        if state != self.PLAYBACK_REPEAT:
            self.ui.playbackRepeatButton.setChecked(False)
        if state != self.PLAYBACK_SLOW_REPEAT:
            self.ui.playbackSlowRepeatButton.setChecked(False)

        # Otherwise, start the appropriate playback mode.
        self.ripple.set_state(self.get_start_state(), self.command)
        self.playback_mode = state
        self.playback_timer.start(PLAYBACK_TIMEOUT_MS)
    def handle_playback_timer(self):
        """Advance the gait by one tick while playback is active."""
        if self.playback_mode == self.PLAYBACK_IDLE:
            print "WARNING: Playback timer fired when idle."
            return

        old_phase = self.ripple.state.phase
        advance = PLAYBACK_TIMEOUT_MS / 1000.0
        if self.playback_mode == self.PLAYBACK_SLOW_REPEAT:
            # Slow playback runs at one tenth real time.
            advance *= 0.1
        state = self.ripple.advance_time(advance)

        # In single-shot mode, stop once the phase wraps past zero
        # (detected as crossing from > 0.5 down to < 0.5).
        if (self.playback_mode == self.PLAYBACK_SINGLE and
                state.phase < 0.5 and old_phase > 0.5):
            self.ui.playbackSingleButton.setChecked(False)
            return

        self.render_state(state)
def handle_widget_set_command(self):
with self.in_command_changed:
for spin, name in self.command_spins:
spin.setValue(getattr(self.command, name))
self.update_command()
def update_command(self):
if self.playback_mode == self.PLAYBACK_IDLE:
# If we're idle, we can just update the phase list right away.
self.handle_playback_config_change()
else:
# Otherwise, just set the command on our gait and let
# playback do its thing.
self.ripple.set_command(self.command)
| 36.45807 | 80 | 0.608577 |
ca86916925e2940d7fc44bc91018986ceb5c1079 | 1,075 | py | Python | cl_inn/contrib/sites/migrations/0003_set_site_domain_and_name.py | Ibrahem3amer/pos_api | af758680c583b85f87ed2fba985b5d2955ebdb23 | [
"MIT"
] | null | null | null | cl_inn/contrib/sites/migrations/0003_set_site_domain_and_name.py | Ibrahem3amer/pos_api | af758680c583b85f87ed2fba985b5d2955ebdb23 | [
"MIT"
] | null | null | null | cl_inn/contrib/sites/migrations/0003_set_site_domain_and_name.py | Ibrahem3amer/pos_api | af758680c583b85f87ed2fba985b5d2955ebdb23 | [
"MIT"
] | null | null | null | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
    """Set site domain and name."""
    Site = apps.get_model('sites', 'Site')
    defaults = {'domain': 'cloudinn.com', 'name': 'cl_inn'}
    Site.objects.update_or_create(id=settings.SITE_ID, defaults=defaults)
def update_site_backward(apps, schema_editor):
    """Revert site domain and name to default."""
    Site = apps.get_model('sites', 'Site')
    defaults = {'domain': 'example.com', 'name': 'example.com'}
    Site.objects.update_or_create(id=settings.SITE_ID, defaults=defaults)
class Migration(migrations.Migration):
    """Populate the django.contrib.sites Site row for this project.

    Reversible: migrating backward restores the example.com defaults.
    """

    dependencies = [
        ('sites', '0002_alter_domain_unique'),
    ]

    operations = [
        migrations.RunPython(update_site_forward, update_site_backward),
    ]
| 25 | 129 | 0.64093 |
32696d7d6408a6de31313f7ff0436a8d0c94def5 | 269 | py | Python | ur_operator/config.py | brennerm/uptimerobot-operator | 25cdca5ec06e15d05ddd5ed4b13b20082fbb8889 | [
"MIT"
] | 47 | 2021-02-03T06:59:17.000Z | 2022-03-30T03:44:43.000Z | ur_operator/config.py | brennerm/uptimerobot-operator | 25cdca5ec06e15d05ddd5ed4b13b20082fbb8889 | [
"MIT"
] | 19 | 2021-02-02T23:26:56.000Z | 2021-11-09T12:13:42.000Z | ur_operator/config.py | brennerm/uptimerobot-operator | 25cdca5ec06e15d05ddd5ed4b13b20082fbb8889 | [
"MIT"
] | 2 | 2021-06-09T02:24:46.000Z | 2021-08-15T06:45:20.000Z | import os
class Config:
    """Operator configuration sourced from environment variables."""

    @property
    def DISABLE_INGRESS_HANDLING(self):
        """True when URO_DISABLE_INGRESS_HANDLING is 'true' or '1'
        (case-insensitive); defaults to False when unset."""
        flag = os.getenv('URO_DISABLE_INGRESS_HANDLING', 'False')
        return flag.lower() in ('true', '1')

    @property
    def UPTIMEROBOT_API_KEY(self):
        """The UptimeRobot API key; raises KeyError when unset."""
        return os.environ['UPTIMEROBOT_API_KEY']
| 24.454545 | 90 | 0.687732 |
4b97a3f9934f8f57c8419fa3ca58eb92a8546959 | 824 | py | Python | app/core/tests/test_commands.py | katalonac/recipe-app-api | e6347ec0aec0e1da1dc51e1c55e9d17b16a8d9f1 | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | katalonac/recipe-app-api | e6347ec0aec0e1da1dc51e1c55e9d17b16a8d9f1 | [
"MIT"
] | 3 | 2020-06-06T01:28:06.000Z | 2021-06-10T19:50:40.000Z | app/core/tests/test_commands.py | katalonac/recipe-app-api | e6347ec0aec0e1da1dc51e1c55e9d17b16a8d9f1 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
| 29.428571 | 74 | 0.674757 |
971ded76bed6277cca1a792de89daef4f37ea15a | 9,605 | py | Python | crichtonweb/release/widgets.py | bpluly/crichton | a2fa09c181ba1e44ee1aae7a57769e1778de7f3a | [
"Apache-2.0"
] | null | null | null | crichtonweb/release/widgets.py | bpluly/crichton | a2fa09c181ba1e44ee1aae7a57769e1778de7f3a | [
"Apache-2.0"
] | null | null | null | crichtonweb/release/widgets.py | bpluly/crichton | a2fa09c181ba1e44ee1aae7a57769e1778de7f3a | [
"Apache-2.0"
] | null | null | null | # Crichton, Admirable Source Configuration Management
# Copyright 2012 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import re
# from django.forms.extras.widgets import SelectDateWidget
from django.forms import DateInput
from django.forms.widgets import Widget, Select, MultiWidget
from django.utils.safestring import mark_safe
__all__ = ('SelectTimeWidget', 'SplitSelectDateTimeWidget')
# Attempt to match many time formats:
# Example: "12:34:56 P.M." matches:
# ('12', '34', ':56', '56', 'P.M.', 'P', '.', 'M', '.')
# ('12', '34', ':56', '56', 'P.M.')
# Note that the colon ":" before seconds is optional, but only if seconds are omitted
time_pattern = r'(\d\d?):(\d\d)(:(\d\d))? *([aApP]\.?[mM]\.?)?$'
RE_TIME = re.compile(time_pattern)

# The following are just more readable ways to access re.matched groups:
# (indices into RE_TIME.match(...).groups())
HOURS = 0
MINUTES = 1
SECONDS = 3  # group 2 is the whole optional ":ss" including the colon
MERIDIEM = 4
class SelectTimeWidget(Widget):
    """
    A Widget that splits time input into <select> elements.

    Allows form to show as 24hr: <hour>:<minute>:<second>, (default)
    or as 12hr: <hour>:<minute>:<second> <am|pm>

    Also allows user-defined increments for minutes/seconds
    Modified from: http://djangosnippets.org/snippets/1202/
    """
    hour_field = '%s_hour'
    minute_field = '%s_minute'
    second_field = '%s_second'
    meridiem_field = '%s_meridiem'
    twelve_hr = False  # Default to 24hr.

    def __init__(self, attrs=None, hour_step=None, minute_step=None,
                 second_step=None, twelve_hr=False, show_seconds=True):
        """
        hour_step, minute_step, second_step are optional step values for
        the range of values for the associated select element
        """
        self.attrs = attrs or {}
        self.show_seconds = show_seconds

        if twelve_hr:
            self.twelve_hr = True  # Do 12hr (rather than 24hr)
            self.meridiem_val = 'a.m.'  # Default to Morning (A.M.)

        if hour_step and twelve_hr:
            self.hours = range(1, 13, hour_step)
        elif hour_step:  # 24hr, with stepping.
            self.hours = range(0, 24, hour_step)
        elif twelve_hr:  # 12hr, no stepping
            self.hours = range(1, 13)
        else:  # 24hr, no stepping
            self.hours = range(0, 24)

        if minute_step:
            self.minutes = range(0, 60, minute_step)
        else:
            self.minutes = range(0, 60)

        if second_step:
            self.seconds = range(0, 60, second_step)
        else:
            self.seconds = range(0, 60)

    def render(self, name, value, attrs=None):
        """Render the hour/minute[/second][/meridiem] <select> elements.

        *value* may be a datetime.time (or anything with hour/minute/
        second attributes) or a string matching RE_TIME.
        """
        try:  # try to get time values from a datetime.time object (value)
            hour_val, minute_val, second_val = \
                value.hour, value.minute, value.second
            if self.twelve_hr:
                if hour_val >= 12:
                    self.meridiem_val = 'p.m.'
                else:
                    self.meridiem_val = 'a.m.'
        except AttributeError:
            hour_val = minute_val = second_val = 0
            if isinstance(value, basestring):
                match = RE_TIME.match(value)
                if match:
                    time_groups = match.groups()
                    hour_val = int(time_groups[HOURS]) % 24  # force to range(0-24)
                    minute_val = int(time_groups[MINUTES])
                    if time_groups[SECONDS] is None:
                        second_val = 0
                    else:
                        second_val = int(time_groups[SECONDS])

                    # check to see if meridiem was passed in
                    if time_groups[MERIDIEM] is not None:
                        self.meridiem_val = time_groups[MERIDIEM]
                    else:  # otherwise, set the meridiem based on the time
                        if self.twelve_hr:
                            if hour_val >= 12:
                                self.meridiem_val = 'p.m.'
                            else:
                                self.meridiem_val = 'a.m.'
                        else:
                            self.meridiem_val = None

        # If we're doing a 12-hr clock, there will be a meridiem value,
        # so make sure the hours get printed correctly
        if self.twelve_hr and self.meridiem_val:
            if (self.meridiem_val.lower().startswith('p')
                    and hour_val > 12 and hour_val < 24):
                hour_val = hour_val % 12
            elif hour_val == 0:
                hour_val = 12

        output = []
        if 'id' in self.attrs:
            id_ = self.attrs['id']
        else:
            id_ = 'id_%s' % name

        # For times to get displayed correctly, the values MUST be
        # converted to unicode.  When Select builds a list of options,
        # it checks against Unicode values.
        hour_val = u"%.2d" % hour_val
        minute_val = u"%.2d" % minute_val
        second_val = u"%.2d" % second_val

        hour_choices = [("%.2d" % i, "%.2d" % i) for i in self.hours]
        local_attrs = self.build_attrs(id=self.hour_field % id_)
        select_html = Select(choices=hour_choices).render(
            self.hour_field % name, hour_val, local_attrs)
        output.append(select_html)

        minute_choices = [("%.2d" % i, "%.2d" % i) for i in self.minutes]
        local_attrs['id'] = self.minute_field % id_
        select_html = Select(choices=minute_choices).render(
            self.minute_field % name, minute_val, local_attrs)
        output.append(select_html)

        if self.show_seconds:
            second_choices = [("%.2d" % i, "%.2d" % i) for i in self.seconds]
            local_attrs['id'] = self.second_field % id_
            select_html = Select(choices=second_choices).render(
                self.second_field % name, second_val, local_attrs)
            output.append(select_html)

        if self.twelve_hr:
            # If we were given an initial value, make sure the correct
            # meridiem gets selected.
            if self.meridiem_val is not None \
                    and self.meridiem_val.startswith('p'):
                meridiem_choices = [('p.m.', 'p.m.'), ('a.m.', 'a.m.')]
            else:
                meridiem_choices = [('a.m.', 'a.m.'), ('p.m.', 'p.m.')]

            # (was a harmless duplicated assignment; collapsed to one)
            local_attrs['id'] = self.meridiem_field % id_
            select_html = Select(choices=meridiem_choices).render(
                self.meridiem_field % name, self.meridiem_val, local_attrs)
            output.append(select_html)

        return mark_safe(u'\n'.join(output))

    def id_for_label(self, id_):
        return '%s_hour' % id_
    id_for_label = classmethod(id_for_label)

    def value_from_datadict(self, data, files, name):
        """Reassemble the posted select values into an 'HH:MM:SS' string."""
        # if there's not h:m:s data, assume zero:
        h = data.get(self.hour_field % name, '00')    # hour
        m = data.get(self.minute_field % name, '00')  # minute
        s = data.get(self.second_field % name, '00')  # second

        if not h or not m or (self.show_seconds and not s):
            return data.get(name, None)

        meridiem = data.get(self.meridiem_field % name, None)

        # NOTE: if meridiem is None, assume 24-hr
        if meridiem is not None:
            if meridiem.lower().startswith('p') and int(h) != 12:
                # BUG FIX: the modulo must apply to the integer hour,
                # not to the already-formatted string.  The previous
                # form "'%.2d' % (int(h)+12)%24" raised TypeError for
                # any p.m. hour other than 12.
                h = '%.2d' % ((int(h) + 12) % 24)
            elif meridiem.lower().startswith('a') and int(h) == 12:
                h = '00'

        return '%s:%s:%s' % (h, m, s)
class SplitSelectDateTimeWidget(MultiWidget):
    """
    MultiWidget = A widget that is composed of multiple widgets.

    This class combines SelectTimeWidget and DateInput so we have
    something like SplitDateTimeWidget (in django.forms.widgets), but
    with Select elements for the time portion.

    Modified from: http://djangosnippets.org/snippets/1206/
    """
    def __init__(self, attrs=None, date_format=None, hour_step=None,
                 minute_step=None, second_step=None, twelve_hr=None,
                 show_seconds=True, show_labels=True):
        """Pass all these parameters to their respective widget
        constructors."""
        widgets = (DateInput(attrs=attrs, format=date_format),
                   SelectTimeWidget(attrs=attrs, hour_step=hour_step,
                                    minute_step=minute_step,
                                    second_step=second_step,
                                    twelve_hr=twelve_hr,
                                    show_seconds=show_seconds))
        super(SplitSelectDateTimeWidget, self).__init__(widgets, attrs)
        self.show_labels = show_labels

    def decompress(self, value):
        """Split a datetime into [date, time] for the sub-widgets.

        Microseconds are dropped since the time widget cannot
        represent them.
        """
        if value:
            return [value.date(), value.time().replace(microsecond=0)]
        return [None, None]

    def format_output(self, rendered_widgets):
        """
        Given a list of rendered widgets (as strings), it inserts an HTML
        linebreak between them and optionally adds labels.

        Returns a string representing the HTML for the whole lot.
        """
        if self.show_labels:
            return u"Date: %s<br/>Time: %s" % (rendered_widgets[0],
                                               rendered_widgets[1])
        else:
            return u"<br/>".join(rendered_widgets)
| 41.76087 | 168 | 0.603019 |
8bb864f877aeaaa619c40ddfa528ca1daab64f14 | 1,664 | py | Python | Ninja/Leetcode/482_License_Key_Formatting.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | 61 | 2015-02-03T20:25:55.000Z | 2021-05-17T19:33:40.000Z | Ninja/Leetcode/482_License_Key_Formatting.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | null | null | null | Ninja/Leetcode/482_License_Key_Formatting.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | 37 | 2015-02-04T07:12:52.000Z | 2020-05-16T18:47:16.000Z | """
You are given a license key represented as a string S which consists only alphanumeric character and dashes. The string is separated into N+1 groups by N dashes.
Given a number K, we would want to reformat the strings such that each group contains exactly K characters, except for the first group which could be shorter than K, but still must contain at least one character. Furthermore, there must be a dash inserted between two groups and all lowercase letters should be converted to uppercase.
Given a non-empty string S and a number K, format the string according to the rules described above.
Example 1:
Input: S = "5F3Z-2e-9-w", K = 4
Output: "5F3Z-2E9W"
Explanation: The string S has been split into two parts, each part has 4 characters.
Note that the two extra dashes are not needed and can be removed.
Example 2:
Input: S = "2-5g-3-J", K = 2
Output: "2-5G-3J"
Explanation: The string S has been split into three parts, each part has 2 characters except the first part as it could be shorter as mentioned above.
Note:
1. The length of string S will not exceed 12,000, and K is a positive integer.
2. String S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9) and dashes(-).
3. String S is non-empty.
"""
class Solution:
    def licenseKeyFormatting(self, S: str, K: int) -> str:
        """Reformat *S* into dash-separated uppercase groups of K
        characters, the first group possibly shorter (LeetCode 482)."""
        cleaned = S.replace('-', '').upper()
        groups = []
        end = len(cleaned)
        # Walk from the right so only the leftmost group can be short.
        while end > 0:
            start = max(0, end - K)
            groups.append(cleaned[start:end])
            end = start
        groups.reverse()
        return '-'.join(groups)
| 41.6 | 334 | 0.699519 |
5ba2d1883f0601ba1553e3955cb26406ca7949b7 | 2,658 | py | Python | nltkma/test/unit/test_rte_classify.py | aydtmiri/nltk-ma | 5d7dd01844ee063fc910a648948624b6a2dddaf9 | [
"Apache-2.0"
] | null | null | null | nltkma/test/unit/test_rte_classify.py | aydtmiri/nltk-ma | 5d7dd01844ee063fc910a648948624b6a2dddaf9 | [
"Apache-2.0"
] | null | null | null | nltkma/test/unit/test_rte_classify.py | aydtmiri/nltk-ma | 5d7dd01844ee063fc910a648948624b6a2dddaf9 | [
"Apache-2.0"
] | null | null | null | import pytest
from nltkma.corpus import rte as rte_corpus
from nltkma.classify.rte_classify import RTEFeatureExtractor, rte_features, rte_classifier
expected_from_rte_feature_extration = """
alwayson => True
ne_hyp_extra => 0
ne_overlap => 1
neg_hyp => 0
neg_txt => 0
word_hyp_extra => 3
word_overlap => 3
alwayson => True
ne_hyp_extra => 0
ne_overlap => 1
neg_hyp => 0
neg_txt => 0
word_hyp_extra => 2
word_overlap => 1
alwayson => True
ne_hyp_extra => 1
ne_overlap => 1
neg_hyp => 0
neg_txt => 0
word_hyp_extra => 1
word_overlap => 2
alwayson => True
ne_hyp_extra => 1
ne_overlap => 0
neg_hyp => 0
neg_txt => 0
word_hyp_extra => 6
word_overlap => 2
alwayson => True
ne_hyp_extra => 1
ne_overlap => 0
neg_hyp => 0
neg_txt => 0
word_hyp_extra => 4
word_overlap => 0
alwayson => True
ne_hyp_extra => 1
ne_overlap => 0
neg_hyp => 0
neg_txt => 0
word_hyp_extra => 3
word_overlap => 1
"""
class TestRTEClassifier:
    """Unit tests for RTE feature extraction and classifier training."""

    # Test the feature extraction method.
    def test_rte_feature_extraction(self):
        pairs = rte_corpus.pairs(['rte1_dev.xml'])[:6]
        test_output = [
            "%-15s => %s" % (key, rte_features(pair)[key])
            for pair in pairs
            for key in sorted(rte_features(pair))
        ]
        expected_output = expected_from_rte_feature_extration.strip().split('\n')
        # Remove null strings.
        expected_output = list(filter(None, expected_output))
        assert test_output == expected_output

    # Test the RTEFeatureExtractor object.
    def test_feature_extractor_object(self):
        rtepair = rte_corpus.pairs(['rte3_dev.xml'])[33]
        extractor = RTEFeatureExtractor(rtepair)
        assert extractor.hyp_words == {'member', 'China', 'SCO.'}
        assert extractor.overlap('word') == set()
        assert extractor.overlap('ne') == {'China'}
        assert extractor.hyp_extra('word') == {'member'}

    # Test the RTE classifier training.
    def test_rte_classification_without_megam(self):
        # Use a sample size for unit testing, since we
        # don't need to fully train these classifiers
        clf = rte_classifier('IIS', sample_N=100)
        clf = rte_classifier('GIS', sample_N=100)

    @pytest.mark.skip("Skipping tests with dependencies on MEGAM")
    def test_rte_classification_with_megam(self):
        # NOTE(review): `nltk` is never imported in this module (only
        # nltkma is), so this skipped test would raise NameError if
        # re-enabled — confirm the intended module before unskipping.
        nltk.config_megam('/usr/local/bin/megam')
        clf = rte_classifier('megam', sample_N=100)
        clf = rte_classifier('BFGS', sample_N=100)
| 28.580645 | 90 | 0.624153 |
87950ece82941a89383034c41dc1552efac3a0a5 | 1,703 | py | Python | schedule.py | bradgrantham/weather_fax | 0ad4c9e54a405afa65357804da7e7677e56f5cbf | [
"Apache-2.0"
] | null | null | null | schedule.py | bradgrantham/weather_fax | 0ad4c9e54a405afa65357804da7e7677e56f5cbf | [
"Apache-2.0"
] | null | null | null | schedule.py | bradgrantham/weather_fax | 0ad4c9e54a405afa65357804da7e7677e56f5cbf | [
"Apache-2.0"
] | null | null | null | import urllib2
import bisect
import time
# http://tgftp.nws.noaa.gov/fax/hfreyes.txt
# (station name, NOAA broadcast-schedule URL) pairs for the HF
# weather-fax transmitters this script reports on.
stations = [
    ["New Orleans, LA", "http://tgftp.nws.noaa.gov/fax/hfgulf.txt"],
    ["Pt. Reyes, CA", "http://tgftp.nws.noaa.gov/fax/hfreyes.txt"],
    ["Honolulu, HI", "http://tgftp.nws.noaa.gov/fax/hfhi.txt"],
    ["Kodiak, AK", "http://tgftp.nws.noaa.gov/fax/hfak.txt"],
    ["Boston, Massachusetts", "http://tgftp.nws.noaa.gov/fax/hfmarsh.txt"],
]
# For each station: download its schedule, parse the transmission
# times, and report the program currently airing and the next one
# (times are UTC, as HHMM integers).
for (name, url) in stations:
    stream = urllib2.urlopen(url)
    content = stream.read()
    lines = content.split("\r\n")
    last_time = 0
    last_title = ""
    schedule = []
    for l in lines:
        # Schedule rows look like "HHMM/HHMM  TITLE..."; the slash at
        # column 4 identifies them.  Either start time may be "----".
        if len(l) >= 5 and l[4] == "/":
            start1 = l[0:4]
            start2 = l[5:9]
            title = l[11:50]
            if start1 != "----":
                t = int(start1[0:2]) * 100 + int(start1[2:4])
                schedule.append((t, title))
                if t > last_time:
                    (last_time, last_title) = (t, title)
            if start2 != "----":
                t = int(start2[0:2]) * 100 + int(start2[2:4])
                schedule.append((t, title))
                if t > last_time:
                    (last_time, last_title) = (t, title)
    # Seed midnight with the day's final transmission so a lookup
    # before the first entry resolves to the overnight program.
    schedule.append((0, last_title))
    schedule.sort(key = lambda e : e[0])
    gmt = time.gmtime()
    now = gmt[3] * 100 + gmt[4]  # current UTC time as an HHMM integer
    which = bisect.bisect_left([t for (t, title) in schedule], now)
    print "%s:" % name
    print " current : %s started at %d" % (schedule[which - 1][1], schedule[which - 1][0])
    print " next : %s starting at %d" % (schedule[which][1], schedule[which][0])
    # for start, title in schedule:
    #     print start, title
| 30.963636 | 93 | 0.526718 |
e8b72bb007d60498713fe9b445da437929f32bad | 557 | py | Python | export_models.py | eudaimoniatech/microNER | 30ee51ee2360d8796844149bee6c47069d84c32a | [
"MIT"
] | null | null | null | export_models.py | eudaimoniatech/microNER | 30ee51ee2360d8796844149bee6c47069d84c32a | [
"MIT"
] | null | null | null | export_models.py | eudaimoniatech/microNER | 30ee51ee2360d8796844149bee6c47069d84c32a | [
"MIT"
] | 2 | 2022-01-13T21:42:39.000Z | 2022-01-14T10:01:45.000Z | # Since we were unable to reproduce the build of this Dockerfile, we had to export the models from the docker image uhhlt/microner:v0.1 on docker hub.
import tensorflow as tf
import scripts.ner as ner
def export_model(filename):
    """Load the NER model stored in *filename* and export it.

    Writes the Keras model to exported_models/<filename> and its
    architecture as JSON to exported_models/<filename>.json.
    """
    folder = "exported_models/"
    n = ner.NerModel(filename)
    n.model.save(folder + filename)
    mjson = n.model.to_json()
    # Use a context manager so the file is closed even if the write
    # fails (the previous open/close pair leaked the handle on error).
    with open(folder + filename + ".json", "w") as f:
        f.write(mjson)
# Export each trained model variant shipped with the docker image.
export_model("conll.h5")
export_model("germeval-conll.h5")
export_model("germeval.h5")
export_model("germeval-inner.h5")
| 27.85 | 150 | 0.721724 |
0560891d19d58b8d82714b22f25524cc155827a1 | 6,368 | py | Python | src/main/python/apache/thermos/testing/runner.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 479 | 2015-03-27T22:59:49.000Z | 2022-03-09T08:40:49.000Z | src/main/python/apache/thermos/testing/runner.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 69 | 2015-05-26T20:06:29.000Z | 2020-01-13T19:18:59.000Z | src/main/python/apache/thermos/testing/runner.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 226 | 2015-03-27T20:02:59.000Z | 2022-03-09T08:40:53.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import atexit
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import time
from thrift.TSerialization import deserialize as thrift_deserialize
from twitter.common.contextutil import environment_as, temporary_file
from apache.thermos.common.ckpt import CheckpointDispatcher
from apache.thermos.common.path import TaskPath
from apache.thermos.config.loader import ThermosTaskWrapper
from gen.apache.thermos.ttypes import RunnerState
class Runner(object):
    """Runs a single Thermos task in a subprocess for testing.

    Checkpoint writes can be made to fail randomly (success_rate < 100,
    seeded by random_seed) so that recovery behavior can be exercised
    deterministically across reruns.
    """

    # Template of the Python program executed by run().  The %(...)s
    # placeholders are substituted from this Runner's configuration.
    RUN_JOB_SCRIPT = """
# this is a hack to process wheel nspkg declarations
import os, sys, site
for path in sys.path:
  if path.endswith('.whl') and os.path.isdir(path):
    site.addsitedir(path)

import os
import random
import sys
from twitter.common import log
from twitter.common.log.options import LogOptions
from apache.thermos.config.loader import ThermosConfigLoader
from apache.thermos.core.helper import TaskRunnerHelper
from apache.thermos.core.runner import TaskRunner, TaskRunnerUniversalHandler
from thrift.TSerialization import serialize as thrift_serialize

random.seed(%(random_seed)d)

log.init('runner_base')
LogOptions.set_disk_log_level('DEBUG')

task = ThermosConfigLoader.load_json('%(filename)s')
task = task.tasks()[0].task

success_rate=%(success_rate)d

class AngryHandler(TaskRunnerUniversalHandler):
  def checkpoint(self, record):
    if not self._runner._recovery:
      if random.randint(0, 100) <= success_rate:
        super(AngryHandler, self).checkpoint(record)
      else:
        sys.exit(1)

sandbox = os.path.join('%(sandbox)s', '%(task_id)s')
args = %(extra_task_runner_args)r
args['task_id'] = '%(task_id)s'
args['universal_handler'] = AngryHandler

runner = TaskRunner(task, '%(root)s', sandbox, **args)
runner.run()

with open('%(state_filename)s', 'w') as fp:
  fp.write(thrift_serialize(runner.state))
"""

    def __init__(self, task, success_rate=100, random_seed=31337,
                 **extra_task_runner_args):
        """
        task = Thermos task
        portmap = port map
        success_rate = success rate of writing checkpoint to disk
        """
        self.task = task

        # Serialize the task config to a temp file for the subprocess.
        with temporary_file(cleanup=False) as fp:
            self.job_filename = fp.name
            fp.write(ThermosTaskWrapper(task).to_json())

        self.state_filename = tempfile.mktemp()
        self.tempdir = tempfile.mkdtemp()
        self.task_id = '%s-runner-base' % int(time.time() * 1000000)
        self.sandbox = os.path.join(self.tempdir, 'sandbox')
        self.extra_task_runner_args = extra_task_runner_args
        self.cleaned = False
        self.pathspec = TaskPath(root=self.tempdir, task_id=self.task_id)
        self.script_filename = None
        self.success_rate = success_rate
        self.random_seed = random_seed
        self._run_count = 0

    @property
    def pid(self):
        # Pid of the most recent subprocess started by run().
        return self.po.pid

    @property
    def root(self):
        return self.tempdir

    def run(self):
        """Execute the task once in a subprocess; return its exit code.

        Also loads the resulting RunnerState into self.state and a
        state replayed from the checkpoint stream into
        self.reconstructed_state.
        """
        self._run_count += 1
        atexit.register(self.cleanup)

        if self.script_filename:
            os.unlink(self.script_filename)

        with temporary_file(cleanup=False) as fp:
            self.script_filename = fp.name
            fp.write(self.RUN_JOB_SCRIPT % {
                'filename': self.job_filename,
                'sandbox': self.sandbox,
                'root': self.tempdir,
                'task_id': self.task_id,
                'state_filename': self.state_filename,
                'success_rate': self.success_rate,
                # Vary the seed per run so reruns make different
                # checkpoint-failure choices.
                'random_seed': self.random_seed + self._run_count,
                'extra_task_runner_args': self.extra_task_runner_args,
            })

        with environment_as(PYTHONPATH=os.pathsep.join(sys.path)):
            self.po = subprocess.Popen(
                [sys.executable, self.script_filename],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            try:
                so, se = self.po.communicate()
            except OSError as e:
                # ECHILD: the child was already reaped (e.g. killed).
                if e.errno == errno.ECHILD:
                    so = se = 'Killed'
                else:
                    raise

        rc = self.po.returncode
        if rc != 0:
            if os.path.exists(self.job_filename):
                with open(self.job_filename) as fp:
                    config = fp.read()
            else:
                config = 'Nonexistent!'
            if 'THERMOS_DEBUG' in os.environ:
                print("Runner failed!\n\n\nconfig:%s\n\n\nstdout:%s\n\n\nstderr:%s\n\n\n" % (
                    config, so, se))

        # Deserialize the final runner state written by the subprocess;
        # fall back to an empty state if it never got written.
        try:
            with open(self.state_filename, 'r') as fp:
                self.state = thrift_deserialize(RunnerState(), fp.read())
        except Exception as e:
            if 'THERMOS_DEBUG' in os.environ:
                print('Failed to load Runner state: %s' % e, file=sys.stderr)
            self.state = RunnerState()

        # Independently rebuild state from the checkpoint stream so
        # tests can compare the two.
        try:
            self.reconstructed_state = CheckpointDispatcher.from_file(
                self.pathspec.getpath('runner_checkpoint'))
        except Exception as e:
            print('Failed to replay checkpoint: %s' % e, file=sys.stderr)
            self.reconstructed_state = None
        self.initialized = True
        return rc

    def cleanup(self):
        """Kill the subprocess and remove temp files (idempotent)."""
        if not self.cleaned:
            if hasattr(self, 'po'):
                try:
                    self.po.kill()
                except Exception as e:
                    print('Failed to kill runner: %s' % e, file=sys.stderr)
                    pass
            os.unlink(self.job_filename)
            os.unlink(self.script_filename)
            # Keep the workdir around for inspection when debugging.
            if 'THERMOS_DEBUG' not in os.environ:
                shutil.rmtree(self.tempdir, ignore_errors=True)
            else:
                print('Logs saved in %s' % self.tempdir)
            self.cleaned = True
class RunnerTestBase(object):
    """Base class for tests that run a single task via Runner.

    Subclasses override task() (and optionally set a class-level
    portmap) and assert against cls.state after setup_class has run
    the task once.
    """

    @classmethod
    def extra_task_runner_args(cls):
        # Pass through an optional class-level portmap.
        return dict(portmap=getattr(cls, 'portmap', {}))

    @classmethod
    def task(cls):
        raise NotImplementedError

    @classmethod
    def setup_class(cls):
        cls.runner = Runner(cls.task(), **cls.extra_task_runner_args())
        cls.runner.run()
        cls.state = cls.runner.state

    @classmethod
    def teardown_class(cls):
        cls.runner.cleanup()

    def test_runner_state_reconstruction(self):
        # The serialized final state and the checkpoint replay must agree.
        assert self.state == self.runner.reconstructed_state
| 29.896714 | 90 | 0.696765 |
35226a09720f04f29e4bb7e8d62d263c2e23d78e | 58,395 | py | Python | test/engine/test_deprecations.py | daniel--/sqlalchemy | 78e598e3a5b8df7419a600c291f90260e598c9b7 | [
"MIT"
] | null | null | null | test/engine/test_deprecations.py | daniel--/sqlalchemy | 78e598e3a5b8df7419a600c291f90260e598c9b7 | [
"MIT"
] | null | null | null | test/engine/test_deprecations.py | daniel--/sqlalchemy | 78e598e3a5b8df7419a600c291f90260e598c9b7 | [
"MIT"
] | null | null | null | import re
import time
import sqlalchemy as tsa
from sqlalchemy import column
from sqlalchemy import create_engine
from sqlalchemy import engine_from_config
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import TypeDecorator
from sqlalchemy import VARCHAR
from sqlalchemy.engine.base import Engine
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
from .test_parseconnect import mock_dbapi
tlengine = None
class SomeException(Exception):
    """Marker exception type used by tests in this module."""
    pass
def _tlengine_deprecated():
    """Context manager asserting the threadlocal-strategy deprecation
    warning is emitted."""
    message = "The 'threadlocal' engine strategy is deprecated"
    return testing.expect_deprecated(message)
class TableNamesOrderByTest(fixtures.TestBase):
    """Exercises the deprecated Inspector.get_table_names(order_by=...)."""

    @testing.provide_metadata
    def test_order_by_foreign_key(self):
        # Build a dependency chain t3 -> t2 -> t1 so that
        # order_by="foreign_key" has a unique topological answer.
        Table(
            "t1",
            self.metadata,
            Column("id", Integer, primary_key=True),
            test_needs_acid=True,
        )
        Table(
            "t2",
            self.metadata,
            Column("id", Integer, primary_key=True),
            Column("t1id", Integer, ForeignKey("t1.id")),
            test_needs_acid=True,
        )
        Table(
            "t3",
            self.metadata,
            Column("id", Integer, primary_key=True),
            Column("t2id", Integer, ForeignKey("t2.id")),
            test_needs_acid=True,
        )
        self.metadata.create_all()
        insp = inspect(testing.db)
        with testing.expect_deprecated(
            "The get_table_names.order_by parameter is deprecated "
        ):
            tnames = insp.get_table_names(order_by="foreign_key")
        # Referenced tables sort before their referrers.
        eq_(tnames, ["t1", "t2", "t3"])
class CreateEngineTest(fixtures.TestBase):
    """Covers deprecated engine_from_config pool options."""

    def test_pool_threadlocal_from_config(self):
        dbapi = mock_dbapi

        # 'false' parses without any deprecation warning...
        config = {
            "sqlalchemy.url": "postgresql://scott:tiger@somehost/test",
            "sqlalchemy.pool_threadlocal": "false",
        }

        e = engine_from_config(config, module=dbapi, _initialize=False)
        eq_(e.pool._use_threadlocal, False)

        # ...while 'true' engages the deprecated threadlocal pool flag.
        config = {
            "sqlalchemy.url": "postgresql://scott:tiger@somehost/test",
            "sqlalchemy.pool_threadlocal": "true",
        }

        with testing.expect_deprecated(
            "The Pool.use_threadlocal parameter is deprecated"
        ):
            e = engine_from_config(config, module=dbapi, _initialize=False)
        eq_(e.pool._use_threadlocal, True)
class RecycleTest(fixtures.TestBase):
    """Pool recycle behavior combined with the deprecated threadlocal
    pool and Engine.contextual_connect()."""

    __backend__ = True

    def test_basic(self):
        with testing.expect_deprecated(
            "The Pool.use_threadlocal parameter is deprecated"
        ):
            engine = engines.reconnecting_engine(
                options={"pool_threadlocal": True}
            )

        with testing.expect_deprecated(
            r"The Engine.contextual_connect\(\) method is deprecated"
        ):
            conn = engine.contextual_connect()
        eq_(conn.execute(select([1])).scalar(), 1)
        conn.close()

        # set the pool recycle down to 1.
        # we aren't doing this inline with the
        # engine create since cx_oracle takes way
        # too long to create the 1st connection and don't
        # want to build a huge delay into this test.
        engine.pool._recycle = 1

        # kill the DB connection
        engine.test_shutdown()

        # wait until past the recycle period
        time.sleep(2)

        # can connect, no exception
        with testing.expect_deprecated(
            r"The Engine.contextual_connect\(\) method is deprecated"
        ):
            conn = engine.contextual_connect()
        eq_(conn.execute(select([1])).scalar(), 1)
        conn.close()
class TLTransactionTest(fixtures.TestBase):
    """Transactional semantics of the deprecated "threadlocal" engine
    strategy: implicit per-thread connections, nested begin/commit
    counting, savepoints and two-phase transactions."""
    __requires__ = ("ad_hoc_engines",)
    __backend__ = True
    @classmethod
    def setup_class(cls):
        """Create the shared threadlocal engine and the query_users table."""
        global users, metadata, tlengine
        with _tlengine_deprecated():
            tlengine = testing_engine(options=dict(strategy="threadlocal"))
        metadata = MetaData()
        users = Table(
            "query_users",
            metadata,
            Column(
                "user_id",
                INT,
                Sequence("query_users_id_seq", optional=True),
                primary_key=True,
            ),
            Column("user_name", VARCHAR(20)),
            test_needs_acid=True,
        )
        metadata.create_all(tlengine)
    def teardown(self):
        # wipe rows between tests; close the implicit result connection
        tlengine.execute(users.delete()).close()
    @classmethod
    def teardown_class(cls):
        """Drop the table and fully dispose of the shared engine."""
        tlengine.close()
        metadata.drop_all(tlengine)
        tlengine.dispose()
    def setup(self):
        # ensure tests start with engine closed
        tlengine.close()
    @testing.crashes(
        "oracle", "TNS error of unknown origin occurs on the buildbot."
    )
    def test_rollback_no_trans(self):
        """rollback() outside of any transaction is a no-op, not an error."""
        with _tlengine_deprecated():
            tlengine = testing_engine(options=dict(strategy="threadlocal"))
        # shouldn't fail
        tlengine.rollback()
        tlengine.begin()
        tlengine.rollback()
        # shouldn't fail
        tlengine.rollback()
    def test_commit_no_trans(self):
        """commit() outside of any transaction is a no-op, not an error."""
        with _tlengine_deprecated():
            tlengine = testing_engine(options=dict(strategy="threadlocal"))
        # shouldn't fail
        tlengine.commit()
        tlengine.begin()
        tlengine.rollback()
        # shouldn't fail
        tlengine.commit()
    def test_prepare_no_trans(self):
        """prepare() outside of any transaction is a no-op, not an error."""
        with _tlengine_deprecated():
            tlengine = testing_engine(options=dict(strategy="threadlocal"))
        # shouldn't fail
        tlengine.prepare()
        tlengine.begin()
        tlengine.rollback()
        # shouldn't fail
        tlengine.prepare()
    def test_connection_close(self):
        """test that when connections are closed for real, transactions
        are rolled back and disposed."""
        c = tlengine.contextual_connect()
        c.begin()
        assert c.in_transaction()
        c.close()
        assert not c.in_transaction()
    def test_transaction_close(self):
        """Transaction.close() on a nested transaction does not commit;
        closing the outermost transaction rolls everything back."""
        c = tlengine.contextual_connect()
        t = c.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        t2 = c.begin()
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        t2.close()
        # rows remain visible inside the still-open outer transaction
        result = c.execute("select * from query_users")
        assert len(result.fetchall()) == 4
        t.close()
        external_connection = tlengine.connect()
        result = external_connection.execute("select * from query_users")
        try:
            assert len(result.fetchall()) == 0
        finally:
            c.close()
            external_connection.close()
    def test_rollback(self):
        """test a basic rollback"""
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.rollback()
        external_connection = tlengine.connect()
        result = external_connection.execute("select * from query_users")
        try:
            assert len(result.fetchall()) == 0
        finally:
            external_connection.close()
    def test_commit(self):
        """test a basic commit"""
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.commit()
        external_connection = tlengine.connect()
        result = external_connection.execute("select * from query_users")
        try:
            assert len(result.fetchall()) == 3
        finally:
            external_connection.close()
    def test_with_interface(self):
        """TLTransaction supports the context-manager protocol; __exit__
        with an exception rolls back, __exit__ without one commits."""
        trans = tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        trans.commit()
        trans = tlengine.begin()
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        # simulated exception exit -> rollback of user3
        trans.__exit__(Exception, "fake", None)
        trans = tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        # clean exit -> commit of user4
        trans.__exit__(None, None, None)
        eq_(
            tlengine.execute(
                users.select().order_by(users.c.user_id)
            ).fetchall(),
            [(1, "user1"), (2, "user2"), (4, "user4")],
        )
    def test_commits(self):
        """Sequential begin/commit pairs on one contextual connection."""
        connection = tlengine.connect()
        assert (
            connection.execute("select count(*) from query_users").scalar()
            == 0
        )
        connection.close()
        connection = tlengine.contextual_connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name="user1")
        transaction.commit()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=2, user_name="user2")
        connection.execute(users.insert(), user_id=3, user_name="user3")
        transaction.commit()
        transaction = connection.begin()
        result = connection.execute("select * from query_users")
        rows = result.fetchall()
        assert len(rows) == 3, "expected 3 got %d" % len(rows)
        transaction.commit()
        connection.close()
    def test_rollback_off_conn(self):
        # test that a TLTransaction opened off a TLConnection allows
        # that TLConnection to be aware of the transactional context
        conn = tlengine.contextual_connect()
        trans = conn.begin()
        conn.execute(users.insert(), user_id=1, user_name="user1")
        conn.execute(users.insert(), user_id=2, user_name="user2")
        conn.execute(users.insert(), user_id=3, user_name="user3")
        trans.rollback()
        external_connection = tlengine.connect()
        result = external_connection.execute("select * from query_users")
        try:
            assert len(result.fetchall()) == 0
        finally:
            conn.close()
            external_connection.close()
    def test_morerollback_off_conn(self):
        # test that an existing TLConnection automatically takes place
        # in a TLTransaction opened on a second TLConnection
        conn = tlengine.contextual_connect()
        conn2 = tlengine.contextual_connect()
        trans = conn2.begin()
        conn.execute(users.insert(), user_id=1, user_name="user1")
        conn.execute(users.insert(), user_id=2, user_name="user2")
        conn.execute(users.insert(), user_id=3, user_name="user3")
        trans.rollback()
        external_connection = tlengine.connect()
        result = external_connection.execute("select * from query_users")
        try:
            assert len(result.fetchall()) == 0
        finally:
            conn.close()
            conn2.close()
            external_connection.close()
    def test_commit_off_connection(self):
        """Commit issued via the TLTransaction object persists the rows."""
        conn = tlengine.contextual_connect()
        trans = conn.begin()
        conn.execute(users.insert(), user_id=1, user_name="user1")
        conn.execute(users.insert(), user_id=2, user_name="user2")
        conn.execute(users.insert(), user_id=3, user_name="user3")
        trans.commit()
        external_connection = tlengine.connect()
        result = external_connection.execute("select * from query_users")
        try:
            assert len(result.fetchall()) == 3
        finally:
            conn.close()
            external_connection.close()
    def test_nesting_rollback(self):
        """tests nesting of transactions, rollback at the end"""
        external_connection = tlengine.connect()
        self.assert_(
            external_connection.connection
            is not tlengine.contextual_connect().connection
        )
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        tlengine.execute(users.insert(), user_id=5, user_name="user5")
        tlengine.commit()
        # outermost rollback discards everything, including the inner commit
        tlengine.rollback()
        try:
            self.assert_(
                external_connection.scalar("select count(*) from query_users")
                == 0
            )
        finally:
            external_connection.close()
    def test_nesting_commit(self):
        """tests nesting of transactions, commit at the end."""
        external_connection = tlengine.connect()
        self.assert_(
            external_connection.connection
            is not tlengine.contextual_connect().connection
        )
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        tlengine.execute(users.insert(), user_id=5, user_name="user5")
        tlengine.commit()
        tlengine.commit()
        try:
            self.assert_(
                external_connection.scalar("select count(*) from query_users")
                == 5
            )
        finally:
            external_connection.close()
    def test_mixed_nesting(self):
        """tests nesting of transactions off the TLEngine directly
        inside of transactions off the connection from the TLEngine"""
        external_connection = tlengine.connect()
        self.assert_(
            external_connection.connection
            is not tlengine.contextual_connect().connection
        )
        conn = tlengine.contextual_connect()
        trans = conn.begin()
        trans2 = conn.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=5, user_name="user5")
        tlengine.execute(users.insert(), user_id=6, user_name="user6")
        tlengine.execute(users.insert(), user_id=7, user_name="user7")
        tlengine.commit()
        tlengine.execute(users.insert(), user_id=8, user_name="user8")
        tlengine.commit()
        trans2.commit()
        # outermost transaction rolls back; nothing persists
        trans.rollback()
        conn.close()
        try:
            self.assert_(
                external_connection.scalar("select count(*) from query_users")
                == 0
            )
        finally:
            external_connection.close()
    def test_more_mixed_nesting(self):
        """tests nesting of transactions off the connection from the
        TLEngine inside of transactions off the TLEngine directly."""
        external_connection = tlengine.connect()
        self.assert_(
            external_connection.connection
            is not tlengine.contextual_connect().connection
        )
        tlengine.begin()
        connection = tlengine.contextual_connect()
        connection.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.begin()
        connection.execute(users.insert(), user_id=2, user_name="user2")
        connection.execute(users.insert(), user_id=3, user_name="user3")
        trans = connection.begin()
        connection.execute(users.insert(), user_id=4, user_name="user4")
        connection.execute(users.insert(), user_id=5, user_name="user5")
        trans.commit()
        tlengine.commit()
        # outermost rollback discards all five rows
        tlengine.rollback()
        connection.close()
        try:
            self.assert_(
                external_connection.scalar("select count(*) from query_users")
                == 0
            )
        finally:
            external_connection.close()
    @testing.requires.savepoints
    def test_nested_subtransaction_rollback(self):
        """Rolling back a begin_nested() savepoint discards only its rows."""
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.begin_nested()
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.rollback()
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.commit()
        tlengine.close()
        eq_(
            tlengine.execute(
                select([users.c.user_id]).order_by(users.c.user_id)
            ).fetchall(),
            [(1,), (3,)],
        )
        tlengine.close()
    @testing.requires.savepoints
    @testing.crashes(
        "oracle+zxjdbc",
        "Errors out and causes subsequent tests to " "deadlock",
    )
    def test_nested_subtransaction_commit(self):
        """Committing a begin_nested() savepoint keeps its rows."""
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.begin_nested()
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.commit()
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.commit()
        tlengine.close()
        eq_(
            tlengine.execute(
                select([users.c.user_id]).order_by(users.c.user_id)
            ).fetchall(),
            [(1,), (2,), (3,)],
        )
        tlengine.close()
    @testing.requires.savepoints
    def test_rollback_to_subtransaction(self):
        """Rolling back inside a savepoint returns to the savepoint state."""
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.begin_nested()
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.rollback()
        # second rollback releases the savepoint, discarding user2 as well
        tlengine.rollback()
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        tlengine.commit()
        tlengine.close()
        eq_(
            tlengine.execute(
                select([users.c.user_id]).order_by(users.c.user_id)
            ).fetchall(),
            [(1,), (4,)],
        )
        tlengine.close()
    def test_connections(self):
        """tests that contextual_connect is threadlocal"""
        c1 = tlengine.contextual_connect()
        c2 = tlengine.contextual_connect()
        assert c1.connection is c2.connection
        c2.close()
        assert not c1.closed
        assert not tlengine.closed
    @testing.requires.independent_cursors
    def test_result_closing(self):
        """tests that contextual_connect is threadlocal"""
        r1 = tlengine.execute(select([1]))
        r2 = tlengine.execute(select([1]))
        row1 = r1.fetchone()
        row2 = r2.fetchone()
        r1.close()
        assert r2.connection is r1.connection
        assert not r2.connection.closed
        assert not tlengine.closed
        # close again, nothing happens since resultproxy calls close()
        # only once
        r1.close()
        assert r2.connection is r1.connection
        assert not r2.connection.closed
        assert not tlengine.closed
        r2.close()
        assert r2.connection.closed
        assert tlengine.closed
    @testing.crashes(
        "oracle+cx_oracle", "intermittent failures on the buildbot"
    )
    def test_dispose(self):
        """Engine remains usable after dispose()."""
        with _tlengine_deprecated():
            eng = testing_engine(options=dict(strategy="threadlocal"))
        result = eng.execute(select([1]))
        eng.dispose()
        eng.execute(select([1]))
    @testing.requires.two_phase_transactions
    def test_two_phase_transaction(self):
        """begin_twophase/prepare/commit/rollback combinations; only the
        two committed transactions persist."""
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=1, user_name="user1")
        tlengine.prepare()
        tlengine.commit()
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=2, user_name="user2")
        tlengine.commit()
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=3, user_name="user3")
        tlengine.rollback()
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=4, user_name="user4")
        tlengine.prepare()
        tlengine.rollback()
        eq_(
            tlengine.execute(
                select([users.c.user_id]).order_by(users.c.user_id)
            ).fetchall(),
            [(1,), (2,)],
        )
class ConvenienceExecuteTest(fixtures.TablesTest):
    """``engine.begin()`` context-manager behavior on a deprecated
    threadlocal-strategy engine: commit on success, rollback on error."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # simple two-int table used as the transaction target
        cls.table = Table(
            "exec_test",
            metadata,
            Column("a", Integer),
            Column("b", Integer),
            test_needs_acid=True,
        )
    def _trans_fn(self, is_transaction=False):
        """Return a callable inserting one row; unwraps a Transaction arg."""
        def go(conn, x, value=None):
            if is_transaction:
                conn = conn.connection
            conn.execute(self.table.insert().values(a=x, b=value))
        return go
    def _trans_rollback_fn(self, is_transaction=False):
        """Like _trans_fn but raises after the insert to force rollback."""
        def go(conn, x, value=None):
            if is_transaction:
                conn = conn.connection
            conn.execute(self.table.insert().values(a=x, b=value))
            raise SomeException("breakage")
        return go
    def _assert_no_data(self):
        # table must be empty (rollback path)
        eq_(
            testing.db.scalar(
                select([func.count("*")]).select_from(self.table)
            ),
            0,
        )
    def _assert_fn(self, x, value=None):
        # table must contain exactly the one committed row
        eq_(testing.db.execute(self.table.select()).fetchall(), [(x, value)])
    def test_transaction_tlocal_engine_ctx_commit(self):
        """begin() context exits cleanly -> row is committed."""
        fn = self._trans_fn()
        with _tlengine_deprecated():
            engine = engines.testing_engine(
                options=dict(strategy="threadlocal", pool=testing.db.pool)
            )
        ctx = engine.begin()
        testing.run_as_contextmanager(ctx, fn, 5, value=8)
        self._assert_fn(5, value=8)
    def test_transaction_tlocal_engine_ctx_rollback(self):
        """begin() context raises -> row is rolled back."""
        fn = self._trans_rollback_fn()
        with _tlengine_deprecated():
            engine = engines.testing_engine(
                options=dict(strategy="threadlocal", pool=testing.db.pool)
            )
        ctx = engine.begin()
        assert_raises_message(
            Exception,
            "breakage",
            testing.run_as_contextmanager,
            ctx,
            fn,
            5,
            value=8,
        )
        self._assert_no_data()
def _proxy_execute_deprecated():
    """Return the pair of deprecation-warning context managers emitted for
    the ConnectionProxy execute / cursor_execute hooks."""
    execute_warning = testing.expect_deprecated(
        "ConnectionProxy.execute is deprecated."
    )
    cursor_execute_warning = testing.expect_deprecated(
        "ConnectionProxy.cursor_execute is deprecated."
    )
    return (execute_warning, cursor_execute_warning)
class ProxyConnectionTest(fixtures.TestBase):
    """These are the same tests as EngineEventsTest, except using
    the deprecated ConnectionProxy interface.
    """
    __requires__ = ("ad_hoc_engines",)
    __prefer_requires__ = ("two_phase_transactions",)
    @testing.uses_deprecated(r".*Use event.listen")
    @testing.fails_on("firebird", "Data type unknown")
    def test_proxy(self):
        """Proxy execute/cursor_execute hooks observe all compiled and
        cursor-level statements, on both plain and threadlocal engines."""
        stmts = []
        cursor_stmts = []
        class MyProxy(ConnectionProxy):
            # records each statement, then delegates to the real execute
            def execute(
                self, conn, execute, clauseelement, *multiparams, **params
            ):
                stmts.append((str(clauseelement), params, multiparams))
                return execute(clauseelement, *multiparams, **params)
            def cursor_execute(
                self,
                execute,
                cursor,
                statement,
                parameters,
                context,
                executemany,
            ):
                cursor_stmts.append((str(statement), parameters, None))
                return execute(cursor, statement, parameters, context)
        def assert_stmts(expected, received):
            # each expected statement must appear (prefix match, with
            # matching dict or positional params) in recorded order
            for stmt, params, posn in expected:
                if not received:
                    assert False, "Nothing available for stmt: %s" % stmt
                while received:
                    teststmt, testparams, testmultiparams = received.pop(0)
                    teststmt = (
                        re.compile(r"[\n\t ]+", re.M)
                        .sub(" ", teststmt)
                        .strip()
                    )
                    if teststmt.startswith(stmt) and (
                        testparams == params or testparams == posn
                    ):
                        break
        with testing.expect_deprecated(
            "ConnectionProxy.execute is deprecated.",
            "ConnectionProxy.cursor_execute is deprecated.",
        ):
            plain_engine = engines.testing_engine(
                options=dict(implicit_returning=False, proxy=MyProxy())
            )
        with testing.expect_deprecated(
            "ConnectionProxy.execute is deprecated.",
            "ConnectionProxy.cursor_execute is deprecated.",
            "The 'threadlocal' engine strategy is deprecated",
        ):
            tl_engine = engines.testing_engine(
                options=dict(
                    implicit_returning=False,
                    proxy=MyProxy(),
                    strategy="threadlocal",
                )
            )
        for engine in (plain_engine, tl_engine):
            m = MetaData(engine)
            t1 = Table(
                "t1",
                m,
                Column("c1", Integer, primary_key=True),
                Column(
                    "c2",
                    String(50),
                    default=func.lower("Foo"),
                    primary_key=True,
                ),
            )
            m.create_all()
            try:
                t1.insert().execute(c1=5, c2="some data")
                t1.insert().execute(c1=6)
                eq_(
                    engine.execute("select * from t1").fetchall(),
                    [(5, "some data"), (6, "foo")],
                )
            finally:
                m.drop_all()
            engine.dispose()
            # expected compiled-level statements recorded by execute()
            compiled = [
                ("CREATE TABLE t1", {}, None),
                (
                    "INSERT INTO t1 (c1, c2)",
                    {"c2": "some data", "c1": 5},
                    None,
                ),
                ("INSERT INTO t1 (c1, c2)", {"c1": 6}, None),
                ("select * from t1", {}, None),
                ("DROP TABLE t1", {}, None),
            ]
            # expected DBAPI-level statements recorded by cursor_execute()
            cursor = [
                ("CREATE TABLE t1", {}, ()),
                (
                    "INSERT INTO t1 (c1, c2)",
                    {"c2": "some data", "c1": 5},
                    (5, "some data"),
                ),
                ("SELECT lower", {"lower_1": "Foo"}, ("Foo",)),
                (
                    "INSERT INTO t1 (c1, c2)",
                    {"c2": "foo", "c1": 6},
                    (6, "foo"),
                ),
                ("select * from t1", {}, ()),
                ("DROP TABLE t1", {}, ()),
            ]
            assert_stmts(compiled, stmts)
            assert_stmts(cursor, cursor_stmts)
    @testing.uses_deprecated(r".*Use event.listen")
    def test_options(self):
        """execution_options() produces a branched connection that still
        fires the proxy hooks."""
        canary = []
        class TrackProxy(ConnectionProxy):
            # record each hook name, then call through
            def __getattribute__(self, key):
                fn = object.__getattribute__(self, key)
                def go(*arg, **kw):
                    canary.append(fn.__name__)
                    return fn(*arg, **kw)
                return go
        with testing.expect_deprecated(
            *[
                "ConnectionProxy.%s is deprecated" % name
                for name in [
                    "execute",
                    "cursor_execute",
                    "begin",
                    "rollback",
                    "commit",
                    "savepoint",
                    "rollback_savepoint",
                    "release_savepoint",
                    "begin_twophase",
                    "prepare_twophase",
                    "rollback_twophase",
                    "commit_twophase",
                ]
            ]
        ):
            engine = engines.testing_engine(options={"proxy": TrackProxy()})
        conn = engine.connect()
        c2 = conn.execution_options(foo="bar")
        eq_(c2._execution_options, {"foo": "bar"})
        c2.execute(select([1]))
        # options accumulate across successive branches
        c3 = c2.execution_options(bar="bat")
        eq_(c3._execution_options, {"foo": "bar", "bar": "bat"})
        eq_(canary, ["execute", "cursor_execute"])
    @testing.uses_deprecated(r".*Use event.listen")
    def test_transactional(self):
        """begin/rollback/commit hooks fire in statement order."""
        canary = []
        class TrackProxy(ConnectionProxy):
            def __getattribute__(self, key):
                fn = object.__getattribute__(self, key)
                def go(*arg, **kw):
                    canary.append(fn.__name__)
                    return fn(*arg, **kw)
                return go
        with testing.expect_deprecated(
            *[
                "ConnectionProxy.%s is deprecated" % name
                for name in [
                    "execute",
                    "cursor_execute",
                    "begin",
                    "rollback",
                    "commit",
                    "savepoint",
                    "rollback_savepoint",
                    "release_savepoint",
                    "begin_twophase",
                    "prepare_twophase",
                    "rollback_twophase",
                    "commit_twophase",
                ]
            ]
        ):
            engine = engines.testing_engine(options={"proxy": TrackProxy()})
        conn = engine.connect()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.rollback()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.commit()
        eq_(
            canary,
            [
                "begin",
                "execute",
                "cursor_execute",
                "rollback",
                "begin",
                "execute",
                "cursor_execute",
                "commit",
            ],
        )
    @testing.uses_deprecated(r".*Use event.listen")
    @testing.requires.savepoints
    @testing.requires.two_phase_transactions
    def test_transactional_advanced(self):
        """Savepoint and two-phase hooks fire in the expected sequence."""
        canary = []
        class TrackProxy(ConnectionProxy):
            def __getattribute__(self, key):
                fn = object.__getattribute__(self, key)
                def go(*arg, **kw):
                    canary.append(fn.__name__)
                    return fn(*arg, **kw)
                return go
        with testing.expect_deprecated(
            *[
                "ConnectionProxy.%s is deprecated" % name
                for name in [
                    "execute",
                    "cursor_execute",
                    "begin",
                    "rollback",
                    "commit",
                    "savepoint",
                    "rollback_savepoint",
                    "release_savepoint",
                    "begin_twophase",
                    "prepare_twophase",
                    "rollback_twophase",
                    "commit_twophase",
                ]
            ]
        ):
            engine = engines.testing_engine(options={"proxy": TrackProxy()})
        conn = engine.connect()
        trans = conn.begin()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.rollback()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.commit()
        trans.rollback()
        trans = conn.begin_twophase()
        conn.execute(select([1]))
        trans.prepare()
        trans.commit()
        # only the transaction-control hooks matter for this assertion
        canary = [t for t in canary if t not in ("cursor_execute", "execute")]
        eq_(
            canary,
            [
                "begin",
                "savepoint",
                "rollback_savepoint",
                "savepoint",
                "release_savepoint",
                "rollback",
                "begin_twophase",
                "prepare_twophase",
                "commit_twophase",
            ],
        )
class HandleInvalidatedOnConnectTest(fixtures.TestBase):
    """Disconnect detection behavior of the deprecated
    ``Engine.contextual_connect()`` during initial connect."""
    __requires__ = ("sqlite",)
    def setUp(self):
        """Build a Mock DBAPI whose error types mirror sqlite3's."""
        e = create_engine("sqlite://")
        connection = Mock(get_server_version_info=Mock(return_value="5.0"))
        def connect(*args, **kwargs):
            return connection
        dbapi = Mock(
            sqlite_version_info=(99, 9, 9),
            version_info=(99, 9, 9),
            sqlite_version="99.9.9",
            paramstyle="named",
            connect=Mock(side_effect=connect),
        )
        # reuse the real sqlite3 exception classes so the dialect's
        # error handling recognizes them
        sqlite3 = e.dialect.dbapi
        dbapi.Error = (sqlite3.Error,)
        dbapi.ProgrammingError = sqlite3.ProgrammingError
        self.dbapi = dbapi
        self.ProgrammingError = sqlite3.ProgrammingError
    def test_dont_touch_non_dbapi_exception_on_contextual_connect(self):
        """Non-DBAPI connect errors propagate without is_disconnect()."""
        dbapi = self.dbapi
        dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error"))
        e = create_engine("sqlite://", module=dbapi)
        e.dialect.is_disconnect = is_disconnect = Mock()
        with testing.expect_deprecated(
            r"The Engine.contextual_connect\(\) method is deprecated"
        ):
            assert_raises_message(
                TypeError, "I'm not a DBAPI error", e.contextual_connect
            )
        eq_(is_disconnect.call_count, 0)
    def test_invalidate_on_contextual_connect(self):
        """test that is_disconnect() is called during connect.
        interpretation of connection failures are not supported by
        every backend.
        """
        dbapi = self.dbapi
        dbapi.connect = Mock(
            side_effect=self.ProgrammingError(
                "Cannot operate on a closed database."
            )
        )
        e = create_engine("sqlite://", module=dbapi)
        try:
            with testing.expect_deprecated(
                r"The Engine.contextual_connect\(\) method is deprecated"
            ):
                e.contextual_connect()
            assert False
        except tsa.exc.DBAPIError as de:
            # DBAPI error during connect marks the connection invalidated
            assert de.connection_invalidated
class HandleErrorTest(fixtures.TestBase):
    """Behavior of the deprecated ``dbapi_error`` engine event (superseded
    by ``handle_error``)."""
    __requires__ = ("ad_hoc_engines",)
    __backend__ = True
    def tearDown(self):
        # remove listeners registered on the Engine class between tests
        Engine.dispatch._clear()
        Engine._has_events = False
    def test_legacy_dbapi_error(self):
        """dbapi_error fires for a real DBAPI error with stmt and orig."""
        engine = engines.testing_engine()
        canary = Mock()
        with testing.expect_deprecated(
            r"The ConnectionEvents.dbapi_error\(\) event is deprecated"
        ):
            event.listen(engine, "dbapi_error", canary)
        with engine.connect() as conn:
            try:
                conn.execute("SELECT FOO FROM I_DONT_EXIST")
                assert False
            except tsa.exc.DBAPIError as e:
                # listener args: (conn, cursor, statement, params, ctx, exc)
                eq_(canary.mock_calls[0][1][5], e.orig)
                eq_(canary.mock_calls[0][1][2], "SELECT FOO FROM I_DONT_EXIST")
    def test_legacy_dbapi_error_no_ad_hoc_context(self):
        """dbapi_error does NOT fire for pre-execution (bind-param) errors."""
        engine = engines.testing_engine()
        listener = Mock(return_value=None)
        with testing.expect_deprecated(
            r"The ConnectionEvents.dbapi_error\(\) event is deprecated"
        ):
            event.listen(engine, "dbapi_error", listener)
        nope = SomeException("nope")
        class MyType(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                raise nope
        with engine.connect() as conn:
            assert_raises_message(
                tsa.exc.StatementError,
                r"\(.*SomeException\) " r"nope \[SQL\: u?'SELECT 1 ",
                conn.execute,
                select([1]).where(column("foo") == literal("bar", MyType())),
            )
        # no legacy event
        eq_(listener.mock_calls, [])
    def test_legacy_dbapi_error_non_dbapi_error(self):
        """dbapi_error does NOT fire for non-DBAPI exceptions."""
        engine = engines.testing_engine()
        listener = Mock(return_value=None)
        with testing.expect_deprecated(
            r"The ConnectionEvents.dbapi_error\(\) event is deprecated"
        ):
            event.listen(engine, "dbapi_error", listener)
        nope = TypeError("I'm not a DBAPI error")
        with engine.connect() as c:
            c.connection.cursor = Mock(
                return_value=Mock(execute=Mock(side_effect=nope))
            )
            assert_raises_message(
                TypeError, "I'm not a DBAPI error", c.execute, "select "
            )
        # no legacy event
        eq_(listener.mock_calls, [])
def MockDBAPI():  # noqa
    """Build a Mock standing in for a DBAPI module.

    Connections track ``closed`` state, and ``shutdown(True)`` makes
    subsequent ``connect()`` calls raise ``Exception("connect failed")``.
    """
    def _make_cursor():
        return Mock()
    def _make_connection(*arg, **kw):
        # mock seems like it might have an issue logging
        # call_count correctly under threading, not sure.
        # adding a side_effect for close seems to help.
        def _close():
            connection.closed = True
        connection = Mock(
            cursor=Mock(side_effect=_make_cursor),
            close=Mock(side_effect=_close),
            closed=False,
        )
        return connection
    def _shutdown(value):
        # toggle connect() between failing and working implementations
        if value:
            db.connect = Mock(side_effect=Exception("connect failed"))
        else:
            db.connect = Mock(side_effect=_make_connection)
        db.is_shutdown = value
    db = Mock(
        connect=Mock(side_effect=_make_connection),
        shutdown=_shutdown,
        is_shutdown=False,
    )
    return db
class PoolTestBase(fixtures.TestBase):
    """Base fixture for pool tests: clears pool managers around each test
    and provides QueuePool-over-MockDBAPI factories."""
    def setup(self):
        pool.clear_managers()
        # connections registered here get closed in teardown
        self._teardown_conns = []
    def teardown(self):
        # entries are weak references; skip any already collected
        for ref in self._teardown_conns:
            conn = ref()
            if conn:
                conn.close()
    @classmethod
    def teardown_class(cls):
        pool.clear_managers()
    def _queuepool_fixture(self, **kw):
        """Return only the QueuePool from _queuepool_dbapi_fixture."""
        dbapi, pool = self._queuepool_dbapi_fixture(**kw)
        return pool
    def _queuepool_dbapi_fixture(self, **kw):
        """Return (mock dbapi, QueuePool connecting through it)."""
        dbapi = MockDBAPI()
        return (
            dbapi,
            pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
        )
class DeprecatedPoolListenerTest(PoolTestBase):
    """Exercise the deprecated PoolListener interface (class-based and
    dict-of-callables forms), verifying both the deprecation warnings and
    that the hooks still fire correctly."""
    @testing.requires.predictable_gc
    @testing.uses_deprecated(
        r".*Use the PoolEvents", r".*'listeners' argument .* is deprecated"
    )
    def test_listeners(self):
        class InstrumentingListener(object):
            # binds inst_* recorders over whichever hooks the subclass
            # declares, so each subclass instruments only its own hooks
            def __init__(self):
                if hasattr(self, "connect"):
                    self.connect = self.inst_connect
                if hasattr(self, "first_connect"):
                    self.first_connect = self.inst_first_connect
                if hasattr(self, "checkout"):
                    self.checkout = self.inst_checkout
                if hasattr(self, "checkin"):
                    self.checkin = self.inst_checkin
                self.clear()
            def clear(self):
                # reset the per-hook recorded-connection lists
                self.connected = []
                self.first_connected = []
                self.checked_out = []
                self.checked_in = []
            def assert_total(self, conn, fconn, cout, cin):
                # assert total event counts per hook
                eq_(len(self.connected), conn)
                eq_(len(self.first_connected), fconn)
                eq_(len(self.checked_out), cout)
                eq_(len(self.checked_in), cin)
            def assert_in(self, item, in_conn, in_fconn, in_cout, in_cin):
                # assert membership of a specific connection per hook
                eq_((item in self.connected), in_conn)
                eq_((item in self.first_connected), in_fconn)
                eq_((item in self.checked_out), in_cout)
                eq_((item in self.checked_in), in_cin)
            def inst_connect(self, con, record):
                print("connect(%s, %s)" % (con, record))
                assert con is not None
                assert record is not None
                self.connected.append(con)
            def inst_first_connect(self, con, record):
                print("first_connect(%s, %s)" % (con, record))
                assert con is not None
                assert record is not None
                self.first_connected.append(con)
            def inst_checkout(self, con, record, proxy):
                print("checkout(%s, %s, %s)" % (con, record, proxy))
                assert con is not None
                assert record is not None
                assert proxy is not None
                self.checked_out.append(con)
            def inst_checkin(self, con, record):
                print("checkin(%s, %s)" % (con, record))
                # con can be None if invalidated
                assert record is not None
                self.checked_in.append(con)
        # one listener per hook combination, to verify per-hook
        # registration counts below
        class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
            pass
        class ListenConnect(InstrumentingListener):
            def connect(self, con, record):
                pass
        class ListenFirstConnect(InstrumentingListener):
            def first_connect(self, con, record):
                pass
        class ListenCheckOut(InstrumentingListener):
            def checkout(self, con, record, proxy, num):
                pass
        class ListenCheckIn(InstrumentingListener):
            def checkin(self, con, record):
                pass
        def assert_listeners(p, total, conn, fconn, cout, cin):
            # counts must survive pool.recreate() as well
            for instance in (p, p.recreate()):
                self.assert_(len(instance.dispatch.connect) == conn)
                self.assert_(len(instance.dispatch.first_connect) == fconn)
                self.assert_(len(instance.dispatch.checkout) == cout)
                self.assert_(len(instance.dispatch.checkin) == cin)
        p = self._queuepool_fixture()
        assert_listeners(p, 0, 0, 0, 0, 0)
        with testing.expect_deprecated(
            *[
                "PoolListener.%s is deprecated." % name
                for name in ["connect", "first_connect", "checkout", "checkin"]
            ]
        ):
            p.add_listener(ListenAll())
        assert_listeners(p, 1, 1, 1, 1, 1)
        with testing.expect_deprecated(
            *["PoolListener.%s is deprecated." % name for name in ["connect"]]
        ):
            p.add_listener(ListenConnect())
        assert_listeners(p, 2, 2, 1, 1, 1)
        with testing.expect_deprecated(
            *[
                "PoolListener.%s is deprecated." % name
                for name in ["first_connect"]
            ]
        ):
            p.add_listener(ListenFirstConnect())
        assert_listeners(p, 3, 2, 2, 1, 1)
        with testing.expect_deprecated(
            *["PoolListener.%s is deprecated." % name for name in ["checkout"]]
        ):
            p.add_listener(ListenCheckOut())
        assert_listeners(p, 4, 2, 2, 2, 1)
        with testing.expect_deprecated(
            *["PoolListener.%s is deprecated." % name for name in ["checkin"]]
        ):
            p.add_listener(ListenCheckIn())
        assert_listeners(p, 5, 2, 2, 2, 2)
        del p
        # now exercise the hooks at runtime via the listeners= parameter
        snoop = ListenAll()
        with testing.expect_deprecated(
            *[
                "PoolListener.%s is deprecated." % name
                for name in ["connect", "first_connect", "checkout", "checkin"]
            ]
            + [
                "PoolListener is deprecated in favor of the PoolEvents "
                "listener interface. The Pool.listeners parameter "
                "will be removed"
            ]
        ):
            p = self._queuepool_fixture(listeners=[snoop])
        assert_listeners(p, 1, 1, 1, 1, 1)
        c = p.connect()
        snoop.assert_total(1, 1, 1, 0)
        cc = c.connection
        snoop.assert_in(cc, True, True, True, False)
        c.close()
        snoop.assert_in(cc, True, True, True, True)
        del c, cc
        snoop.clear()
        # this one depends on immediate gc
        c = p.connect()
        cc = c.connection
        snoop.assert_in(cc, False, False, True, False)
        snoop.assert_total(0, 0, 1, 0)
        del c, cc
        lazy_gc()
        snoop.assert_total(0, 0, 1, 1)
        p.dispose()
        snoop.clear()
        c = p.connect()
        c.close()
        c = p.connect()
        snoop.assert_total(1, 0, 2, 1)
        c.close()
        snoop.assert_total(1, 0, 2, 2)
        # invalidation
        p.dispose()
        snoop.clear()
        c = p.connect()
        snoop.assert_total(1, 0, 1, 0)
        c.invalidate()
        snoop.assert_total(1, 0, 1, 1)
        c.close()
        snoop.assert_total(1, 0, 1, 1)
        del c
        lazy_gc()
        snoop.assert_total(1, 0, 1, 1)
        c = p.connect()
        snoop.assert_total(2, 0, 2, 1)
        c.close()
        del c
        lazy_gc()
        snoop.assert_total(2, 0, 2, 2)
        # detached
        p.dispose()
        snoop.clear()
        c = p.connect()
        snoop.assert_total(1, 0, 1, 0)
        c.detach()
        snoop.assert_total(1, 0, 1, 0)
        c.close()
        del c
        snoop.assert_total(1, 0, 1, 0)
        c = p.connect()
        snoop.assert_total(2, 0, 2, 0)
        c.close()
        del c
        snoop.assert_total(2, 0, 2, 1)
        # recreated
        p = p.recreate()
        snoop.clear()
        c = p.connect()
        snoop.assert_total(1, 1, 1, 0)
        c.close()
        snoop.assert_total(1, 1, 1, 1)
        c = p.connect()
        snoop.assert_total(1, 1, 2, 1)
        c.close()
        snoop.assert_total(1, 1, 2, 2)
    @testing.uses_deprecated(r".*Use the PoolEvents")
    def test_listeners_callables(self):
        """Plain callables in a dict are accepted as listeners too."""
        def connect(dbapi_con, con_record):
            counts[0] += 1
        def checkout(dbapi_con, con_record, con_proxy):
            counts[1] += 1
        def checkin(dbapi_con, con_record):
            counts[2] += 1
        i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
        i_connect = dict(connect=connect)
        i_checkout = dict(checkout=checkout)
        i_checkin = dict(checkin=checkin)
        for cls in (pool.QueuePool, pool.StaticPool):
            counts = [0, 0, 0]
            def assert_listeners(p, total, conn, cout, cin):
                for instance in (p, p.recreate()):
                    eq_(len(instance.dispatch.connect), conn)
                    eq_(len(instance.dispatch.checkout), cout)
                    eq_(len(instance.dispatch.checkin), cin)
            p = self._queuepool_fixture()
            assert_listeners(p, 0, 0, 0, 0)
            with testing.expect_deprecated(
                *[
                    "PoolListener.%s is deprecated." % name
                    for name in ["connect", "checkout", "checkin"]
                ]
            ):
                p.add_listener(i_all)
            assert_listeners(p, 1, 1, 1, 1)
            with testing.expect_deprecated(
                *[
                    "PoolListener.%s is deprecated." % name
                    for name in ["connect"]
                ]
            ):
                p.add_listener(i_connect)
            assert_listeners(p, 2, 1, 1, 1)
            with testing.expect_deprecated(
                *[
                    "PoolListener.%s is deprecated." % name
                    for name in ["checkout"]
                ]
            ):
                p.add_listener(i_checkout)
            assert_listeners(p, 3, 1, 1, 1)
            with testing.expect_deprecated(
                *[
                    "PoolListener.%s is deprecated." % name
                    for name in ["checkin"]
                ]
            ):
                p.add_listener(i_checkin)
            assert_listeners(p, 4, 1, 1, 1)
            del p
            with testing.expect_deprecated(
                *[
                    "PoolListener.%s is deprecated." % name
                    for name in ["connect", "checkout", "checkin"]
                ]
                + [".*The Pool.listeners parameter will be removed"]
            ):
                p = self._queuepool_fixture(listeners=[i_all])
            assert_listeners(p, 1, 1, 1, 1)
            c = p.connect()
            assert counts == [1, 1, 0]
            c.close()
            assert counts == [1, 1, 1]
            c = p.connect()
            assert counts == [1, 2, 1]
            with testing.expect_deprecated(
                *[
                    "PoolListener.%s is deprecated." % name
                    for name in ["checkin"]
                ]
            ):
                p.add_listener(i_checkin)
            c.close()
            assert counts == [1, 2, 2]
# Tests for the deprecated pool.manage() facade and the deprecated
# use_threadlocal pool mode.
class PoolTest(PoolTestBase):
def test_manager(self):
# manage() in thread-local mode: same (args, kwargs) key in the same
# thread must hand back the same connection object.
with testing.expect_deprecated(
r"The pool.manage\(\) function is deprecated,"
):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
c1 = manager.connect("foo.db")
c2 = manager.connect("foo.db")
c3 = manager.connect("bar.db")
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
# sa_pool_key overrides argument-based keying: same key -> same
# connection, and the DBAPI is only dialed once per distinct key.
dbapi = MockDBAPI()
with testing.expect_deprecated(
r"The pool.manage\(\) function is deprecated,"
):
manager = pool.manage(dbapi, use_threadlocal=True)
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
c1 = manager.connect("foo.db", sa_pool_key="a")
c2 = manager.connect("foo.db", sa_pool_key="b")
c3 = manager.connect("bar.db", sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
def test_bad_args(self):
# A None key must be tolerated by manager.connect() without raising.
with testing.expect_deprecated(
r"The pool.manage\(\) function is deprecated,"
):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
# Without thread-local mode, each connect() yields a distinct connection.
with testing.expect_deprecated(
r"The pool.manage\(\) function is deprecated,"
):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect("foo.db")
connection2 = manager.connect("foo.db")
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
def test_threadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def test_threadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
# Shared body for the two tests above: releases connections either via
# explicit close() (useclose=True) or by dropping references and letting
# garbage collection return them to the pool.
dbapi = MockDBAPI()
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
for p in (
pool.QueuePool(
creator=dbapi.connect,
pool_size=3,
max_overflow=-1,
use_threadlocal=True,
),
pool.SingletonThreadPool(
creator=dbapi.connect, use_threadlocal=True
),
):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
# unique_connection() must bypass the thread-local record.
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_mixed_close(self):
# Closing one fairy while another reference is still alive: the checkout
# is only returned once the last reference is garbage collected.
pool._refs.clear()
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
p = self._queuepool_fixture(
pool_size=3, max_overflow=-1, use_threadlocal=True
)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
# QueuePool-specific tests of the deprecated use_threadlocal mode.
class QueuePoolTest(PoolTestBase):
def test_threadfairy(self):
# A closed thread-local connection can immediately be checked out again.
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
p = self._queuepool_fixture(
pool_size=3, max_overflow=-1, use_threadlocal=True
)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
ConnectionFairy with an ambiguous counter. i.e. its not true
reference counting."""
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
p = self._queuepool_fixture(
pool_size=3, max_overflow=-1, use_threadlocal=True
)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
# The first close() only decrements the counter; a second close() is
# required before the pool sees the connection as checked in.
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
# Dropping every fairy reference must return the connection to the pool
# via weakref cleanup, leaving the pool usable for the next connect().
with testing.expect_deprecated(
r".*Pool.use_threadlocal parameter is deprecated"
):
p = self._queuepool_fixture(
pool_size=3, max_overflow=-1, use_threadlocal=True
)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
class ExplicitAutoCommitDeprecatedTest(fixtures.TestBase):
"""test the 'autocommit' flag on select() and text() objects.
Requires PostgreSQL so that we may define a custom function which
modifies the database. """
__only_on__ = "postgresql"
@classmethod
def setup_class(cls):
# Create a table plus a SQL function whose invocation INSERTs a row, so
# that a plain SELECT has a side effect which only autocommit persists.
global metadata, foo
metadata = MetaData(testing.db)
foo = Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(100)),
)
metadata.create_all()
testing.db.execute(
"create function insert_foo(varchar) "
"returns integer as 'insert into foo(data) "
"values ($1);select 1;' language sql"
)
def teardown(self):
# Empty the table between individual tests.
foo.delete().execute().close()
@classmethod
def teardown_class(cls):
testing.db.execute("drop function insert_foo(varchar)")
metadata.drop_all()
def test_explicit_compiled(self):
# Autocommitted work done on conn1 must be visible from conn2.
conn1 = testing.db.connect()
conn2 = testing.db.connect()
with testing.expect_deprecated(
"The select.autocommit parameter is deprecated"
):
conn1.execute(select([func.insert_foo("data1")], autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() == [("data1",)]
with testing.expect_deprecated(
r"The SelectBase.autocommit\(\) method is deprecated,"
):
conn1.execute(select([func.insert_foo("data2")]).autocommit())
assert conn2.execute(select([foo.c.data])).fetchall() == [
("data1",),
("data2",),
]
conn1.close()
conn2.close()
def test_explicit_text(self):
# Same cross-connection visibility check for text(..., autocommit=True).
conn1 = testing.db.connect()
conn2 = testing.db.connect()
with testing.expect_deprecated(
"The text.autocommit parameter is deprecated"
):
conn1.execute(
text("select insert_foo('moredata')", autocommit=True)
)
assert conn2.execute(select([foo.c.data])).fetchall() == [
("moredata",)
]
conn1.close()
conn2.close()
| 32.550167 | 79 | 0.548985 |
31f83afea1aa3491f5603b5bb0a45e21bcdc7769 | 9,487 | py | Python | src/oci/data_integration/models/update_connection_from_adwc.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/data_integration/models/update_connection_from_adwc.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/data_integration/models/update_connection_from_adwc.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_connection_details import UpdateConnectionDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
# NOTE(review): this appears to be generated OCI SDK model code -- edits here
# would normally be made in the code generator, not by hand.
@init_model_state_from_kwargs
class UpdateConnectionFromAdwc(UpdateConnectionDetails):
"""
The details to update an Autonomous Data Warehouse data asset connection.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateConnectionFromAdwc object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAdwc.model_type` attribute
of this class is ``ORACLE_ADWC_CONNECTION`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param model_type:
The value to assign to the model_type property of this UpdateConnectionFromAdwc.
Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION"
:type model_type: str
:param key:
The value to assign to the key property of this UpdateConnectionFromAdwc.
:type key: str
:param model_version:
The value to assign to the model_version property of this UpdateConnectionFromAdwc.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this UpdateConnectionFromAdwc.
:type parent_ref: oci.data_integration.models.ParentReference
:param name:
The value to assign to the name property of this UpdateConnectionFromAdwc.
:type name: str
:param description:
The value to assign to the description property of this UpdateConnectionFromAdwc.
:type description: str
:param object_status:
The value to assign to the object_status property of this UpdateConnectionFromAdwc.
:type object_status: int
:param object_version:
The value to assign to the object_version property of this UpdateConnectionFromAdwc.
:type object_version: int
:param identifier:
The value to assign to the identifier property of this UpdateConnectionFromAdwc.
:type identifier: str
:param connection_properties:
The value to assign to the connection_properties property of this UpdateConnectionFromAdwc.
:type connection_properties: list[oci.data_integration.models.ConnectionProperty]
:param registry_metadata:
The value to assign to the registry_metadata property of this UpdateConnectionFromAdwc.
:type registry_metadata: oci.data_integration.models.RegistryMetadata
:param tns_alias:
The value to assign to the tns_alias property of this UpdateConnectionFromAdwc.
:type tns_alias: str
:param tns_names:
The value to assign to the tns_names property of this UpdateConnectionFromAdwc.
:type tns_names: list[str]
:param username:
The value to assign to the username property of this UpdateConnectionFromAdwc.
:type username: str
:param password:
The value to assign to the password property of this UpdateConnectionFromAdwc.
:type password: str
:param password_secret:
The value to assign to the password_secret property of this UpdateConnectionFromAdwc.
:type password_secret: oci.data_integration.models.SensitiveAttribute
"""
# Declared (Swagger) type of each model attribute, used by the SDK's
# (de)serialization machinery.
self.swagger_types = {
'model_type': 'str',
'key': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'name': 'str',
'description': 'str',
'object_status': 'int',
'object_version': 'int',
'identifier': 'str',
'connection_properties': 'list[ConnectionProperty]',
'registry_metadata': 'RegistryMetadata',
'tns_alias': 'str',
'tns_names': 'list[str]',
'username': 'str',
'password': 'str',
'password_secret': 'SensitiveAttribute'
}
# Maps python attribute names to their JSON/REST field names.
self.attribute_map = {
'model_type': 'modelType',
'key': 'key',
'model_version': 'modelVersion',
'parent_ref': 'parentRef',
'name': 'name',
'description': 'description',
'object_status': 'objectStatus',
'object_version': 'objectVersion',
'identifier': 'identifier',
'connection_properties': 'connectionProperties',
'registry_metadata': 'registryMetadata',
'tns_alias': 'tnsAlias',
'tns_names': 'tnsNames',
'username': 'username',
'password': 'password',
'password_secret': 'passwordSecret'
}
self._model_type = None
self._key = None
self._model_version = None
self._parent_ref = None
self._name = None
self._description = None
self._object_status = None
self._object_version = None
self._identifier = None
self._connection_properties = None
self._registry_metadata = None
self._tns_alias = None
self._tns_names = None
self._username = None
self._password = None
self._password_secret = None
# Discriminator is fixed for this subtype and must not be changed.
self._model_type = 'ORACLE_ADWC_CONNECTION'
@property
def tns_alias(self):
"""
Gets the tns_alias of this UpdateConnectionFromAdwc.
The Autonomous Data Warehouse instance service name.
:return: The tns_alias of this UpdateConnectionFromAdwc.
:rtype: str
"""
return self._tns_alias
@tns_alias.setter
def tns_alias(self, tns_alias):
"""
Sets the tns_alias of this UpdateConnectionFromAdwc.
The Autonomous Data Warehouse instance service name.
:param tns_alias: The tns_alias of this UpdateConnectionFromAdwc.
:type: str
"""
self._tns_alias = tns_alias
@property
def tns_names(self):
"""
Gets the tns_names of this UpdateConnectionFromAdwc.
Array of service names that are available for selection in the tnsAlias property.
:return: The tns_names of this UpdateConnectionFromAdwc.
:rtype: list[str]
"""
return self._tns_names
@tns_names.setter
def tns_names(self, tns_names):
"""
Sets the tns_names of this UpdateConnectionFromAdwc.
Array of service names that are available for selection in the tnsAlias property.
:param tns_names: The tns_names of this UpdateConnectionFromAdwc.
:type: list[str]
"""
self._tns_names = tns_names
@property
def username(self):
"""
Gets the username of this UpdateConnectionFromAdwc.
The user name for the connection.
:return: The username of this UpdateConnectionFromAdwc.
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""
Sets the username of this UpdateConnectionFromAdwc.
The user name for the connection.
:param username: The username of this UpdateConnectionFromAdwc.
:type: str
"""
self._username = username
@property
def password(self):
"""
Gets the password of this UpdateConnectionFromAdwc.
The password for the connection.
:return: The password of this UpdateConnectionFromAdwc.
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""
Sets the password of this UpdateConnectionFromAdwc.
The password for the connection.
:param password: The password of this UpdateConnectionFromAdwc.
:type: str
"""
self._password = password
@property
def password_secret(self):
"""
Gets the password_secret of this UpdateConnectionFromAdwc.
:return: The password_secret of this UpdateConnectionFromAdwc.
:rtype: oci.data_integration.models.SensitiveAttribute
"""
return self._password_secret
@password_secret.setter
def password_secret(self, password_secret):
"""
Sets the password_secret of this UpdateConnectionFromAdwc.
:param password_secret: The password_secret of this UpdateConnectionFromAdwc.
:type: oci.data_integration.models.SensitiveAttribute
"""
self._password_secret = password_secret
def __repr__(self):
return formatted_flat_dict(self)
# NOTE: __eq__ is defined without __hash__, so instances are unhashable in
# Python 3 (standard for these generated models).
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 35.00738 | 248 | 0.65205 |
180b906b9fcd22cb470ea6ec6eb3f1b130759377 | 4,176 | py | Python | spotify_client.py | leobeeson/music-time-machine | 385faab5a55c6d78779cbb18bd7316ef0af8bf1c | [
"MIT"
] | null | null | null | spotify_client.py | leobeeson/music-time-machine | 385faab5a55c6d78779cbb18bd7316ef0af8bf1c | [
"MIT"
] | null | null | null | spotify_client.py | leobeeson/music-time-machine | 385faab5a55c6d78779cbb18bd7316ef0af8bf1c | [
"MIT"
] | null | null | null | import os
from spotipy import Spotify
from spotipy.oauth2 import SpotifyOAuth
from typing import List
from song_metadata_curator import SongMetadataCurator
from dotenv import load_dotenv
load_dotenv()
# Spotify OAuth credentials, read from the environment (populated from .env
# by load_dotenv above).
CLIENT_ID = os.environ.get("SPOTIPY_CLIENT_ID")
CLIENT_SECRET = os.environ.get("SPOTIPY_CLIENT_SECRET")
# NOTE(review): "REDERICT" is a typo for "REDIRECT"; renaming would touch
# every reference, so it is only flagged here.
REDERICT_URI = os.environ.get("SPOTIPY_REDIRECT_URI")
# OAuth scope required to create/modify the user's private playlists.
SCOPE = "playlist-modify-private"
class SpotifyClient():
    """Thin wrapper around spotipy: authenticates the current user, resolves
    song metadata to Spotify track URIs, and builds private playlists.

    Reads CLIENT_ID / CLIENT_SECRET / REDERICT_URI / SCOPE from module-level
    configuration.
    """

    def __init__(self) -> None:
        self.client_id = CLIENT_ID
        self.client_secret = CLIENT_SECRET
        self.redirect_uri = REDERICT_URI
        # Perform the full OAuth handshake up front so every other method can
        # assume an authenticated ``self.spotify`` and a known ``self.user_id``.
        self.authenticate(SCOPE)
        self.get_access_token()
        self.create_spotipy_object()
        self.get_user_id()

    def authenticate(self, scope) -> None:
        """Build the SpotifyOAuth manager and store it on the instance.

        Fix: the original annotated this as returning ``SpotifyOAuth`` even
        though it returns nothing and only assigns ``self.oauth_manager``.
        """
        self.oauth_manager = SpotifyOAuth(
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_uri=self.redirect_uri,
            scope=scope,
            show_dialog=True,
            cache_path="token.txt",
        )

    def get_access_token(self) -> None:
        """Fetch/refresh the access token (cached in token.txt by the manager)."""
        self.oauth_manager.get_access_token(as_dict=False)

    def create_spotipy_object(self) -> None:
        """Create the authenticated spotipy client."""
        self.spotify = Spotify(auth_manager=self.oauth_manager)

    def get_user_id(self) -> None:
        """Cache the current user's Spotify id for playlist operations."""
        self.user_id = self.spotify.current_user()["id"]

    def get_all_spotify_song_ids(self, songs: List[dict]) -> List[dict]:
        """Resolve each {"song_title", "artist_name"} dict to Spotify URIs.

        Titles and artist names are normalised via SongMetadataCurator before
        searching. Returns one dict per input song carrying the (possibly
        empty) list of matching track URIs under "track_uris".
        """
        metadata_curator = SongMetadataCurator()
        songs_with_uris = []
        for song in songs:
            song_title = metadata_curator.remove_apostrophes(song["song_title"])
            artist_name = metadata_curator.normalise_singer_collaboration(
                song["artist_name"]
            )
            songs_with_uris.append(
                {
                    "song_title": song_title,
                    "artist_name": artist_name,
                    "track_uris": self.get_spotify_song_id(song_title, artist_name),
                }
            )
        return songs_with_uris

    def get_spotify_song_id(self, song_title: str, artist_name: str) -> List[str]:
        """Search Spotify for a track and return all matching track URIs.

        Logs and returns an empty list when the search response carries no
        tracks; items without a "uri" key are logged and skipped.
        """
        track_uris: List[str] = []
        response = self.spotify.search(
            q=f"track:{song_title} artist:{artist_name}", type="track"
        )
        try:
            items = response["tracks"]["items"]
        except KeyError:
            print(f"TRACK NOT FOUND: {song_title} by {artist_name}")
            return track_uris
        for item in items:
            try:
                track_uris.append(item["uri"])
            except KeyError:
                print(f"NO SPOTIFY URI: {song_title} by {artist_name}.")
        return track_uris

    def get_spotify_song_uris(self, song_ids: List[dict]) -> List[str]:
        """Pick the first track URI from each resolved song.

        Songs with no "track_uris" key, an empty list, or None are skipped.
        Fix: replaces the original's nested try/except KeyError/IndexError
        with an idiomatic ``dict.get`` plus a truthiness check (which also
        tolerates an explicit None value instead of crashing on ``None[0]``).
        """
        song_uris = []
        for song_id in song_ids:
            track_uris = song_id.get("track_uris")
            if track_uris:
                song_uris.append(track_uris[0])
        return song_uris

    def create_empty_spotify_playlist(self, playlist_name: str) -> str:
        """Create a new private playlist for the user and return its id."""
        new_playlist = self.spotify.user_playlist_create(
            self.user_id, playlist_name, public=False
        )
        return new_playlist["id"]

    def create_spotify_playlist(self, song_uris: List[str], playlist_name: str) -> None:
        """Create a private playlist containing ``song_uris``.

        No-op (returns None) when ``song_uris`` is empty; otherwise returns
        the snapshot id reported by Spotify, matching the original behavior.
        """
        if len(song_uris) > 0:
            playlist_id = self.create_empty_spotify_playlist(playlist_name)
            snapshot_id = self.spotify.user_playlist_add_tracks(
                self.user_id, playlist_id, song_uris
            )
            return snapshot_id
| 33.142857 | 102 | 0.589559 |
769c5246c6e2d8f03d88b36e38534c367e58dc1b | 826 | py | Python | 0x03-caching/base_caching.py | JoseAVallejo12/holbertonschool-web_back_end | eb514784772352b8e4873d1f648726815ab69592 | [
"MIT"
] | null | null | null | 0x03-caching/base_caching.py | JoseAVallejo12/holbertonschool-web_back_end | eb514784772352b8e4873d1f648726815ab69592 | [
"MIT"
] | null | null | null | 0x03-caching/base_caching.py | JoseAVallejo12/holbertonschool-web_back_end | eb514784772352b8e4873d1f648726815ab69592 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
""" BaseCaching module."""
class BaseCaching():
    """Abstract base for caching systems.

    Keeps cached entries in the ``cache_data`` dictionary and declares the
    interface (``put`` / ``get``) that concrete caches must implement.
    ``MAX_ITEMS`` is the capacity constant shared by all caching strategies.
    """

    MAX_ITEMS = 4

    def __init__(self):
        """Start with an empty cache store."""
        self.cache_data = {}

    def print_cache(self):
        """Dump every cached entry to stdout, ordered by key."""
        print("Current cache:")
        for key, value in sorted(self.cache_data.items()):
            print("{}: {}".format(key, value))

    def put(self, key, item):
        """Store an item under ``key``; concrete caches must override this."""
        raise NotImplementedError("put must be implemented in your cache class")

    def get(self, key):
        """Look up the item stored under ``key``; concrete caches must override this."""
        raise NotImplementedError("get must be implemented in your cache class")
1728c92f7bab88c7237b5d9e66dcd3eae5c30263 | 11,025 | py | Python | plotly/graph_objs/scatterpolargl/hoverlabel/_font.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/graph_objs/scatterpolargl/hoverlabel/_font.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/graph_objs/scatterpolargl/hoverlabel/_font.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
# NOTE(review): this appears to be plotly's generated graph-object code;
# changes would normally go into the code generator.
class Font(BaseTraceHierarchyType):
"""Sets the font used in hover labels of scatterpolargl traces."""
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
# Dotted path of this object's parent within the figure hierarchy.
return 'scatterpolargl.hoverlabel'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.scatterpolargl.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__('font')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
# Shallow copy so the pops below don't mutate the caller's dict.
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolargl.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.scatterpolargl.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scatterpolargl.hoverlabel import (
font as v_font
)
# Initialize validators
# ---------------------
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
# Explicit keyword arguments take precedence over values found in arg.
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 34.133127 | 75 | 0.562177 |
f889cff852fb0f077b9dca00a6ac7142bcdb8251 | 1,706 | py | Python | aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/IndexTagRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/IndexTagRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/IndexTagRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
# RPC-style request object for the IMM "IndexTag" action (API version
# 2017-09-06). Each get_/set_ pair below maps one query parameter.
class IndexTagRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imm', '2017-09-06', 'IndexTag','imm')
def get_SrcUris(self):
return self.get_query_params().get('SrcUris')
def set_SrcUris(self,SrcUris):
self.add_query_param('SrcUris',SrcUris)
def get_ModelId(self):
return self.get_query_params().get('ModelId')
def set_ModelId(self,ModelId):
self.add_query_param('ModelId',ModelId)
def get_Project(self):
return self.get_query_params().get('Project')
def set_Project(self,Project):
self.add_query_param('Project',Project)
def get_SetId(self):
return self.get_query_params().get('SetId')
def set_SetId(self,SetId):
self.add_query_param('SetId',SetId)
def get_Force(self):
return self.get_query_params().get('Force')
def set_Force(self,Force):
self.add_query_param('Force',Force)
3d53bb7e17f847925ecefaa73daed9571e94271b | 412 | py | Python | products/urls.py | Code-Institute-Submissions/danielboots-fytletic | 67c3000a4b681d7f76255ab11db841a7f2ba613e | [
"OLDAP-2.3"
] | 1 | 2021-03-31T18:54:25.000Z | 2021-03-31T18:54:25.000Z | products/urls.py | Code-Institute-Submissions/danielboots-fytletic | 67c3000a4b681d7f76255ab11db841a7f2ba613e | [
"OLDAP-2.3"
] | null | null | null | products/urls.py | Code-Institute-Submissions/danielboots-fytletic | 67c3000a4b681d7f76255ab11db841a7f2ba613e | [
"OLDAP-2.3"
from django.urls import path

from . import views

# URL routes for the products app: catalogue listing, detail view, and the
# add/edit/delete management views keyed by the product's integer primary key.
urlpatterns = [
    path("", views.all_products, name="products"),
    path("<int:product_id>/", views.product_detail, name="product_detail"),
    path("add/", views.add_product, name="add_product"),
    path("edit/<int:product_id>/", views.edit_product, name="edit_product"),
    path("delete/<int:product_id>/", views.delete_product, name="delete_product"),
]
1298f143205819bcdf2d90e6a3f5cf7cefcc26eb | 4,253 | py | Python | exblox/trainer.py | lokijuhy/exblox | 86dd358094381ac7c91d935a9c6d4c8d2380b291 | [
"MIT"
] | null | null | null | exblox/trainer.py | lokijuhy/exblox | 86dd358094381ac7c91d935a9c6d4c8d2380b291 | [
"MIT"
] | null | null | null | exblox/trainer.py | lokijuhy/exblox | 86dd358094381ac7c91d935a9c6d4c8d2380b291 | [
"MIT"
] | null | null | null | from copy import deepcopy
from sklearn.preprocessing import LabelEncoder
from typing import Dict, Tuple
from .architecture import Architecture, ArchitectureInterface
from .ConfigurableComponent import ConfigurableComponent, ComponentInterface
from .tuner import Tuner, TunerInterface
from .predictor import Predictor
class Trainer(ConfigurableComponent):
    """Fits an ``Architecture`` — optionally through a ``Tuner`` — and wraps the
    fitted model in a ``Predictor``.
    """

    def __init__(self, config: Dict, architecture: Architecture, tuner: Tuner = None):
        # Guard against callers passing the arguments in the wrong order: if the
        # first argument is not a dict but a later one is, the order is swapped.
        if not isinstance(config, dict):
            if isinstance(architecture, dict) or isinstance(tuner, dict):
                raise ValueError('It looks like the order of the arguments to `Trainer` is swapped. Please use'
                                 ' `Trainer(config, architecture, tuner)`.')
            raise ValueError('The first argument to Trainer must be a dictionary.')
        super().__init__(config)
        self.architecture = architecture
        self.tuner = tuner

    def fit(self, x, y) -> Tuple[Predictor, dict]:
        """Train the architecture and return ``(Predictor, training_metadata)``.

        When a ``Tuner`` is configured it drives the fit (and supplies the
        metadata dict); otherwise the architecture is fitted directly and the
        metadata dict is empty.

        Args:
            x: The training set.
            y: The labels for the training set.

        Returns: a trained ``Predictor`` object and a ``training_metadata`` dict.
        """
        untrained = self.get_architecture()
        if self.tuner:
            fitted_model, metadata = self.tuner.fit(untrained, x, y)
        else:
            fitted_model, metadata = untrained.fit(x, y), {}
        return Predictor(fitted_model), metadata

    def get_architecture(self) -> Architecture:
        """Return a fresh, untrained deep copy of the configured architecture.

        Copying (rather than handing out ``self.architecture``) lets a single
        ``Trainer`` produce multiple independently trained models without
        mutating the original by reference.
        """
        return deepcopy(self.architecture)
class SkorchTrainer(Trainer):
    """Trainer variant that label-encodes ``y`` into the float32 column-vector
    shape expected by skorch models before fitting.
    """

    def fit(self, x, y) -> Tuple[Predictor, Dict]:
        """Train the architecture on ``x`` and the transformed ``y``; return
        ``(Predictor, training_metadata)``.
        """
        arch = self.get_architecture()
        encoded_y = self.transform_y(y)
        if self.tuner:
            model, metadata = self.tuner.fit(arch, x, encoded_y)
        else:
            model, metadata = arch.fit(x, encoded_y), {}
        return Predictor(model), metadata

    @staticmethod
    def transform_y(y):
        """Label-encode ``y``, cast to float32, and reshape to an (n, 1) column."""
        encoded = LabelEncoder().fit_transform(y)
        encoded = encoded.astype('float32')
        return encoded.reshape((len(encoded), 1))
class TrainerInterface(ComponentInterface):
    """Serialization interface that reconstructs ``Trainer`` flavors from config dicts."""

    registered_flavors = {
        'Trainer': Trainer,
        'SkorchTrainer': SkorchTrainer,
    }

    @classmethod
    def deserialize(cls, d: Dict[str, Dict]) -> Trainer:
        """Instantiate a Trainer from a dict with keys ['Trainer', 'Architecture', 'Tuner'].

        Each key maps to a configuration dictionary containing 'flavor' (the
        class name of the component to instantiate) and 'config' (the object's
        own config dict); 'Tuner' is optional.

        Args:
            d: The serialized-component dictionary described above.

        Returns:
            A deserialized Trainer object.
        """
        validated = cls.validate_serialization_config(d['Trainer'])
        trainer_cls = cls.select_flavor(validated['flavor'])
        architecture = ArchitectureInterface.deserialize(d['Architecture'])
        tuner = None
        if 'Tuner' in d:
            tuner = TunerInterface.deserialize(d['Tuner'])
        return trainer_cls(config=validated['config'],
                           architecture=architecture,
                           tuner=tuner)
| 38.663636 | 120 | 0.667294 |
67e5120a54b79a83a4ea125ee487839b5740af55 | 38,284 | py | Python | python/paddle/tensor/random.py | zhenlin-work/Paddle | ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b | [
"Apache-2.0"
] | 4 | 2021-02-08T13:07:15.000Z | 2021-10-22T00:58:33.000Z | python/paddle/tensor/random.py | zhenlin-work/Paddle | ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b | [
"Apache-2.0"
] | 2 | 2019-07-26T04:06:05.000Z | 2019-07-29T04:25:24.000Z | python/paddle/tensor/random.py | zhenlin-work/Paddle | ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b | [
"Apache-2.0"
] | 5 | 2021-12-10T11:20:06.000Z | 2022-02-18T05:18:12.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define random functions
from ..fluid import core
from ..fluid.framework import in_dygraph_mode, Variable, convert_np_dtype_to_dtype_, dygraph_only
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, check_shape
from ..fluid.layers import utils
import paddle
from paddle import _C_ops
__all__ = []
def bernoulli(x, name=None):
    """
    Returns a Tensor filled with random binary (0 or 1) values drawn from a
    Bernoulli distribution. Each element of the input ``x`` is the success
    probability of the corresponding output element:

    .. math::
        out_i ~ Bernoulli (x_i)

    Args:
        x(Tensor): A tensor of per-element probabilities, each in [0, 1].
            The data type should be float32, float64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor of random 0/1 values with the same shape and dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            paddle.set_device('cpu')  # on CPU device
            paddle.seed(100)

            x = paddle.rand([2, 3])
            out = paddle.bernoulli(x)
            # e.g. [[1., 0., 1.],
            #       [0., 1., 0.]]

    """
    # Dynamic graph: call the C++ op directly.
    if in_dygraph_mode():
        return _C_ops.bernoulli(x)

    check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")

    # Fixed: helper was previously named "randint" — a copy-paste slip from
    # the randint implementation below in this module.
    helper = LayerHelper("bernoulli", **locals())
    # The op emits 0.0/1.0 values in x's floating dtype.
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='bernoulli', inputs={"X": x}, outputs={'Out': out}, attrs={})
    # Random sampling is not differentiable; never back-propagate through it.
    out.stop_gradient = True
    return out
def multinomial(x, num_samples=1, replacement=False, name=None):
    """
    Returns a Tensor of category indices sampled ``num_samples`` times from a
    Multinomial distribution whose (unnormalized) category weights are given
    by ``x``. Each element of ``x`` must be >= 0, and not all elements may be
    0. When ``replacement`` is True a category can be drawn more than once.

    Args:
        x(Tensor): A tensor with probabilities for generating the random
            number. The data type should be float32, float64.
        num_samples(int, optional): Number of samples, default is 1.
        replacement(bool, optional): Whether it is a replaceable sample,
            default is False.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An int64 Tensor of sampled category indices.

    Examples:
        .. code-block:: python

            import paddle

            paddle.seed(100)  # on CPU device
            x = paddle.rand([2, 4])
            out = paddle.multinomial(x, num_samples=5, replacement=True)
            # e.g. [[3 3 0 0 0]
            #       [3 3 3 1 0]]

            # Without replacement, num_samples must not exceed the number of
            # non-zero categories, otherwise an InvalidArgumentError is raised.
            out2 = paddle.multinomial(x, num_samples=3)

    """
    # Idiom fix: `assert not cond, msg` instead of comparing `== False`.
    assert not core.is_compiled_with_rocm(), (
        "multinomial op is not supported on ROCM yet.")

    if in_dygraph_mode():
        return _C_ops.multinomial(x, 'num_samples', num_samples,
                                  'replacement', replacement)

    check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")

    helper = LayerHelper("multinomial", **locals())
    # Sampled indices are always int64, regardless of x's dtype.
    out = helper.create_variable_for_type_inference(
        dtype=convert_np_dtype_to_dtype_('int64'))
    helper.append_op(
        type='multinomial',
        inputs={"X": x},
        outputs={'Out': out},
        attrs={'num_samples': num_samples,
               'replacement': replacement})
    # Sampling is not differentiable.
    out.stop_gradient = True
    return out
def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
    """
    Returns a Tensor of the given ``shape`` and ``dtype`` filled with samples
    from a Gaussian distribution with the given ``mean`` and ``std``.

    Args:
        shape (list|tuple|Tensor): The shape of the output Tensor. If a
            list/tuple, its entries are ints or 1-D int32/int64 Tensors of
            shape [1]; if a Tensor, it must be 1-D with int32/int64 data.
        mean (float|int, optional): Mean of the distribution. Default is 0.0.
        std (float|int, optional): Standard deviation. Default is 1.0.
        dtype (str|np.dtype, optional): Output data type, float32 or float64.
            Default is None, meaning the global default dtype
            (see ``get_default_dtype``).
        name (str, optional): Normally there is no need for user to set this
            property. For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tensor: Gaussian random values with ``shape`` and ``dtype``.
    """
    op_type_for_check = 'gaussian/standard_normal/randn/normal'
    # 0 means: defer to the seed of the global default generator.
    seed = 0

    if dtype is None:
        dtype = paddle.framework.get_default_dtype()
        if dtype not in ['float32', 'float64']:
            raise TypeError(
                "{} only supports [float32, float64], but the default dtype is {}"
                .format(op_type_for_check, dtype))
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.gaussian_random('shape', shape, 'mean', float(mean),
                                      'std', float(std), 'seed', seed,
                                      'dtype', dtype)

    check_shape(shape, op_type_for_check)
    check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)

    op_inputs = {}
    op_attrs = {
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': dtype,
        'use_mkldnn': False
    }
    # Tensor-valued shape entries become op inputs; static ones become attrs.
    utils.get_shape_tensor_inputs(
        inputs=op_inputs, attrs=op_attrs, shape=shape,
        op_type=op_type_for_check)

    helper = LayerHelper('gaussian', **locals())
    result = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='gaussian_random',
        inputs=op_inputs,
        outputs={'Out': result},
        attrs=op_attrs)
    result.stop_gradient = True
    return result
def standard_normal(shape, dtype=None, name=None):
    """
    Returns a Tensor of the given ``shape`` and ``dtype`` filled with samples
    from the standard normal distribution (mean 0, standard deviation 1).

    Args:
        shape (list|tuple|Tensor): The shape of the output Tensor. If a
            list/tuple, its entries are ints or 1-D int32/int64 Tensors of
            shape [1]; if a Tensor, it must be 1-D with int32/int64 data.
        dtype (str|np.dtype, optional): Output data type, float32 or float64.
            Default is None, meaning the global default dtype
            (see ``get_default_dtype``).
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Standard-normal random values with ``shape`` and ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.standard_normal(shape=[2, 3])

            # shape entries may themselves be Tensors
            dim1 = paddle.to_tensor([2], 'int64')
            dim2 = paddle.to_tensor([3], 'int32')
            out2 = paddle.standard_normal(shape=[dim1, dim2, 2])

            # or the whole shape may be a 1-D int Tensor
            out3 = paddle.standard_normal(paddle.to_tensor([2, 3]))
    """
    # A standard normal is just a Gaussian with mean 0 and std 1.
    return gaussian(shape=shape, mean=0.0, std=1.0, dtype=dtype, name=name)
def randn(shape, dtype=None, name=None):
    """
    Alias of :func:`standard_normal`: returns a Tensor of the given ``shape``
    and ``dtype`` filled with samples from a standard normal distribution
    (mean 0, standard deviation 1).

    Args:
        shape (list|tuple|Tensor): The shape of the output Tensor. If a
            list/tuple, its entries are ints or 1-D int32/int64 Tensors of
            shape [1]; if a Tensor, it must be 1-D with int32/int64 data.
        dtype (str|np.dtype, optional): Output data type, float32 or float64.
            Default is None, meaning the global default dtype
            (see ``get_default_dtype``).
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Standard-normal random values with ``shape`` and ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.randn(shape=[2, 3])

            # shape entries may themselves be Tensors
            dim1 = paddle.to_tensor([2], 'int64')
            dim2 = paddle.to_tensor([3], 'int32')
            out2 = paddle.randn(shape=[dim1, dim2, 2])

            # or the whole shape may be a 1-D int Tensor
            out3 = paddle.randn(paddle.to_tensor([2, 3]))
    """
    return standard_normal(shape, dtype=dtype, name=name)
def normal(mean=0.0, std=1.0, shape=None, name=None):
    """
    Returns a Tensor filled with random values sampled from a normal
    distribution with ``mean`` and ``std`` (standard deviation).

    If ``mean`` is a Tensor, the output Tensor has the same shape and data type as ``mean``.
    If ``mean`` is not a Tensor and ``std`` is a Tensor, the output Tensor has the same shape and data type as ``std``.
    If ``mean`` and ``std`` are not a Tensor, the output Tensor has the same shape as ``shape``, with data type float32.
    If ``mean`` and ``std`` are Tensor, the num of elements of ``mean`` and ``std`` should be the same.

    Args:
        mean (float|Tensor, optional): The mean of the output Tensor's normal distribution.
            If ``mean`` is float, all elements of the output Tensor shared the same mean.
            If ``mean`` is a Tensor(data type supports float32, float64), it has per-element means.
            Default is 0.0
        std (float|Tensor, optional): The standard deviation of the output Tensor's normal distribution.
            If ``std`` is float, all elements of the output Tensor shared the same standard deviation.
            If ``std`` is a Tensor(data type supports float32, float64), it has per-element standard deviations.
            Default is 1.0
        shape (list|tuple|Tensor, optional): The shape of the output Tensor.
            Ignored when ``mean`` or ``std`` is a Tensor (their shape wins).
            Default is None.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor filled with random values sampled from a normal distribution with ``mean`` and ``std`` .

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.normal(shape=[2, 3])

            mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
            out2 = paddle.normal(mean=mean_tensor)

            std_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
            out3 = paddle.normal(mean=mean_tensor, std=std_tensor)

    """
    # Static-graph mode validates argument types up front; dygraph skips the
    # checks for speed.
    if not in_dygraph_mode():
        check_type(mean, 'mean', (int, float, Variable), 'normal')
        check_type(std, 'std', (int, float, Variable), 'normal')
        if isinstance(mean, Variable):
            check_dtype(
                mean.dtype, 'mean', ['float32', 'float64'], 'normal',
                "If mean is Tensor, it's data type only support float32, float64."
            )
        if isinstance(std, Variable):
            check_dtype(
                std.dtype, 'std', ['float32', 'float64'], 'normal',
                "If std is Tensor, it's data type only support float32, float64."
            )
        if shape is not None:
            check_shape(shape, 'normal')

    if isinstance(mean, Variable):
        if isinstance(std, Variable):
            # Align std's dtype and shape with mean's before the affine
            # transform below.
            if std.dtype != mean.dtype:
                std = paddle.cast(std, mean.dtype)
            mean_shape = paddle.shape(mean)
            std = paddle.reshape(std, mean_shape)
        else:
            std = float(std)
        out = standard_normal(paddle.shape(mean), mean.dtype, name)
    elif isinstance(std, Variable):
        mean = float(mean)
        out = standard_normal(paddle.shape(std), std.dtype, name)
    else:
        # Neither mean nor std is a Tensor: sample directly at (mean, std).
        return gaussian(shape=shape, mean=mean, std=std, name=name)

    # Scale/shift a standard-normal sample: N(0, 1) * std + mean ~ N(mean, std).
    out = out * std + mean
    if not in_dygraph_mode():
        # Bug fix: was `out.stop_grediant = True`, which silently set a
        # misspelled attribute and left gradients enabled in static mode.
        out.stop_gradient = True
    return out
def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
    """
    Returns a Tensor of the given ``shape`` and ``dtype`` filled with values
    drawn uniformly from the half-open range [``min``, ``max``).

    Args:
        shape(list|tuple|Tensor): The shape of the output Tensor. If a
            list/tuple, its entries are ints or 1-D int32/int64 Tensors of
            shape [1]; if a Tensor, it must be 1-D with int32/int64 data.
        dtype(str|np.dtype, optional): Output data type, float32 or float64.
            Default is None, meaning the global default dtype
            (see ``get_default_dtype``).
        min(float|int, optional): Inclusive lower bound of the range.
            Default is -1.0.
        max(float|int, optional): Exclusive upper bound of the range.
            Default is 1.0.
        seed(int, optional): Random seed used for generating samples. If seed
            is 0, the seed of the global default generator is used (settable
            via paddle.seed). A non-zero seed always produces the same
            numbers. Default is 0.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Uniform random values in [``min``, ``max``) with ``shape``
        and ``dtype``.

    Raises:
        TypeError: If ``shape`` is not list, tuple, Tensor.
        TypeError: If ``dtype`` is not float32, float64.

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.uniform(shape=[3, 4])

            # shape entries may themselves be Tensors
            dim1 = paddle.to_tensor([2], 'int64')
            dim2 = paddle.to_tensor([3], 'int32')
            out2 = paddle.uniform(shape=[dim1, dim2])

            # or the whole shape may be a 1-D int Tensor
            out3 = paddle.uniform(paddle.to_tensor([2, 3]))
    """
    if dtype is None:
        dtype = paddle.framework.get_default_dtype()
        if dtype not in ['float32', 'float64']:
            raise TypeError(
                "uniform/rand only supports [float32, float64], but the default dtype is {}".
                format(dtype))
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.uniform_random('shape', shape, 'min', float(min),
                                     'max', float(max), 'seed', seed,
                                     'dtype', dtype)

    check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')

    op_inputs = {}
    op_attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
    # Tensor-valued shape entries become op inputs; static ones become attrs.
    utils.get_shape_tensor_inputs(
        inputs=op_inputs, attrs=op_attrs, shape=shape, op_type='uniform/rand')

    helper = LayerHelper("uniform", **locals())
    result = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="uniform_random", inputs=op_inputs, attrs=op_attrs,
        outputs={"Out": result})
    result.stop_gradient = True
    return result
@dygraph_only
def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
    """
    This is the inplace version of OP ``uniform``: it refills the input
    tensor ``x`` in place with values drawn uniformly from [``min``, ``max``)
    and returns it. Please refer to :ref:`api_tensor_uniform`.

    Args:
        x(Tensor): The input tensor to be filled with random values.
        min(float|int, optional): Inclusive lower bound of the range.
            Default is -1.0.
        max(float|int, optional): Exclusive upper bound of the range.
            Default is 1.0.
        seed(int, optional): Random seed used for generating samples. If seed
            is 0, the seed of the global default generator is used (settable
            via paddle.seed). A non-zero seed always produces the same
            numbers. Default is 0.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The input tensor ``x``, refilled in place.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.ones(shape=[3, 4])
            x.uniform_()
    """
    # Consistency fix: go through the `_C_ops` alias like every other op call
    # in this module, instead of reaching into `core.ops` directly.
    return _C_ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
                                          seed)
def randint(low=0, high=None, shape=[1], dtype=None, name=None):
    """
    Returns a Tensor filled with random integers drawn uniformly from the
    half-open range [``low``, ``high``). If ``high`` is None (the default),
    integers are drawn from [0, ``low``) instead.

    Args:
        low (int): The lower bound (inclusive) of the range, or — when
            ``high`` is None — the upper bound of [0, ``low``). Default is 0.
        high (int, optional): The upper bound (exclusive) of the range.
            Default is None (see above for behavior if high = None).
        shape (list|tuple|Tensor): The shape of the output Tensor. If a
            list/tuple, its entries are ints or 1-D int32/int64 Tensors of
            shape [1]; if a Tensor, it must be 1-D with int32/int64 data.
            Default is [1].
        dtype (str|np.dtype, optional): Output data type, int32 or int64.
            If None, int64 is used. Default is None.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Random integers in [``low``, ``high``) with ``shape`` and
        ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.randint(low=-5, high=5, shape=[3])
            # e.g. [0, -3, 2]

            # shape entries may themselves be Tensors
            dim1 = paddle.to_tensor([2], 'int64')
            dim2 = paddle.to_tensor([3], 'int32')
            out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2])

            # a single argument means low=0, high=arg
            out3 = paddle.randint(10)
            # e.g. [7]
    """
    # Single-bound form: randint(n) samples from [0, n).
    if high is None:
        if low <= 0:
            raise ValueError(
                "If high is None, low must be greater than 0, but received low = {0}.".
                format(low))
        high, low = low, 0
    if dtype is None:
        dtype = 'int64'
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.randint('shape', shape, 'low', low, 'high', high,
                              'seed', 0, 'dtype', dtype)

    check_shape(shape, 'randint')
    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
    if low >= high:
        raise ValueError(
            "randint's low must less then high, but received low = {0}, "
            "high = {1}".format(low, high))

    op_inputs = dict()
    op_attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
    # Tensor-valued shape entries become op inputs; static ones become attrs.
    utils.get_shape_tensor_inputs(
        inputs=op_inputs, attrs=op_attrs, shape=shape, op_type='randint')

    helper = LayerHelper("randint", **locals())
    result = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='randint', inputs=op_inputs, outputs={'Out': result},
        attrs=op_attrs)
    result.stop_gradient = True
    return result
def randint_like(x, low=0, high=None, dtype=None, name=None):
    """
    Returns a Tensor filled with random integers from a discrete uniform
    distribution in the range [``low``, ``high``), with the same shape as ``x``.
    (use ``dtype`` if ``dtype`` is not None)
    If ``high`` is None (the default), the range is [0, ``low``).

    Args:
        x (Tensor): The input tensor which specifies shape. The dtype of ``x``
            can be bool, int32, int64, float16, float32, float64.
        low (int): The lower bound on the range of random values to generate.
            The ``low`` is included in the range. If ``high`` is None, the
            range is [0, ``low``). Default is 0.
        high (int, optional): The upper bound on the range of random values to
            generate, the ``high`` is excluded in the range. Default is None
            (see above for behavior if high = None). Default is None.
        dtype (str|np.dtype, optional): The data type of the output tensor.
            Supported data types: bool, int32, int64, float16, float32,
            float64. If ``dtype`` is None, the data type is the same as x's
            data type. Default is None.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor filled with random integers from a discrete uniform
        distribution in the range [``low``, ``high``), with x's shape and
        the chosen ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            # dtype defaults to x's dtype (here float32). Note the sampled
            # values are integral but stored in the floating-point dtype.
            x = paddle.zeros((1, 2)).astype("float32")
            out1 = paddle.randint_like(x, low=-5, high=5)
            # shape [1, 2], dtype paddle.float32, e.g. [[0., -3.]]

            # an explicit dtype overrides x's dtype
            out2 = paddle.randint_like(x, low=-5, high=5, dtype="int32")
            # shape [1, 2], dtype paddle.int32, e.g. [[0, -1]]

            out3 = paddle.randint_like(x, low=-5, high=5, dtype="bool")
            # shape [1, 2], dtype paddle.bool

    """
    # Single-bound form: randint_like(x, n) samples from [0, n).
    if high is None:
        if low <= 0:
            raise ValueError(
                "If high is None, low must be greater than 0, but received low = {0}.".
                format(low))
        high = low
        low = 0
    if dtype is None:
        dtype = x.dtype
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    shape = x.shape

    if low >= high:
        raise ValueError(
            "randint_like's low must less then high, but received low = {0}, "
            "high = {1}".format(low, high))

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        # Sample in int64 (the randint kernel's native dtype), then cast to
        # the requested output dtype.
        out = _C_ops.randint('shape', shape, 'low', low, 'high', high, 'seed',
                             0, 'dtype', core.VarDesc.VarType.INT64)
        out = paddle.cast(out, dtype)
        return out

    check_shape(shape, 'randint_like')
    check_dtype(dtype, 'dtype',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'randint_like')

    inputs = dict()
    attrs = {
        'low': low,
        'high': high,
        'seed': 0,
        'dtype': core.VarDesc.VarType.INT64
    }
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='randint_like')

    helper = LayerHelper("randint", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
    helper.append_op(
        type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    out.stop_gradient = True
    # Static path also samples in int64 first, then casts (see above).
    out = paddle.cast(out, dtype)
    return out
def randperm(n, dtype="int64", name=None):
    """
    This OP returns a 1-D Tensor filled with random permutation values from 0
    to n-1, with ``dtype``.

    Args:
        n (int): The upper bound (exclusive), and it should be greater than 0.
        dtype (str|np.dtype, optional): The data type of
            the output Tensor. Supported data types: int32, int64, float32,
            float64. Default is int64.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A 1-D Tensor filled with random permutation values from 0
        to n-1, with ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.randperm(5)
            # [4, 1, 2, 3, 0]  # random

            out2 = paddle.randperm(7, 'int32')
            # [1, 6, 2, 0, 4, 3, 5]  # random
    """
    # Normalize string / numpy dtypes to Paddle's internal VarType enum first,
    # since both the dygraph and static paths expect the enum form.
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
        # Eager mode: dispatch straight to the C++ op. seed 0 presumably means
        # "use the global generator" -- confirm against the randperm kernel.
        return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
    if n < 1:
        raise ValueError("The input n should be greater than 0 in randperm op.")
    check_dtype(dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'],
                'randperm')

    # Static graph mode: append a `randperm` op to the current program.
    helper = LayerHelper("randperm", **locals())
    out = helper.create_variable_for_type_inference(dtype)
    attrs = {'n': n, 'dtype': dtype, 'seed': 0}
    helper.append_op(
        type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs)
    # Random outputs are not differentiable.
    out.stop_gradient = True
    return out
def rand(shape, dtype=None, name=None):
    """
    This OP returns a Tensor filled with random values sampled from a uniform
    distribution in the range [0, 1), with ``shape`` and ``dtype``.

    Args:
        shape (list|tuple|Tensor): The shape of the output Tensor. If ``shape``
            is a list or tuple, the elements of it should be integers or Tensors
            (with the shape [1], and the data type int32 or int64). If ``shape``
            is a Tensor, it should be a 1-D Tensor(with the data type int32 or
            int64).
        dtype (str|np.dtype, optional): The data type of the output Tensor.
            Supported data types: float32, float64.
            Default is None, use global default dtype (see ``get_default_dtype``
            for details).
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor filled with random values sampled from a uniform
        distribution in the range [0, 1), with ``shape`` and ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            # example 1: attr shape is a list which doesn't contain Tensor.
            out1 = paddle.rand(shape=[2, 3])
            # [[0.451152  , 0.55825245, 0.403311  ],  # random
            #  [0.22550228, 0.22106001, 0.7877319 ]]  # random

            # example 2: attr shape is a list which contains Tensor.
            dim1 = paddle.to_tensor([2], 'int64')
            dim2 = paddle.to_tensor([3], 'int32')
            out2 = paddle.rand(shape=[dim1, dim2, 2])
            # [[[0.8879919 , 0.25788337],  # random
            #   [0.28826773, 0.9712097 ],  # random
            #   [0.26438272, 0.01796806]], # random
            #  [[0.33633623, 0.28654453],  # random
            #   [0.79109055, 0.7305809 ],  # random
            #   [0.870881  , 0.2984597 ]]] # random

            # example 3: attr shape is a Tensor, the data type must be int64 or int32.
            shape_tensor = paddle.to_tensor([2, 3])
            out3 = paddle.rand(shape_tensor)
            # [[0.22920267, 0.841956  , 0.05981819],  # random
            #  [0.4836288 , 0.24573246, 0.7516129 ]]  # random
    """
    # rand is simply uniform over [0, 1); delegate all shape/dtype handling.
    return uniform(shape, dtype, min=0.0, max=1.0, name=name)
| 40.72766 | 120 | 0.581522 |
4e282545478177cd7a079d2b658091c2e3a5328c | 5,121 | py | Python | Resources/app3.py | BankeUCI/SQLAlchemy-Challenge | ed961511bb605c400244c3af5d9253d478e0680a | [
"ADSL"
] | null | null | null | Resources/app3.py | BankeUCI/SQLAlchemy-Challenge | ed961511bb605c400244c3af5d9253d478e0680a | [
"ADSL"
] | null | null | null | Resources/app3.py | BankeUCI/SQLAlchemy-Challenge | ed961511bb605c400244c3af5d9253d478e0680a | [
"ADSL"
] | null | null | null | from time import strftime
from flask import Flask, jsonify
import sqlite3
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect, func
import pandas as pd
import numpy as np
import datetime as dt
#######################################################
# Database Setup
#######################################################
engine = create_engine("sqlite:///hawaii.sqlite")
# Reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Classes automap found
#Base.classes.keys()
metadata = MetaData()
# Save reference to table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from python to the DB
session = Session(engine)
#############################################################
# Flask Setup
#############################################################
app = Flask(__name__)
################################
# Flask Routes
################################
@app.route("/")
def Home():
    """Index route: list all available API routes as an HTML-ish string."""
    routes = [
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/<start>",
        "/api/v1.0/<start>/<end>",
    ]
    return "Available Routes:<br/>" + "<br/>".join(routes)
# Helper: compute the one-year window ending at the latest date in the database.
def date_calc():
    """Return (start_date, end_date) strings ("YYYY-MM-DD") spanning the final
    year of data in the Measurement table.
    """
    latest_date = session.query(func.max(Measurement.date)).all()
    today = dt.date.today()
    # Parse the "YYYY-MM-DD" string returned by the query into a date object.
    latest_date_fmt = today.replace(year=int(latest_date[0][0][:4]),
                                    month=int(latest_date[0][0][5:7]),
                                    day=int(latest_date[0][0][8:]))
    # One year ago from latest_date
    year_ago = latest_date_fmt - dt.timedelta(days=365)
    # Bug fix: the original did `latest_date_fmt - strftime("%Y-%m-%d")`, which
    # subtracts a string from a date and raises TypeError. The window's end is
    # the latest date itself, formatted back to a string.
    year_end = latest_date_fmt.strftime("%Y-%m-%d")
    startdate_year = year_ago.strftime("%Y-%m-%d")
    return (startdate_year, year_end)
@app.route("/api/v1.0/precipitation")
def precipitation():
    """JSON list of (date, station, precipitation) records for the last year
    of data, keyed "Date" / "Station" / "Precipitation".
    """
    # Fix: the original bound the helper's result to `range` and its output
    # containers to `list`/`dict`, shadowing three builtins.
    start_date, end_date = date_calc()
    results = session.query(Measurement.date, Measurement.station, Measurement.prcp).\
        filter(Measurement.date <= end_date).\
        filter(Measurement.date >= start_date).all()
    records = [{"Date": row[0], "Station": row[1], "Precipitation": row[2]}
               for row in results]
    return jsonify(records)
# Returning a JSON list of stations from the dataset.
@app.route("/api/v1.0/stations")
def stations():
    """JSON list of all stations (id and name)."""
    rows = session.query(Station.station, Station.name).all()
    # Bug fix: the original indexed the whole result list (`stations[0]`,
    # `stations[1]`) instead of the loop variable, so every entry repeated the
    # first two rows. Index each row instead. (Also stop shadowing builtins
    # `list`/`dict`.)
    records = [{"Station ID:": row[0], "Station Name": row[1]} for row in rows]
    return jsonify(records)
@app.route("/api/v1.0/tobs")
def tobs():
    """JSON list of temperature observations (date, tobs) for the last year
    of data.

    NOTE(review): the original comment mentioned the most-active station, but
    the query does not filter by station -- confirm intended behavior.
    """
    # Fix: avoid shadowing builtins `range`, `list`, and `dict`.
    start_date, end_date = date_calc()
    results = session.query(Measurement.date, Measurement.tobs).\
        filter(Measurement.date <= end_date).\
        filter(Measurement.date >= start_date).all()
    records = [{"date": row[0], "tobs": row[1]} for row in results]
    return jsonify(records)
# JSON list of the minimum temperature, the average temperature, and the max
# temperature for all dates on or after the given start date.
@app.route("/api/v1.0/<start>")
def tempstart(start):
    """JSON min/avg/max temperature over all dates >= `start` (YYYY-MM-DD)."""
    results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).order_by(Measurement.date.desc()).all()
    print("MIN, MAX, AVG Temperatures")
    # Fix: the aggregate query always yields exactly one row, so the original
    # `for ... : return` loop (returning on the first iteration, with a
    # builtin-shadowing `dict` name) is replaced by direct indexing.
    summary = {"Min Temp": results[0][0],
               "Avg Temp": results[0][1],
               "Max Temp": results[0][2]}
    return jsonify(summary)
@app.route("/api/v1.0/<start>/<end>")
def tempstartend(start, end):
    """JSON min/avg/max temperature for dates between `start` and `end`
    inclusive (both YYYY-MM-DD).
    """
    results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start, Measurement.date <= end).order_by(Measurement.date.desc()).all()
    # Fixes: the aggregate yields exactly one row (no loop / return-in-loop
    # needed), and the "Mini Temp" key is corrected to "Min Temp" to match the
    # /<start> route's response keys.
    summary = {"Min Temp": results[0][0],
               "Avg Temp": results[0][1],
               "Max Temp": results[0][2]}
    return jsonify(summary)
if __name__ == '__main__':
app.run(debug=True)
| 33.470588 | 131 | 0.614528 |
61f6a68c6974fef1d035a5278d1bd5dafeee12bb | 98 | py | Python | Functions advanced - Lab/Character combinations.py | DiyanKalaydzhiev23/Advanced---Python | ed2c60bb887c49e5a87624719633e2b8432f6f6b | [
"MIT"
] | null | null | null | Functions advanced - Lab/Character combinations.py | DiyanKalaydzhiev23/Advanced---Python | ed2c60bb887c49e5a87624719633e2b8432f6f6b | [
"MIT"
] | null | null | null | Functions advanced - Lab/Character combinations.py | DiyanKalaydzhiev23/Advanced---Python | ed2c60bb887c49e5a87624719633e2b8432f6f6b | [
"MIT"
] | null | null | null | from itertools import permutations
# Print every permutation of the input string, one per line. A plain loop
# replaces the original list comprehension, which was used only for its side
# effects and needlessly materialized list(permutations(...)) first.
for perm in permutations(input()):
    print(''.join(perm))
| 24.5 | 59 | 0.714286 |
34cd924d7c3e222ae3e964c6c497d33e7aff9b5c | 660 | py | Python | implementation/server/cms/migrations/0013_contentpage_image.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | null | null | null | implementation/server/cms/migrations/0013_contentpage_image.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | null | null | null | implementation/server/cms/migrations/0013_contentpage_image.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | 3 | 2018-10-01T12:04:36.000Z | 2021-01-07T09:30:50.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-29 14:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional header ``image`` foreign key (wagtailimages.Image) to
    ContentPage; SET_NULL on image deletion, no reverse accessor."""

    dependencies = [
        ('wagtailimages', '0020_add-verbose-name'),
        ('cms', '0012_auto_20180625_0816'),
    ]

    operations = [
        migrations.AddField(
            model_name='contentpage',
            name='image',
            field=models.ForeignKey(blank=True, help_text='Header image', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
        ),
    ]
| 28.695652 | 175 | 0.657576 |
2d38101b743df3d7ccfcfb00b64b6839e8bb30db | 780 | py | Python | posts/migrations/0003_comment_users_who_liked_post_users_who_liked.py | Eugust/mini_social | fc9ba094c75a82e952707e0e898f90fb8cbb654a | [
"MIT"
] | null | null | null | posts/migrations/0003_comment_users_who_liked_post_users_who_liked.py | Eugust/mini_social | fc9ba094c75a82e952707e0e898f90fb8cbb654a | [
"MIT"
] | null | null | null | posts/migrations/0003_comment_users_who_liked_post_users_who_liked.py | Eugust/mini_social | fc9ba094c75a82e952707e0e898f90fb8cbb654a | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-01-21 02:13
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add many-to-many "like" relations from users to Comment and Post
    (``users_who_liked`` on each model)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0002_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='users_who_liked',
            field=models.ManyToManyField(blank=True, related_name='users_who_liked_comment', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='post',
            name='users_who_liked',
            field=models.ManyToManyField(blank=True, related_name='users_who_liked_post', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 30 | 122 | 0.657692 |
297b704a60faf39c37015cda8c07d3c91181701e | 15,575 | py | Python | qa/rpc-tests/wallet.py | Xrunner1/luckycore-0.12.1.3 | f7fe1e74f533a384b14a667202779608389eeea5 | [
"MIT"
] | 3 | 2019-11-25T18:31:45.000Z | 2020-11-14T16:58:14.000Z | qa/rpc-tests/wallet.py | Xrunner1/luckycore-0.12.1.3 | f7fe1e74f533a384b14a667202779608389eeea5 | [
"MIT"
] | 1 | 2020-08-03T05:22:22.000Z | 2020-08-03T05:22:22.000Z | qa/rpc-tests/wallet.py | Xrunner1/luckycore-0.12.1.3 | f7fe1e74f533a384b14a667202779608389eeea5 | [
"MIT"
] | 4 | 2019-06-08T06:52:18.000Z | 2021-08-21T14:19:33.000Z | #!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
    """End-to-end wallet RPC test (Python 2): mining rewards, sends with and
    without subtracted fees, locked outputs, rebroadcast, -walletbroadcast,
    watch-only imports, unicode account labels, and maintenance restarts.

    Statement order matters throughout: balances are tracked incrementally
    across mined blocks, so steps cannot be reordered.
    """

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        target_fee = fee_per_byte * tx_size
        if fee < target_fee:
            raise AssertionError("Fee of %s LUCKY too low! (Should be %s LUCKY)"%(str(fee), str(target_fee)))
        # allow the node's estimation to be at most 2 bytes off
        if fee > fee_per_byte * (tx_size + 2):
            raise AssertionError("Fee of %s LUCKY too high! (Should be %s LUCKY)"%(str(fee), str(target_fee)))
        return curr_balance

    def setup_chain(self):
        """Start from a clean chain with room for 4 nodes."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self, split=False):
        """Launch 3 nodes and connect them in a triangle (node 3 joins later)."""
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()

    def run_test (self):
        """Main scenario; see class docstring for the phases covered."""
        # Check that there's no UTXO on none of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        print "Mining blocks..."

        self.nodes[0].generate(1)

        walletinfo = self.nodes[0].getwalletinfo()
        # Block subsidy is 500 on this chain; it is immature until 100 confirms.
        assert_equal(walletinfo['immature_balance'], 500)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 500)
        assert_equal(self.nodes[1].getbalance(), 500)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Check that only first and second nodes have UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        # Send 210 LUCKY from 0 to 2 using sendtoaddress call.
        # Second transaction will be child of first, and will require a fee
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)

        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()

        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        self.nodes[2].lockunspent(False, [unspent_0])
        # With the only UTXO locked, a spend must fail.
        assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)

        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()

        # node0 should end up with 1000 LUCKY in block rewards plus fees, but
        # minus the 210 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 1000-210)
        assert_equal(self.nodes[2].getbalance(), 210)

        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)

        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))

        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)

        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 1000)
        assert_equal(self.nodes[2].getbalance("from1"), 1000-210)

        # Send 100 LUCKY normal
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), Decimal('100'))

        # Send 100 LUCKY with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('100')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Sendmany 100 LUCKY
        txid = self.nodes[2].sendmany('from1', {address: 100}, 0, "", [])
        self.nodes[2].generate(1)
        self.sync_all()
        node_0_bal += Decimal('100')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)

        # Sendmany 100 LUCKY with subtract fee from amount
        txid = self.nodes[2].sendmany('from1', {address: 100}, 0, "", [address])
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('100')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes)

        self.nodes.append(start_node(3, self.options.tmpdir))
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)

        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)

        assert(txid1 in self.nodes[3].getrawmempool())

        # Exercise balance rpcs
        assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
        assert_equal(self.nodes[0].getunconfirmedbalance(), 1)

        #check if we can list zero value tx as available coins
        #1. create rawtx
        #2. hex-changed one output to 0.0
        #3. sign and send
        #4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}

        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid = decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])

        self.sync_all()
        self.nodes[1].generate(1) #mine a block
        self.sync_all()

        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)

        #do some -walletbroadcast tests
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.sync_all()

        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        self.nodes[1].generate(1) #mine a block, tx should not be in there
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted

        #now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)

        #restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        sync_blocks(self.nodes)

        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2

        #tx should be added to balance because after restarting the nodes tx should be broadcastet
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))

        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        #check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        try:
            txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException as e:
            assert("Invalid amount" in e.error['message'])
        else:
            raise AssertionError("Must not parse invalid amounts")

        try:
            self.nodes[0].generate("2")
            raise AssertionError("Must not accept strings as numeric")
        except JSONRPCException as e:
            assert("not an integer" in e.error['message'])

        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all()

        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)

        # 3. Validate that the imported address is watch-only on node1
        assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])

        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})

        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)

        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})

        #check if wallet or blochchain maintenance changes the balance
        self.sync_all()
        blocks = self.nodes[0].generate(2)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()

        # Check modes:
        #   - True: unicode escaped as \u....
        #   - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
            for s in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getaccountaddress(s)
                label = self.nodes[0].getaccount(addr)
                assert_equal(label.encode('utf-8'), s.encode('utf-8')) # TODO remove encode(...) when supporting only Python3
                assert(s in self.nodes[0].listaccounts().keys())
        self.nodes[0].ensure_ascii = True # restore to default

        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            '-salvagewallet',
        ]
        for m in maintenance:
            print "check " + m
            stop_nodes(self.nodes)
            wait_bitcoinds()
            self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # reindex will leave rpc warm up "early"; Wait for it to finish
                time.sleep(0.1)
            # Balances must survive every maintenance restart unchanged.
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])

        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
| 44.5 | 165 | 0.635634 |
069374ad8e83fa6933463c6fb8ec9ed23c193d5b | 428 | py | Python | examples/yield.py | scottluskcis/learning-python | 034f332817430454d55729f8f98fded4c310094e | [
"MIT"
] | null | null | null | examples/yield.py | scottluskcis/learning-python | 034f332817430454d55729f8f98fded4c310094e | [
"MIT"
] | null | null | null | examples/yield.py | scottluskcis/learning-python | 034f332817430454d55729f8f98fded4c310094e | [
"MIT"
] | null | null | null | students = []
def read_file():
    """Read "students.txt" and append each line to the module-level `students`
    list; print a message instead of raising if the file cannot be read.
    """
    try:
        # `with` guarantees the file is closed even if reading fails part-way;
        # the original only closed on the success path.
        with open("students.txt", "r") as f:
            for student in read_students(f):
                students.append(student)
    except OSError:
        # Keep the original best-effort behavior: report and continue.
        print("Could not read file")
# use generator with yield to read each line in the file
# Generator: lazily hand back each line of the open file object.
def read_students(f):
    """Yield every line of `f`, one at a time."""
    yield from f
read_file()
print(students) | 20.380952 | 82 | 0.616822 |
32cb23cbfa9bdef4728e85d0014123652e4aefea | 4,646 | py | Python | python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py | worldlove521/Paddle | c7f1f3ed0c897073cc7ae8ec60a13a8217dffe7d | [
"Apache-2.0"
] | 1 | 2019-03-14T02:29:12.000Z | 2019-03-14T02:29:12.000Z | python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py | VonRosenchild/Paddle | 4ef6f738c3ce37aa7eee845b90ee78941f6eb43e | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py | VonRosenchild/Paddle | 4ef6f738c3ce37aa7eee845b90ee78941f6eb43e | [
"Apache-2.0"
] | 1 | 2019-03-14T02:29:15.000Z | 2019-03-14T02:29:15.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
from op_test import OpTest
def quantize_max_abs(x, max_range):
    """Symmetric max-abs quantization of the whole tensor.

    Returns (quantized, scale) where scale is the max absolute value of x and
    quantized = round(x / scale * max_range).
    """
    peak = np.abs(x).max()
    quantized = np.round(x / peak * max_range)
    return quantized, peak
def dequantize_max_abs(x, scale, max_range):
    """Inverse of quantize_max_abs: rescale quantized values back to floats."""
    step = scale / max_range
    return step * x
def channel_wise_quantize_max_abs(x, quant_bit=8):
    """Per-channel (axis 0) symmetric max-abs quantization.

    Returns (quantized, scales): one float32 scale per channel, each channel
    rounded to the signed `quant_bit` integer range.
    """
    max_range = math.pow(2, quant_bit - 1) - 1
    scales = [np.max(np.abs(x[c])).astype("float32") for c in range(x.shape[0])]
    y = x.copy()
    for c, s in enumerate(scales):
        y[c] = np.round(y[c] / s * max_range)
    return y, scales
def channel_wise_dequantize_max_abs(x,
                                    scales,
                                    quant_bits,
                                    activation_scale=None):
    """Per-channel (axis 0) dequantization; optionally apply a second
    activation-scale step using quant_bits[1]."""
    y = x.copy()
    weight_range = math.pow(2, quant_bits[0] - 1) - 1
    for c in range(x.shape[0]):
        y[c] = (scales[c] / weight_range) * y[c]
    if activation_scale is not None:
        y *= activation_scale / (math.pow(2, quant_bits[1] - 1) - 1)
    return y
class TestFakeChannelWiseDequantizeMaxAbsOpTwoScales(OpTest):
    """Op test for fake_channel_wise_dequantize_max_abs with two scale inputs:
    per-channel weight scales plus a single activation scale."""

    def set_args(self):
        # Two bit-widths: quant_bits[0] for weights, quant_bits[1] for activations.
        self.quant_bits = [8, 8]
        self.data_type = "float32"
        self.activation_scale = 0.7861

    def setUp(self):
        self.set_args()
        self.op_type = "fake_channel_wise_dequantize_max_abs"
        x = np.random.randn(4, 3, 64, 64).astype(self.data_type)
        # Quantize with the reference implementation, then compute the expected
        # dequantized output the op must reproduce.
        yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0])
        ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits,
                                              self.activation_scale)

        self.inputs = {
            'X': yq,
            'Scales': [("scales0", np.array(scales).astype(self.data_type)),
                       ("scales1", np.array(
                           [self.activation_scale]).astype(self.data_type))]
        }
        self.attrs = {'quant_bits': self.quant_bits}
        self.outputs = {'Out': ydq}

    def test_check_output(self):
        self.check_output()
class TestFakeChannelWiseDequantizeMaxAbsOpOneScale(OpTest):
    """Op test for fake_channel_wise_dequantize_max_abs with only per-channel
    weight scales (no activation scale)."""

    def set_args(self):
        self.quant_bits = [8]
        self.data_type = "float32"

    def setUp(self):
        self.set_args()
        self.op_type = "fake_channel_wise_dequantize_max_abs"
        x = np.random.randn(4, 3, 64, 64).astype(self.data_type)
        # Reference round-trip: quantize, then build the expected dequantized output.
        yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0])
        ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits)

        self.inputs = {
            'X': yq,
            'Scales': [("scales0", np.array(scales).astype(self.data_type))]
        }
        self.attrs = {'quant_bits': self.quant_bits}
        self.outputs = {'Out': ydq}

    def test_check_output(self):
        self.check_output()
class TestFakeDequantizeMaxAbsOp(OpTest):
    """Op test for fake_dequantize_max_abs with a single tensor-wide scale."""

    def set_args(self):
        self.num_bits = 8
        # Max representable magnitude of a signed `num_bits` integer.
        self.max_range = math.pow(2, self.num_bits - 1) - 1
        self.data_type = "float32"

    def setUp(self):
        self.set_args()
        self.op_type = "fake_dequantize_max_abs"
        x = np.random.randn(31, 65).astype(self.data_type)
        # Reference round-trip: quantize, then build the expected dequantized output.
        yq, scale = quantize_max_abs(x, self.max_range)
        ydq = dequantize_max_abs(yq, scale, self.max_range)

        self.inputs = {'X': yq, 'Scale': np.array(scale).astype(self.data_type)}
        self.attrs = {'max_range': self.max_range}
        self.outputs = {'Out': ydq}

    def test_check_output(self):
        self.check_output()
class TestFakeDequantizeMaxAbsOpDouble(TestFakeDequantizeMaxAbsOp):
    """Same op test but with float64 input data."""

    def set_args(self):
        self.num_bits = 8
        self.max_range = math.pow(2, self.num_bits - 1) - 1
        self.data_type = "float64"
class TestFakeDequantizeMaxAbsOp5Bits(TestFakeDequantizeMaxAbsOp):
    """Same op test but quantizing to 5 bits (max_range = 15)."""

    def set_args(self):
        self.num_bits = 5
        self.max_range = math.pow(2, self.num_bits - 1) - 1
        self.data_type = "float32"
if __name__ == "__main__":
unittest.main()
| 32.041379 | 80 | 0.625484 |
988774dbc19dd9cd88b384e2d409e67a1a23b5bb | 1,384 | py | Python | preprocess/tacotron/hyperparams.py | RothIdan/adaptive_voice_conversion | 06fe2af31fa6a2b3363dcad942c62d9dc388ee65 | [
"Apache-2.0"
] | 366 | 2019-02-22T04:27:03.000Z | 2022-03-31T08:48:02.000Z | preprocess/tacotron/hyperparams.py | RothIdan/adaptive_voice_conversion | 06fe2af31fa6a2b3363dcad942c62d9dc388ee65 | [
"Apache-2.0"
] | 37 | 2019-04-22T07:21:30.000Z | 2022-03-29T09:57:41.000Z | preprocess/tacotron/hyperparams.py | RothIdan/adaptive_voice_conversion | 06fe2af31fa6a2b3363dcad942c62d9dc388ee65 | [
"Apache-2.0"
] | 78 | 2019-03-15T01:21:38.000Z | 2022-03-17T03:55:38.000Z | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
By kyubyong park. kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/tacotron
'''
class Hyperparams:
    '''Hyper parameters for the Tacotron preprocessing/training pipeline.'''

    # pipeline
    prepro = False  # if True, run `python prepro.py` first before running `python train.py`.

    vocab = "PE abcdefghijklmnopqrstuvwxyz'.?" # P: Padding E: End of Sentence

    # data
    data = "/data/private/voice/LJSpeech-1.0"
    # data = "/data/private/voice/nick"
    test_data = 'harvard_sentences.txt'
    max_duration = 10.0  # seconds (per the name); usage not visible in this file
    top_db = 15  # presumably a silence-trim threshold in dB — confirm against usage

    # signal processing
    sr = 24000 # Sample rate.
    n_fft = 2048 # fft points (samples)
    frame_shift = 0.0125 # seconds
    frame_length = 0.05 # seconds
    hop_length = int(sr*frame_shift) # samples.
    win_length = int(sr*frame_length) # samples.
    n_mels = 512 # Number of Mel banks to generate
    power = 1.2 # Exponent for amplifying the predicted magnitude
    n_iter = 100 # Number of inversion iterations
    preemphasis = .97 # or None
    max_db = 100  # NOTE(review): looks like a dB-normalization bound — confirm
    ref_db = 20   # NOTE(review): looks like a dB reference level — confirm

    # model
    embed_size = 256 # alias = E
    encoder_num_banks = 16
    decoder_num_banks = 8
    num_highwaynet_blocks = 4
    r = 5 # Reduction factor. Paper => 2, 3, 5
    dropout_rate = .5

    # training scheme
    lr = 0.001 # Initial learning rate.
    logdir = "logdir/01"
    sampledir = 'samples'
    batch_size = 32
| 26.113208 | 93 | 0.642341 |
269e90797463e0ff68b93b6aa24e73c3c1de11ac | 1,577 | py | Python | .history/ClassFiles/OOP/Encapsulation_20210105153638.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | .history/ClassFiles/OOP/Encapsulation_20210105153638.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | .history/ClassFiles/OOP/Encapsulation_20210105153638.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | ''' Encapsulation : Part 1
Encapsulation is the process of restricting access to methods and variables in a class in order to prevent direct data modification so it prevents accidental data modification.
Encapsulation basically allows the internal representation of an object to be hidden from the view outside of the objects definition.
Public methods and variables can be accessed from anywhere within the program.
Private methods and variables are accessible from their own class.
Double underscore prefix before object name makes it private'
Encapsulation Part 2: 40
'''
# class Cars:
# def __init__(self,speed, color):
# self.speed = speed
# self.color = color
# def set_speed(self,value):
# self.speed = value
# def get_speed(self):
# return self.speed
# Encapsulation Part 2: 40
class Cars:
    """Minimal car model with no encapsulation: every attribute is public."""

    def __init__(self, speed, color):
        self.speed = speed
        self.color = color

    def get_speed(self):
        """Return the current speed."""
        return self.speed

    def set_speed(self, value):
        """Replace the current speed with *value*."""
        self.speed = value
ford = Cars(250,"green")
nissan = Cars(300,"red")
toyota = Cars(350, "blue")

ford.set_speed(450)  # Change the speed after instantiation via the setter method.
# ford.speed = 500  # The attribute can also be changed directly, because no encapsulation is in place.
print(ford.get_speed())
# print(ford)  # 500
| 25.031746 | 176 | 0.686113 |
743dc4a6682b3893e1967f2eabcaf3a2f7784640 | 1,059 | py | Python | contrastive.py | 804463592/pytorch-siamese-master | 34181ded641820cd6c3f0ea6255ac67a832c561b | [
"MIT"
] | 1 | 2018-12-24T06:34:59.000Z | 2018-12-24T06:34:59.000Z | contrastive.py | 804463592/pytorch-siamese-master | 34181ded641820cd6c3f0ea6255ac67a832c561b | [
"MIT"
] | null | null | null | contrastive.py | 804463592/pytorch-siamese-master | 34181ded641820cd6c3f0ea6255ac67a832c561b | [
"MIT"
] | 2 | 2019-05-21T14:44:05.000Z | 2022-02-12T06:25:54.000Z | import torch
import torch.nn
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on:
"""
def __init__(self, margin=1.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def check_type_forward(self, in_types):
assert len(in_types) == 3
x0_type, x1_type, y_type = in_types
assert x0_type.size() == x1_type.shape
assert x1_type.size()[0] == y_type.shape[0]
assert x1_type.size()[0] > 0
assert x0_type.dim() == 2
assert x1_type.dim() == 2
assert y_type.dim() == 1
def forward(self, x0, x1, y):
#self.check_type_forward((x0, x1, y))
# TODO: forward
# euclidian distance
diff = x0 - x1
dist_sq = torch.sum(torch.pow(diff, 2), 1)
dist = torch.sqrt(dist_sq)
mdist = self.margin - dist
dist = torch.clamp(mdist, min=0.0)
loss = y * dist_sq + (1 - y) * torch.pow(dist, 2)
loss = torch.sum(loss) / 2.0 / x0.size()[0]
return loss
| 27.153846 | 57 | 0.567517 |
80cca9b6fbad2e4fe22b3f266fd1341e9ff06822 | 5,726 | py | Python | Source/boost_1_33_1/libs/python/test/map_indexing_suite.py | spxuw/RFIM | 32b78fbb90c7008b1106b0cff4f8023ae83c9b6d | [
"MIT"
] | null | null | null | Source/boost_1_33_1/libs/python/test/map_indexing_suite.py | spxuw/RFIM | 32b78fbb90c7008b1106b0cff4f8023ae83c9b6d | [
"MIT"
] | null | null | null | Source/boost_1_33_1/libs/python/test/map_indexing_suite.py | spxuw/RFIM | 32b78fbb90c7008b1106b0cff4f8023ae83c9b6d | [
"MIT"
] | null | null | null | # Copyright Joel de Guzman 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
#####################################################################
# Check an object that we will use as container element
#####################################################################
>>> from map_indexing_suite_ext import *
>>> assert "map_indexing_suite_IntMap_entry" in dir()
>>> assert "map_indexing_suite_TestMap_entry" in dir()
>>> assert "map_indexing_suite_XMap_entry" in dir()
>>> x = X('hi')
>>> x
hi
>>> x.reset() # a member function that modifies X
>>> x
reset
>>> x.foo() # another member function that modifies X
>>> x
foo
# test that a string is implicitly convertible
# to an X
>>> x_value('bochi bochi')
'gotya bochi bochi'
#####################################################################
# Iteration
#####################################################################
>>> def print_xmap(xmap):
... s = '[ '
... for x in xmap:
... s += repr(x)
... s += ' '
... s += ']'
... print s
#####################################################################
# Setting (adding entries)
#####################################################################
>>> xm = XMap()
>>> xm['joel'] = 'apple'
>>> xm['tenji'] = 'orange'
>>> xm['mariel'] = 'grape'
>>> xm['tutit'] = 'banana'
>>> xm['kim'] = 'kiwi'
>>> print_xmap(xm)
[ (joel, apple) (kim, kiwi) (mariel, grape) (tenji, orange) (tutit, banana) ]
#####################################################################
# Changing an entry
#####################################################################
>>> xm['joel'] = 'pineapple'
>>> print_xmap(xm)
[ (joel, pineapple) (kim, kiwi) (mariel, grape) (tenji, orange) (tutit, banana) ]
#####################################################################
# Deleting an entry
#####################################################################
>>> del xm['joel']
>>> print_xmap(xm)
[ (kim, kiwi) (mariel, grape) (tenji, orange) (tutit, banana) ]
#####################################################################
# adding an entry
#####################################################################
>>> xm['joel'] = 'apple'
>>> print_xmap(xm)
[ (joel, apple) (kim, kiwi) (mariel, grape) (tenji, orange) (tutit, banana) ]
#####################################################################
# Indexing
#####################################################################
>>> len(xm)
5
>>> xm['joel']
apple
>>> xm['tenji']
orange
>>> xm['mariel']
grape
>>> xm['tutit']
banana
>>> xm['kim']
kiwi
#####################################################################
# Calling a mutating function of a container element
#####################################################################
>>> xm['joel'].reset()
>>> xm['joel']
reset
#####################################################################
# Copying a container element
#####################################################################
>>> x = X(xm['mariel'])
>>> x
grape
>>> x.foo()
>>> x
foo
>>> xm['mariel'] # should not be changed to 'foo'
grape
#####################################################################
# Referencing a container element
#####################################################################
>>> x = xm['mariel']
>>> x
grape
>>> x.foo()
>>> x
foo
>>> xm['mariel'] # should be changed to 'foo'
foo
>>> xm['mariel'] = 'grape' # take it back
>>> xm['joel'] = 'apple' # take it back
#####################################################################
# Contains
#####################################################################
>>> assert 'joel' in xm
>>> assert 'mariel' in xm
>>> assert 'tenji' in xm
>>> assert 'tutit' in xm
>>> assert 'kim' in xm
>>> assert not 'X' in xm
>>> assert not 12345 in xm
#####################################################################
# Some references to the container elements
#####################################################################
>>> z0 = xm['joel']
>>> z1 = xm['mariel']
>>> z2 = xm['tenji']
>>> z3 = xm['tutit']
>>> z4 = xm['kim']
>>> z0 # proxy
apple
>>> z1 # proxy
grape
>>> z2 # proxy
orange
>>> z3 # proxy
banana
>>> z4 # proxy
kiwi
#####################################################################
# Delete some container element
#####################################################################
>>> del xm['tenji']
>>> print_xmap(xm)
[ (joel, apple) (kim, kiwi) (mariel, grape) (tutit, banana) ]
>>> del xm['tutit']
>>> print_xmap(xm)
[ (joel, apple) (kim, kiwi) (mariel, grape) ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy
apple
>>> z1 # proxy
grape
>>> z2 # proxy detached
orange
>>> z3 # proxy detached
banana
>>> z4 # proxy
kiwi
#####################################################################
# Show that iteration allows mutable access to the elements
#####################################################################
>>> for x in xm:
... x.data().reset()
>>> print_xmap(xm)
[ (joel, reset) (kim, reset) (mariel, reset) ]
#####################################################################
# END....
#####################################################################
'''
def run(args = None):
    """Run this module's doctests and return the (failed, attempted) result.

    ``args``, when given, replaces ``sys.argv`` before the doctests execute.
    The previous version assigned to the non-existent ``sys.argxm``
    attribute, so the argument was silently ignored.
    """
    import sys
    import doctest
    if args is not None:
        sys.argv = args
    return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
    # Python 2 style entry point: run the doctests and exit with the number
    # of failed examples as the process status (0 means success).
    print 'running...'
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
| 26.509259 | 81 | 0.367097 |
c2621d6fbec00acc175d0f71f681cf90e3685dd8 | 819 | py | Python | instagram_api/response/model/broadcast.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | 13 | 2019-08-07T21:24:34.000Z | 2020-12-12T12:23:50.000Z | instagram_api/response/model/broadcast.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | instagram_api/response/model/broadcast.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
from .user import User
__all__ = ['Broadcast', 'BroadcastInterface']
class BroadcastInterface(ApiInterfaceBase):
broadcast_owner: User
broadcast_status: str
cover_frame_url: str
published_time: str
broadcast_message: str
muted: AnyType
media_id: int
id: str
rtmp_playback_url: str
dash_abr_playback_url: str
dash_playback_url: str
ranked_position: AnyType
organic_tracking_token: str
seen_ranked_position: AnyType
viewer_count: int
dash_manifest: str
expire_at: Timestamp
encoding_tag: str
total_unique_viewer_count: int
internal_only: bool
number_of_qualities: int
class Broadcast(PropertyMapper, BroadcastInterface):
    """Concrete broadcast response model; mapping comes from PropertyMapper."""
    pass
| 22.75 | 53 | 0.749695 |
022fb8314549d1e47e23953cdc0148181111b001 | 6,025 | py | Python | tests/functional/test_plugins_host.py | arenadata/adcm | a499caa30adc2a53e7b3f46c96a865f9e4079e4e | [
"Apache-2.0"
] | 16 | 2019-11-28T18:05:21.000Z | 2021-12-08T18:09:18.000Z | tests/functional/test_plugins_host.py | arenadata/adcm | a499caa30adc2a53e7b3f46c96a865f9e4079e4e | [
"Apache-2.0"
] | 1,127 | 2019-11-29T08:57:25.000Z | 2022-03-31T20:21:32.000Z | tests/functional/test_plugins_host.py | arenadata/adcm | a499caa30adc2a53e7b3f46c96a865f9e4079e4e | [
"Apache-2.0"
] | 10 | 2019-11-28T18:05:06.000Z | 2022-01-13T06:16:40.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adcm_host plugin"""
# pylint:disable=redefined-outer-name
import adcm_client.base
import allure
import pytest
from adcm_client.objects import ADCMClient, Bundle, Provider
from adcm_pytest_plugin.utils import get_data_dir, wait_until_step_succeeds
@pytest.fixture()
def bundle(sdk_client_fs: ADCMClient) -> Bundle:
    """Upload bundle and create 4 provider objects"""
    # Bundle files live next to this test module (see get_data_dir).
    bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__))
    bundle.provider_create(name="first_p")
    bundle.provider_create(name="second_p")
    bundle.provider_create(name="third_p")
    bundle.provider_create(name="forth_p")
    return bundle
@pytest.fixture()
def first_p(bundle: Bundle):
    """First provider (created by the `bundle` fixture)"""
    return bundle.provider(name="first_p")
@pytest.fixture()
def second_p(bundle: Bundle):
    """Second provider (created by the `bundle` fixture)"""
    return bundle.provider(name="second_p")
@pytest.fixture()
def third_p(bundle: Bundle):
    """Third provider (created by the `bundle` fixture)"""
    return bundle.provider(name="third_p")
@pytest.fixture()
def forth_p(bundle: Bundle):
    """Fourth provider (fixture name kept as "forth_p" for compatibility)"""
    return bundle.provider(name="forth_p")
def test_create_one_host(second_p: Provider):
    """Test scenario:

    1. Create three providers
    2. Create host on one of the providers
    3. Ensure host exists
    """
    hostname = "second_h"
    with allure.step('Run action create host'):
        # try_wait() raises if the job does not finish successfully.
        second_p.action(name="create_host").run(config_diff={'fqdn': hostname}).try_wait()
    second_h = second_p.host(fqdn=hostname)
    with allure.step('Check if host is created'):
        assert second_h.provider().id == second_p.id
        assert second_h.fqdn == hostname
def test_create_multi_host_and_delete_one(first_p: Provider, third_p: Provider):
    """Test scenario:

    1. Create three providers
    2. Create two host from first providers
    3. Create one host from third provider
    4. Remove one of host binded to first provider
    5. Check that host has been removed
    6. Check that other hosts still there.
    """
    with allure.step('Create two host from first providers'):
        first_p.action(name="create_host").run(config_diff={'fqdn': "one_one"}).try_wait()
        first_p.action(name="create_host").run(config_diff={'fqdn': "one_two"}).try_wait()
    with allure.step('Create one host from third provider'):
        third_p.action(name="create_host").run(config_diff={'fqdn': "three_one"}).try_wait()
    with allure.step('Remove one of host binded to first provider'):
        one_two = first_p.host(fqdn="one_two")
        one_two.action(name="remove_host").run().try_wait()
    with allure.step('Check that host has been removed'):
        assert first_p.host(fqdn="one_one").fqdn == "one_one"
    with allure.step('Check that other hosts still there'):
        assert third_p.host(fqdn="three_one").fqdn == "three_one"
        # Looking up the removed host must now fail.
        with pytest.raises(adcm_client.base.ObjectNotFound):
            first_p.host(fqdn="one_two")
def _assert_that_object_exists(get_object_func, *args, **kwargs):
    """Fetch an object via *get_object_func* and fail if it is missing.

    Raises AssertionError (so `wait_until_step_succeeds` can retry) when the
    lookup raises ObjectNotFound or when it yields None.
    """
    try:
        found = get_object_func(*args, **kwargs)
    except adcm_client.base.ObjectNotFound as lookup_error:
        raise AssertionError("Object still not found") from lookup_error
    if found is None:
        raise AssertionError("Object is None")
def test_check_host_lock_during_operations(forth_p: Provider):
    """Test scenario:

    1. Create provider
    2. Create host first host on provider
    3. Run job that creates the second host on provider
    4. Wait until second host will be created.
    5. Check that both host is locked
    6. Wait for job to be finished without errors
    7. Check that both hosts is free
    8. Run remove action on one of hosts
    9. Check that host under action is locked, while other host is free
    10. Wait for job to be finished without errors
    11. Check that remaining host is free.
    """
    with allure.step('Create host first host on provider'):
        forth_p.action(name="create_host").run(config_diff={'fqdn': "forth_one"}).try_wait()
    with allure.step('Run job that creates the second host on provider'):
        # 'sleep': 2 keeps the job running long enough to observe locks.
        job = forth_p.action(name="create_host").run(config={'fqdn': "forth_two", 'sleep': 2})
    with allure.step('Wait until second host will be created'):
        wait_until_step_succeeds(_assert_that_object_exists, period=0.5, get_object_func=forth_p.host, fqdn="forth_two")
    forth_two_h = forth_p.host(fqdn="forth_two")
    forth_one_h = forth_p.host(fqdn='forth_one')
    with allure.step('Check that both host has is locked'):
        assert forth_one_h.locked is True
        assert forth_two_h.locked is True
    with allure.step('Wait for job to be finished without errors'):
        job.try_wait()
    with allure.step('Check that both hosts is free'):
        # reread() refreshes the cached object state from the server.
        forth_one_h.reread()
        forth_two_h.reread()
        assert forth_one_h.locked is False
        assert forth_two_h.locked is False
    with allure.step('Run remove action on one of hosts'):
        job = forth_one_h.action(name="remove_host").run(config={"sleep": 2})
    with allure.step('Check that host under action is locked, while other host is free'):
        forth_one_h.reread()
        forth_two_h.reread()
        assert forth_one_h.locked is True
        assert forth_two_h.locked is False
    with allure.step('Wait for job to be finished without errors'):
        job.try_wait()
    with allure.step('Check that remaining host is free'):
        forth_two_h.reread()
        assert forth_two_h.locked is False
| 39.123377 | 120 | 0.7039 |
3aa6d4570748042d8122fc00520e86bb6de40db2 | 13,566 | py | Python | trunk/swen610/toolshare/views/tool_controller.py | leon486/toolshare | 13f6ab1afb96a5f8294c121ff6916a1fcab5c2bc | [
"MIT"
] | null | null | null | trunk/swen610/toolshare/views/tool_controller.py | leon486/toolshare | 13f6ab1afb96a5f8294c121ff6916a1fcab5c2bc | [
"MIT"
] | null | null | null | trunk/swen610/toolshare/views/tool_controller.py | leon486/toolshare | 13f6ab1afb96a5f8294c121ff6916a1fcab5c2bc | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from toolshare.models.tool import Tool
from toolshare.models.user import User
from toolshare.models.reservation import Reservation
from toolshare.views.base_controller import BaseController
from toolshare.forms.forms_tool import ToolRegistrationForm
from toolshare.forms.forms_tool import BorrowToolForm, ChangeToolForm,ChangeToolAvailabilityForm
import datetime
from datetime import date, timedelta as td
from django.contrib import messages
from django.utils import timezone
import pdb
import pytz
from toolshare.forms.forms_tool import ChangeAvailabilityForm
from toolshare.utils import EmailSender
# Create your views here.
class ToolController(BaseController):
    # Number of tools shown per page in the paginated list/search views.
    PAGE_SIZE = 4
@staticmethod
@login_required
def register_tool(request):
user = User.objects.get(pk=request.user.id)
if request.method == 'POST':
pass
registration_form = ToolRegistrationForm(request.POST,
request.FILES)
if registration_form.is_valid():
# Generate the new-user from the Form
new_tool = registration_form.save(commit=False)
new_tool.owner = user
new_tool.status = 'A'
if new_tool.shed is not None:
new_tool.pickup_location = 'At Shed'
new_tool.save()
return redirect('/toolshare/list-owned-tools')
else:
# Show the registration-form
dummy_tool = Tool()
dummy_tool.pickup_location = user.pickup_location
registration_form = ToolRegistrationForm(instance=dummy_tool)
registration_form.fields['shed'].queryset = user.shed_set
return render(request, 'toolshare/tool-registration.html', {
'registration_form': registration_form
})
    @staticmethod
    @login_required
    def find_tool(request):
        """Search for borrowable tools in the user's share zone.

        Filters by name substring and, when both dates are given, excludes
        tools with an approved ('A') reservation overlapping the requested
        window. Results are paginated with PAGE_SIZE.
        NOTE(review): a non-GET request renders the template with no context.
        """
        user = User.objects.get(pk=request.user.id)
        if request.method == 'GET':
            search_for = request.GET.get('search_for')
            to_date = request.GET.get('to_date')
            from_date = request.GET.get('from_date')

            # Match empty fields to make the date-range query work when only
            # one bound was supplied.
            if to_date == '':
                to_date = from_date
            if from_date == '':
                from_date = to_date

            # Deactivated tools ('D') are never offered.
            tools = Tool.objects.all().exclude(status='D')
            if search_for is not None:
                tools = tools.filter(name__contains=search_for)

            # Find all approved reservations during the specified dates ...
            if (to_date is not None and to_date != '') and (from_date is not None and from_date != ''):
                reservations = Reservation.objects.filter(start_date__lte=to_date,
                                                          end_date__gte=from_date,
                                                          status='A')
                # ... and filter their tools from the results.
                for r in reservations:
                    tools = tools.exclude(reservation=r)

            # Only keep tools whose owner shares the requesting user's zone.
            tools_shared_zone = list()
            for tool in tools:
                owner = User.objects.get(pk=tool.owner.id)
                if owner.share_zone == user.share_zone:
                    tools_shared_zone.append(tool)
            tools = tools_shared_zone

            tool_paginator = Paginator(tools, ToolController.PAGE_SIZE)
            page = request.GET.get('page')
            try:
                tool_page = tool_paginator.page(page)
            except PageNotAnInteger:
                tool_page = tool_paginator.page(1)
            except EmptyPage:
                # If page is out of range (e.g. 9999),
                # deliver last page of results.
                tool_page = tool_paginator.page(tool_paginator.num_pages)

            # Normalize the echo-back values for the template (this re-fetch
            # of search_for duplicates the one above).
            search_for = request.GET.get('search_for')
            if search_for is None:
                search_for = ''
            if from_date is None:
                from_date = ''
            if to_date is None:
                to_date = ''

            return render(request, 'toolshare/find_tool.html', {
                'tool_count': len(tools),
                'tool_page': tool_page,
                'search_for': search_for,
                'to_date': to_date,
                'from_date': from_date
            })
        return render(request, 'toolshare/find_tool.html')
    @staticmethod
    @login_required
    def list_owned_tools(request):
        """List the logged-in user's own tools, paginated with PAGE_SIZE.

        NOTE(review): a non-GET request returns None — confirm whether that
        path is ever reached.
        """
        if request.method == 'GET':
            user = User.objects.get(pk=request.user.id)
            user_tools = user.tool_set.all()
            tool_paginator = Paginator(user_tools, ToolController.PAGE_SIZE)
            page = request.GET.get('page')
            try:
                tool_page = tool_paginator.page(page)
            except PageNotAnInteger:
                tool_page = tool_paginator.page(1)
            except EmptyPage:
                # If page is out of range (e.g. 9999),
                # deliver last page of results.
                tool_page = tool_paginator.page(tool_paginator.num_pages)
            return render(request, 'toolshare/list-owned-tools.html', {
                'tool_page': tool_page,
                'user_tools': user_tools
            })
def tool_detail(request, tool_id):
if request.method == 'GET':
tool = Tool.objects.get(pk=tool_id)
return render(request, 'toolshare/tool-detail.html', {
'tool':tool
})
    @staticmethod
    @login_required
    def change_tool_info(request, tool_id):
        """Edit a tool's mutable fields (description, category, status,
        special instructions, pickup location, picture).

        GET renders the edit form bound to the tool; a valid POST copies the
        form values onto the existing Tool and redirects to its detail page.
        NOTE(review): an invalid POST appears to fall through without a
        return — confirm intended behavior.
        """
        tool = Tool.objects.get(pk= tool_id)

        if request.method == 'POST':
            changeToolInfo = ChangeToolForm(request.POST, request.FILES)
            if changeToolInfo.is_valid():
                # Copy editable fields from the unsaved form instance onto
                # the persistent tool (the name is deliberately not copied).
                newtool = changeToolInfo.save(commit=False)
                #tool.name = newtool.name
                tool.description = newtool.description
                tool.category = newtool.category
                tool.status = newtool.status
                tool.special_instructions = newtool.special_instructions
                tool.pickup_location = newtool.pickup_location
                if "my_picture" in request.FILES:
                    tool.picture = request.FILES['my_picture']
                tool.save()
                return redirect('/toolshare/tool-detail/%s'%tool_id)
        else:
            changeToolInfo = ChangeToolForm(instance=tool)
            return render(request, 'toolshare/tool-info.html',{
                'changeToolInfo' : changeToolInfo,
                'tool':tool
            })
@staticmethod
@login_required
def change_tool_availability(request, tool_id):
tool = Tool.objects.get(pk= tool_id)
changeToolAvail_form = ChangeToolAvailabilityForm(requested_tool_id=tool_id)
if request.method == 'POST':
Flag = False
sharedTool = Tool.objects.get(pk = tool_id)
startdate = request.POST['start_date']
startDate = datetime.datetime.strptime(startdate, "%m/%d/%Y").replace(tzinfo=pytz.UTC)
enddate = request.POST['end_date']
endDate = datetime.datetime.strptime(enddate, "%m/%d/%Y").replace(tzinfo=pytz.UTC)
# Get all the reservation list of the tool
tool_reservations = Reservation.objects.filter(tool_id = tool_id)
for reserve in tool_reservations:
if reserve.status == 'RP' and reserve.borrower != request.user:
if (startDate >= reserve.start_date and startDate <= reserve.end_date) or (startDate <= reserve.start_date and startDate <= reserve.end_date):
messages.add_message(request, messages.ERROR, 'You cannot change the tool availability.')
Flag = False
else:
Flag = True
else:
Flag = True
if Flag:
for reserve in tool_reservations:
if reserve.borrower != request.user:
if reserve.status == 'P' or reserve.status == 'A':
if (startDate >= reserve.start_date and startDate <= reserve.end_date) or (startDate <= reserve.start_date and startDate <= reserve.end_date):
reserve.cancel_msg = 'Your tool reservation has been canceled since the owner changed the availability.'
reserve.status = 'C'
reserve.save()
newReservation = Reservation()
newReservation.status = 'A'
newReservation.tool = sharedTool
newReservation.start_date = startDate
newReservation.end_date = endDate
newReservation.borrower = request.user
newReservation.lender = request.user
newReservation.save()
return redirect('/toolshare/change-tool-availability/%s'%tool_id)
else:
return render(request, 'toolshare/change-tool-availability.html', {'tool': tool,'changeToolAvail_form': changeToolAvail_form,'tool_id': tool_id,'current_date': datetime.datetime.now().strftime('%m/%d/%Y')})
@staticmethod
@login_required
def deactivate_tool(request, tool_id):
tool = Tool.objects.get(pk=tool_id)
user = User.objects.get(pk=request.user.id)
if tool.owner == user:
tool.status = 'D'
tool.save()
return redirect('/toolshare/tool-detail/%s'%tool_id)
#return render(request, 'toolshare/tool-detail.html',{
# 'tool':tool
#})
    @staticmethod
    @login_required
    def change_availability_tool(request, tool_id):
        """Create an owner-held availability block for a tool via a form.

        A valid POST saves an 'A' reservation for the chosen range (lender is
        the shed coordinator when the tool lives in a shed, otherwise the
        owner), cancels overlapping approved reservations (-> 'C', with a
        cancellation email) and rejects overlapping pending ones (-> 'R',
        with a rejection email), then redirects to the tool detail page with
        a success message. GET (or an invalid POST) renders the form.
        """
        tool = Tool.objects.get(pk= tool_id)
        user = User.objects.get(pk = request.user.id)
        if request.method == 'POST':
            change_avail_form = ChangeAvailabilityForm(request.POST, requested_tool_id=tool_id)
            if change_avail_form.is_valid():
                new_reservation = change_avail_form.save(commit=False)
                new_reservation.tool = tool
                new_reservation.borrower = user
                if tool.shed is not None:
                    new_reservation.lender = tool.shed.coordinator
                else:
                    new_reservation.lender = user
                new_reservation.status = 'A'

                # Cancel all approved and pending reservations overlapping
                # the new range (the DB filter already restricts to
                # overlapping rows; the inner if re-checks the same bound).
                reservations = Reservation.objects.filter(
                    tool_id=tool.id,
                    end_date__gt=new_reservation.start_date,
                    start_date__lt=new_reservation.end_date
                )
                for reservation in reservations:
                    if (new_reservation.start_date <= reservation.end_date and
                            new_reservation.end_date >= reservation.start_date):
                        if reservation.status == 'A':
                            reservation.status = 'C'
                            reservation.cancel_msg = 'The owner cancelled this reservation.'
                            EmailSender.send_cancel_request_email(reservation)
                        elif reservation.status == 'P':
                            reservation.status = 'R'
                            reservation.reject_msg = 'The owner rejected this reservation.'
                            EmailSender.send_approve_reject_request_email(reservation)
                        reservation.save()
                # Save the owner-reservation
                new_reservation.save()
                # Display the success message
                a_start = new_reservation.start_date.strftime('%m/%d/%Y')
                a_end = new_reservation.end_date.strftime('%m/%d/%Y')
                messages.add_message(request, messages.SUCCESS, 'You changed the availability of your tool between {0} - {1}. \n"{2}" reservations were cancelled.'.format(a_start, a_end, len(reservations)))
                return redirect('/toolshare/tool-detail/%s' % tool_id)
        else:
            change_avail_form = ChangeAvailabilityForm(requested_tool_id = tool_id)
        return render(request, 'toolshare/change-tool-availability.html', {
            'tool': tool,
            'changeToolAvail_form': change_avail_form,
            'tool_id': tool_id,
            'current_date': datetime.datetime.now().strftime('%m/%d/%Y')
        })
    @staticmethod
    @login_required
    def activate_deactivate_tool(request, tool_id):
        """Toggle a tool between active and deactivated ('D') status.

        Deactivation is refused (with a warning message) while the tool still
        has approved future reservations. Always redirects to the tool's
        detail page.
        """
        tool = Tool.objects.get(pk=tool_id)
        if tool.status != 'D':
            now = timezone.now()
            # Approved reservations that have not started yet block deactivation.
            reservations = Reservation.objects.filter(tool_id = tool.id, status ="A", start_date__gt = now)
            if len(reservations) > 0:
                messages.add_message(request, messages.WARNING, 'There are pending/approved reservations for the tool. Cancel the reservations first.')
                return redirect('/toolshare/tool-detail/%s'%tool_id)
            tool.status = 'D'
            tool.save()
            return redirect('/toolshare/tool-detail/%s'%tool_id)
        else:
            tool.status = 'A'
            tool.save()
            return redirect('/toolshare/tool-detail/%s'%tool_id)
ae0c6cb688c671cb936e70764582a5805af662fe | 2,793 | py | Python | evaluation.py | suikei-wang/Towards-Interpretable-Attention-Networks-for-Cervical-Cancer-Analysis | 30b69394cc3fe339d2bc9b4c3b17cd345d088dff | [
"MIT"
] | 1 | 2022-01-19T10:01:15.000Z | 2022-01-19T10:01:15.000Z | evaluation.py | suikei-wang/Towards-Interpretable-Attention-Networks-for-Cervical-Cancer-Analysis | 30b69394cc3fe339d2bc9b4c3b17cd345d088dff | [
"MIT"
] | null | null | null | evaluation.py | suikei-wang/Towards-Interpretable-Attention-Networks-for-Cervical-Cancer-Analysis | 30b69394cc3fe339d2bc9b4c3b17cd345d088dff | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support as score
from models.ResNet50 import initialize_model as ResNet50
from models.Pretrained import initialize_model as Pretrained
from models.DenseNet import initialize_model as DenseNet
from dataLoader import get_dataloaders
from train_evaluate import evaluate
# Run the whole evaluation on GPU when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# evaluation
print("Evaluation on validation and test set")
print("=" * 20)
# Dataset-dependent class count (5 here; 7 for the smear dataset).
num_classes = 5
# num_classes = 7 # for smear dataset
batch_size = 16
criterion = nn.CrossEntropyLoss()
# Build the ResNet50 baseline and load one of the trained checkpoints
# (uncomment the matching line for other models/baselines).
model, input_size = ResNet50(num_classes)
model.load_state_dict(torch.load('weights/baseline1'))
# model.load_state_dict(torch.load('weights/baseline2'))
# model.load_state_dict(torch.load('weights/baseline3'))
# model.load_state_dict(torch.load('weights/baseline4'))
# model.load_state_dict(torch.load('weights/model1'))
# model.load_state_dict(torch.load('weights/model2'))
# model.load_state_dict(torch.load('weights/model3'))
# model.load_state_dict(torch.load('weights/model4'))
model = model.to(device)
generate_validation_labels = True
dataloaders, class_name = get_dataloaders(input_size, batch_size, True)

# Evaluate loss and top-1/top-5 accuracy on the validation and test splits.
val_loss, val_top1, val_top5, val_labels = evaluate(model, dataloaders['val'], criterion, is_labelled = True, generate_labels = generate_validation_labels, k = 5)
epoch_loss, top1_acc, top5_acc, test_labels = evaluate(model, dataloaders['test'], criterion, is_labelled = True, generate_labels = True, k = 5)
print("Top 1 accuracy on test set is", top1_acc)

# Get the confusion matrix from test:
# confusion_matrix[true_class] is a row of predicted-class counts.
confusion_matrix = {x: [0,0,0,0,0] for x in class_name}
# confusion_matrix = {x: [0,0,0,0,0,0,0] for x in class_name} for smear dataset
running_top1_correct = 0
loader = dataloaders['test']
labels_array = []
pred_array = []
for inputs, labels in tqdm(loader):
    inputs = inputs.to(device)
    labels = labels.to(device)
    outputs = model(inputs)
    # Top-1 prediction per sample.
    _, preds = torch.topk(outputs, k=1, dim=1)
    for i in range(len(labels)):
        original_label = int(labels[i])
        labels_array.append(original_label)
        pred_array.append(int(preds[i]))
        confusion_matrix[class_name[original_label]][int(preds[i])] += 1
    running_top1_correct += torch.sum(preds[:, 0] == labels.data)

# Per-class precision/recall/F1/support from sklearn.
precision, recall, fscore, support = score(labels_array, pred_array)
epoch_top1_acc = float(running_top1_correct.double() / len(loader.dataset))
# Normalize each confusion-matrix row to fractions of the true class.
percentage = {x: [y /sum(confusion_matrix[x]) for y in confusion_matrix[x]] for x in confusion_matrix.keys()}
print()
print("Confusion matrix")
print("=" * 20)
print(percentage)
print()
print("Precision:", precision)
print("Recall", recall)
print("F1-Score", fscore)
print("Support:", support)
484c48c58fa612238c60a077f17149b568a61036 | 1,838 | py | Python | AlgorithmsPractice/python/307_(Binary Indexed Tree)_hard_Range Sum Query - Mutable.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 58 | 2019-03-03T04:42:23.000Z | 2022-01-13T04:36:31.000Z | AlgorithmsPractice/python/307_(Binary Indexed Tree)_hard_Range Sum Query - Mutable.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | null | null | null | AlgorithmsPractice/python/307_(Binary Indexed Tree)_hard_Range Sum Query - Mutable.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 28 | 2019-08-11T01:25:00.000Z | 2021-08-22T06:46:06.000Z | '''
Given an integer array nums, find the sum of the elements between indices i and j (i ≤ j), inclusive.
The update(i, val) function modifies nums by updating the element at index i to val.
Example:
Given nums = [1, 3, 5]
sumRange(0, 2) -> 9
update(1, 2)
sumRange(0, 2) -> 8
Note:
The array is only modifiable by the update function.
You may assume the number of calls to update and sumRange function is distributed evenly.
'''
# 2018-9-2
# 307. Range Sum Query - Mutable
# Binary Indexed Tree
# 关键在于 k -= (k & -k) 和 k += (k & -k), 前者用于update后者用于sum
class NumArray:
    """Mutable array supporting O(log n) point updates and range sums.

    Backed by a Fenwick (Binary Indexed) tree stored 1-based in
    ``self.BIT``; ``self.nums`` keeps the current element values so
    ``update`` can compute deltas.
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.nums = nums
        self.lens = len(nums)
        self.BIT = [0] * (self.lens + 1)
        # Seed the tree: push each value up along its update path.
        for idx, val in enumerate(nums, start=1):
            node = idx
            while node <= self.lens:
                self.BIT[node] += val
                node += node & (-node)

    def update(self, i, val):
        """Set nums[i] to val and fold the delta into the tree.

        :type i: int
        :type val: int
        :rtype: void
        """
        delta = val - self.nums[i]
        self.nums[i] = val
        node = i + 1
        while node <= self.lens:
            self.BIT[node] += delta
            node += node & (-node)

    def _prefix(self, k):
        # Sum of nums[0:k]; k is a 1-based prefix length.
        total = 0
        while k > 0:
            total += self.BIT[k]
            k -= k & (-k)
        return total

    def sumRange(self, i, j):
        """Return sum(nums[i:j+1]).

        :type i: int
        :type j: int
        :rtype: int
        """
        return self._prefix(j + 1) - self._prefix(i)
# Your NumArray object will be instantiated and called as such:
initial = [1, 3, 5]
obj = NumArray(initial)
# Apply the same sequence of point updates the original demo used.
for index, value in ((1, 2), (1, 44), (2, 44)):
    obj.update(index, value)
r1 = obj.sumRange(0, 2)
print(r1, obj.nums)
9c02a3199e40bc5c0475fbfec11cca7f64846ff4 | 1,649 | py | Python | homeassistant/components/kira/remote.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/kira/remote.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/kira/remote.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Support for Keene Electronics IR-IP devices."""
import functools as ft
import logging
from homeassistant.components import remote
from homeassistant.const import CONF_DEVICE, CONF_NAME
from homeassistant.helpers.entity import Entity
from . import CONF_REMOTE, DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Kira platform."""
    if not discovery_info:
        return True
    device_name = discovery_info.get(CONF_NAME)
    device = discovery_info.get(CONF_DEVICE)
    # The Kira module instance was stored under its name during component setup.
    kira_module = hass.data[DOMAIN][CONF_REMOTE][device_name]
    add_entities([KiraRemote(device, kira_module)])
    return True
class KiraRemote(Entity):
    """Remote representation used to send commands to a Kira device."""

    def __init__(self, name, kira):
        """Initialize KiraRemote class."""
        _LOGGER.debug("KiraRemote device init started for: %s", name)
        self._name = name
        self._kira = kira

    @property
    def name(self):
        """Return the Kira device's name."""
        return self._name

    def update(self):
        """No-op."""

    def send_command(self, command, **kwargs):
        """Send a command to one device."""
        # The target device is the same for every code in this call.
        target = kwargs.get(remote.ATTR_DEVICE)
        for single_command in command:
            code_tuple = (single_command, target)
            _LOGGER.info("Sending Command: %s to %s", *code_tuple)
            self._kira.sendCode(code_tuple)

    async def async_send_command(self, command, **kwargs):
        """Send a command to a device."""
        job = ft.partial(self.send_command, command, **kwargs)
        return await self.hass.async_add_executor_job(job)
| 30.537037 | 73 | 0.664645 |
2cdcf09b0a31b7161fd71c5fafba940a98504cbf | 846 | py | Python | evennia/comms/migrations/0011_auto_20170217_2039.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | null | null | null | evennia/comms/migrations/0011_auto_20170217_2039.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | null | null | null | evennia/comms/migrations/0011_auto_20170217_2039.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-02-17 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scripts', '0007_auto_20150403_2339'),
('comms', '0010_auto_20161206_1912'),
]
operations = [
migrations.AddField(
model_name='msg',
name='db_receivers_scripts',
field=models.ManyToManyField(blank=True, help_text='script_receivers', null=True, related_name='receiver_script_set', to='scripts.ScriptDB'),
),
migrations.AddField(
model_name='msg',
name='db_sender_scripts',
field=models.ManyToManyField(blank=True, db_index=True, null=True, related_name='sender_script_set', to='scripts.ScriptDB', verbose_name='sender(script)'),
),
]
| 31.333333 | 167 | 0.638298 |
716bbdee9ebe571b5d4cbb828941b16c87ead194 | 2,148 | py | Python | 2018/Day 12/solution.py | JohnBehnke/Advent-of-Code-2017 | f16f5e2b58dc8d267995d242d64f63e19aca9ba9 | [
"MIT"
] | 1 | 2020-12-13T02:19:05.000Z | 2020-12-13T02:19:05.000Z | 2018/Day 12/solution.py | JohnBehnke/Advent-of-Code-2017 | f16f5e2b58dc8d267995d242d64f63e19aca9ba9 | [
"MIT"
] | null | null | null | 2018/Day 12/solution.py | JohnBehnke/Advent-of-Code-2017 | f16f5e2b58dc8d267995d242d64f63e19aca9ba9 | [
"MIT"
] | null | null | null | '''
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
'''
def bootstrap(inputFile):
    """Parse an Advent of Code day-12 style input file.

    Returns a tuple ``(rules, initial_state)`` where ``rules`` maps a
    five-pot pattern string to its result character and
    ``initial_state`` is the starting pot row as a list of characters,
    padded with three empty pots ('.') on the left and five on the right
    (the padding the simulation below relies on).

    :param inputFile: path to the puzzle input file.
    """
    rules = {}
    # Left padding of three empty pots.
    initial_state = ['.', '.', '.']
    # BUG FIX: the original used a bare open() and never closed the file;
    # 'with' guarantees the handle is released.
    with open(inputFile) as handle:
        for line in handle:
            stripped = line.strip()
            if 'initial state' in stripped:
                initial_state.extend(list(stripped.split(': ')[1]))
                # Right padding of five empty pots.
                initial_state.extend(['.', '.', '.', '.', '.'])
            if '=>' in stripped:
                # Split once instead of twice as the original did.
                pattern, result = stripped.split(' => ')
                rules[pattern] = result
    return rules, initial_state
if __name__ == '__main__':
    # Part-1 driver: evolve the padded pot row for 21 passes (0..20).
    # NOTE(review): this is Python 2 code (print statement below).
    rules, initalState = bootstrap('input.txt')
    generationLimit, currentGeneration = 20, 0
    while currentGeneration <= generationLimit:
        nextGeneration = []
        # Sliding five-pot window: once at least five pots are copied,
        # rewrite the centre pot of the last window from the rule table.
        for i in range(len(initalState)):
            nextGeneration.append(initalState[i])
            if len(nextGeneration) >= 5:
                leftPots = ''.join(nextGeneration[-5:-3])
                centerPot = ''.join(nextGeneration[-3])
                rightPots = ''.join(nextGeneration[-2:])
                combinedPots = leftPots + centerPot + rightPots
                # NOTE(review): the left half of the window contains pots
                # already rewritten earlier in this pass, so the update is
                # not simultaneous; also rules[...] raises KeyError for any
                # pattern not listed in the input (only '#' rules appear
                # there). Confirm both against the intended puzzle rules.
                nextGeneration[-3] = rules[combinedPots]
        print ''.join(nextGeneration)
        initalState = nextGeneration[:]
        currentGeneration += 1
'''
0: ...#..#.#..##......###...###...........
1: ...#...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
'''
| 26.85 | 71 | 0.318901 |
17a1e0e64ddc02e8d37884357e0d79638cc2082e | 28,871 | py | Python | pnacl/driver/pnacl-driver.py | MicrohexHQ/nacl_contracts | 3efab5eecb3cf7ba43f2d61000e65918aa4ba77a | [
"BSD-3-Clause"
] | 6 | 2015-02-06T23:41:01.000Z | 2015-10-21T03:08:51.000Z | pnacl/driver/pnacl-driver.py | MicrohexHQ/nacl_contracts | 3efab5eecb3cf7ba43f2d61000e65918aa4ba77a | [
"BSD-3-Clause"
] | null | null | null | pnacl/driver/pnacl-driver.py | MicrohexHQ/nacl_contracts | 3efab5eecb3cf7ba43f2d61000e65918aa4ba77a | [
"BSD-3-Clause"
] | 1 | 2019-10-02T08:41:50.000Z | 2019-10-02T08:41:50.000Z | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
import re
import subprocess
from driver_tools import AddHostBinarySearchPath, DefaultOutputName, \
DriverChain, GetArch, ParseArgs, ParseTriple, Run, RunDriver, RunWithEnv, \
TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import DriverOpen, Log
import filetype
import pathtools
EXTRA_ENV = {
'ALLOW_TRANSLATE': '0', # Allow bitcode translation before linking.
# It doesn't normally make sense to do this.
'ALLOW_NATIVE' : '0', # Allow native objects (.S,.s,.o) to be in the
# linker line for .pexe generation.
# It doesn't normally make sense to do this.
# CXX_EH_MODE specifies how to deal with C++ exception handling:
# * 'none': Strips out use of C++ exception handling.
# * 'sjlj': Enables the setjmp()+longjmp()-based implementation of
# C++ exception handling. This is supported in PNaCl's stable
# ABI.
# * 'zerocost': Enables the zero-cost implementation of C++
# exception handling. This is not supported in PNaCl's stable
# ABI.
'CXX_EH_MODE': 'none',
'FORCE_INTERMEDIATE_LL': '0',
# Produce an intermediate .ll file
# Useful for debugging.
# NOTE: potentially different code paths and bugs
# might be triggered by this
'LANGUAGE' : '', # C or CXX (set by SetTool)
'INCLUDE_CXX_HEADERS': '0', # This is set by RunCC.
# Command-line options
'GCC_MODE' : '', # '' (default), '-E', '-c', or '-S'
'STDINC' : '1', # Include standard headers (-nostdinc sets to 0)
'STDINCCXX' : '1', # Include standard cxx headers (-nostdinc++ sets to 0)
'USE_STDLIB' : '1', # Include standard libraries (-nostdlib sets to 0)
'STDLIB' : '', # C++ Standard Library.
'STDLIB_TRUNC': '', # C++ Standard Library, truncated to pass as -lXXX.
'STDLIB_IDIR' : '', # C++ Standard Library include directory.
# Note: the above C++ Standard Library
# settings use a default if their value
# remains uset.
'DEFAULTLIBS' : '1', # Link with default libraries
'DIAGNOSTIC' : '0', # Diagnostic flag detected
'PIC' : '0', # Generate PIC
# TODO(robertm): Switch the default to 1
'NO_ASM' : '0', # Disallow use of inline assembler
'NEED_DASH_E' : '0', # Used for stdin inputs, which must have an explicit
# type set (using -x) unless -E is specified.
'VERBOSE' : '0', # Verbose (-v)
'SHOW_VERSION': '0', # Version (--version)
'PTHREAD' : '0', # use pthreads?
'INPUTS' : '', # Input files
'OUTPUT' : '', # Output file
'UNMATCHED' : '', # Unrecognized parameters
'BIAS_NONE' : '',
'BIAS_ARM' : '-D__arm__ -D__ARM_ARCH_7A__ -D__ARMEL__',
'BIAS_MIPS32' : '-D__MIPS__ -D__mips__ -D__MIPSEL__',
'BIAS_X8632' : '-D__i386__ -D__i386 -D__i686 -D__i686__ -D__pentium4__',
'BIAS_X8664' : '-D__amd64__ -D__amd64 -D__x86_64__ -D__x86_64 -D__core2__',
'BIAS_ARM_NONSFI': '${BIAS_ARM} -D__native_client_nonsfi__',
'BIAS_X8632_NONSFI': '${BIAS_X8632} -D__native_client_nonsfi__',
'FRONTEND_TRIPLE' : 'le32-unknown-nacl',
'OPT_LEVEL' : '', # Default for most tools is 0, but we need to know
# if it's explicitly set or not when the driver
# is only used for linking + translating.
'CC_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0} ' +
'-fno-vectorize -fno-slp-vectorize ' +
'-fno-common ${PTHREAD ? -pthread} ' +
'-nostdinc ${BIAS_%BIAS%} ' +
# BUG: http://code.google.com/p/nativeclient/issues/detail?id=2345
# it would be better to detect asm use inside clang
# as some uses of asm are borderline legit, e.g.
# <prototype> asm("<function-name>");
'${NO_ASM ? -Dasm=ASM_FORBIDDEN -D__asm__=ASM_FORBIDDEN} ' +
'-target ${FRONTEND_TRIPLE}',
'ISYSTEM' : '${ISYSTEM_USER} ${STDINC ? ${ISYSTEM_BUILTIN}}',
'ISYSTEM_USER' : '', # System include directories specified by
# using the -isystem flag.
'ISYSTEM_BUILTIN':
'${BASE_USR}/local/include ' +
'${ISYSTEM_CLANG} ' +
'${ISYSTEM_CXX} ' +
'${BASE_USR}/include ' +
'${BASE_SDK}/include ',
'ISYSTEM_CLANG' : '${BASE_LLVM}/lib/clang/3.4/include',
'ISYSTEM_CXX' :
'${INCLUDE_CXX_HEADERS && STDINCCXX ? ${ISYSTEM_CXX_include_paths}}',
'ISYSTEM_CXX_include_paths' :
'${BASE_USR}/include/c++/${STDLIB_IDIR} ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/arm-none-linux-gnueabi ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/backward',
# Only propagate opt level to linker if explicitly set, so that the
# linker will know if an opt level was explicitly set or not.
'LD_FLAGS' : '${#OPT_LEVEL ? -O${OPT_LEVEL}} -static ' +
'${PIC ? -fPIC} ${@AddPrefix:-L:SEARCH_DIRS} ' +
'--pnacl-exceptions=${CXX_EH_MODE}',
'SEARCH_DIRS' : '', # Directories specified using -L
# Library Strings
'EMITMODE' : '${!USE_STDLIB ? nostdlib : static}',
# This is setup so that LD_ARGS_xxx is evaluated lazily.
'LD_ARGS' : '${LD_ARGS_%EMITMODE%}',
# ${ld_inputs} signifies where to place the objects and libraries
# provided on the command-line.
'LD_ARGS_nostdlib': '-nostdlib ${ld_inputs}',
'LD_ARGS_static':
'${CXX_EH_MODE==zerocost ? -l:crt1_for_eh.x : -l:crt1.x} ' +
'-l:crti.bc -l:crtbegin.bc '
'${CXX_EH_MODE==sjlj ? -l:sjlj_eh_redirect.bc : '
'${CXX_EH_MODE==none ? -l:unwind_stubs.bc}} ' +
'${ld_inputs} ' +
'--start-group ${STDLIBS} --end-group',
'LLVM_PASSES_TO_DISABLE': '',
# Flags for translating to native .o files.
'TRANSLATE_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0}',
'STDLIBS' : '${DEFAULTLIBS ? '
'${LIBSTDCPP} ${LIBPTHREAD} ${LIBNACL} ${LIBC} ${LIBPNACLMM}}',
'LIBSTDCPP' : '${IS_CXX ? -l${STDLIB_TRUNC} -lm }',
'LIBC' : '-lc',
'LIBNACL' : '-lnacl',
'LIBPNACLMM': '-lpnaclmm',
# Enabled/disabled by -pthreads
'LIBPTHREAD': '${PTHREAD ? -lpthread}',
# IS_CXX is set by pnacl-clang and pnacl-clang++ programmatically
'CC' : '${IS_CXX ? ${CLANGXX} : ${CLANG}}',
'RUN_CC': '${CC} -emit-llvm ${mode} ${CC_FLAGS} ' +
'${@AddPrefix:-isystem :ISYSTEM} ' +
'-x${typespec} "${infile}" -o ${output}',
}
def AddLLVMPassDisableFlag(*args):
  """Record LLVM passes to disable, for both opt filtering and the linker."""
  for key in ('LLVM_PASSES_TO_DISABLE', 'LD_FLAGS'):
    env.append(key, *args)
def AddLDFlag(*args):
  # Forward the given flags verbatim to the bitcode linker invocation.
  env.append('LD_FLAGS', *args)
def AddTranslatorFlag(*args):
  """Forward flags to the translator, and via -Wt, to ld for .nexe links."""
  # pass translator args to ld in case we go all the way to .nexe
  prefixed = ['-Wt,' + flag for flag in args]
  env.append('LD_FLAGS', *prefixed)
  # pass translator args to translator in case we go to .o
  env.append('TRANSLATE_FLAGS', *args)
def AddCCFlag(*args):
  # Forward the given flags verbatim to the clang front-end invocation.
  env.append('CC_FLAGS', *args)
def AddDiagnosticFlag(*args):
  """Record a diagnostic-mode flag (e.g. -print-*, -dumpspecs)."""
  # Diagnostic mode makes main() run clang with the unmodified command line.
  env.set('DIAGNOSTIC', '1')
  env.append('CC_FLAGS', *args)
def SetTarget(*args):
  """Set the frontend target triple and forward it to the linker."""
  # Keep the ParseTriple call for its side effects (presumably validation
  # of the triple); its return value was never used, so the dead local
  # binding is dropped.
  ParseTriple(args[0])
  env.set('FRONTEND_TRIPLE', args[0])
  AddLDFlag('--target=' + args[0])
def SetStdLib(*args):
  """Set the C++ Standard Library."""
  lib = args[0]
  assert lib in ('libc++', 'libstdc++'), (
      'Invalid C++ standard library: -stdlib=%s' % lib)
  env.set('STDLIB', lib)
  env.set('STDLIB_TRUNC', lib[3:])  # e.g. 'c++' / 'stdc++', for -lXXX
  if lib == 'libc++':
    env.set('STDLIB_IDIR', 'v1')
    if env.getbool('IS_CXX'):
      # libc++ depends on pthread for C++11 features as well as some
      # exception handling (which may get removed later by the PNaCl ABI
      # simplification) and initialize-once.
      env.set('PTHREAD', '1')
  else:
    env.set('STDLIB_IDIR', '4.6.2')
def IsPortable():
  """True when the frontend triple targets portable (le32) bitcode."""
  triple = env.getone('FRONTEND_TRIPLE')
  return triple.startswith('le32-')
stdin_count = 0  # number of stdin pseudo-inputs registered so far
def AddInputFileStdin():
  # Register '-' (stdin) as an input under a synthetic name so it can be
  # tracked like a regular file.
  global stdin_count
  # When stdin is an input, -x or -E must be given.
  forced_type = filetype.GetForcedFileType()
  if not forced_type:
    # Only allowed if -E is specified.
    # Default to C here; main() later rejects the command line unless -E
    # was given (via NEED_DASH_E).
    forced_type = 'c'
    env.set('NEED_DASH_E', '1')
  stdin_name = '__stdin%d__' % stdin_count
  env.append('INPUTS', stdin_name)
  filetype.ForceFileType(stdin_name, forced_type)
  stdin_count += 1
def IsStdinInput(f):
  """True for the synthetic '__stdinN__' names created for stdin input."""
  return f[:7] == '__stdin' and f[-2:] == '__'
def HandleDashX(arg):
  # Implement gcc's "-x <language>": force the type of subsequent inputs.
  if arg == 'none':
    # "-x none" reverts to inferring the type from the file extension.
    filetype.SetForcedFileType(None)
    return
  filetype.SetForcedFileType(filetype.GCCTypeToFileType(arg))
def AddVersionFlag(*args):
  """Handle --version: diagnostic mode plus the nacl-version banner."""
  AddDiagnosticFlag(*args)
  env.set('SHOW_VERSION', '1')
def AddBPrefix(prefix):
  """Add a path to the list searched for host binaries and include dirs."""
  AddHostBinarySearchPath(prefix)
  normalized = pathtools.normalize(prefix)
  if pathtools.isdir(normalized) and not normalized.endswith('/'):
    normalized += '/'
  # Library search dir: the prefix itself, when it exists.
  if pathtools.isdir(normalized):
    env.append('SEARCH_DIRS', normalized)
  # Header search dir: prefix/include, when it exists.
  header_dir = normalized + 'include'
  if pathtools.isdir(header_dir):
    env.append('ISYSTEM_USER', header_dir)
# pnacl-specific command-line options: (regex, action) pairs consumed by
# ParseArgs before the generic GCC-compatible patterns below.
CustomPatterns = [
  ( '--driver=(.+)', "env.set('CC', pathtools.normalize($0))\n"),
  ( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
  ( '--pnacl-allow-translate', "env.set('ALLOW_TRANSLATE', '1')"),
  ( '--pnacl-frontend-triple=(.+)', SetTarget),
  ( ('-target','(.+)'), SetTarget),
  ( ('--target=(.+)'), SetTarget),
  ( '--pnacl-exceptions=(none|sjlj|zerocost)', "env.set('CXX_EH_MODE', $0)"),
  # TODO(mseaborn): Remove "--pnacl-allow-exceptions", which is
  # superseded by "--pnacl-exceptions".
  ( '--pnacl-allow-exceptions', "env.set('CXX_EH_MODE', 'zerocost')"),
  ( '(--pnacl-allow-nexe-build-id)', AddLDFlag),
  ( '(--pnacl-disable-abi-check)', AddLDFlag),
  ( '(--pnacl-disable-pass=.+)', AddLLVMPassDisableFlag),
  ( '(--pnacl-allow-dev-intrinsics)', AddLDFlag),
]
GCCPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-E', "env.set('GCC_MODE', '-E')"),
( '-S', "env.set('GCC_MODE', '-S')"),
( '-c', "env.set('GCC_MODE', '-c')"),
( '-allow-asm', "env.set('NO_ASM', '0')"),
( '-nostdinc', "env.set('STDINC', '0')"),
( '-nostdinc\+\+', "env.set('STDINCCXX', '0')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-nodefaultlibs', "env.set('DEFAULTLIBS', '0')"),
( '-?-stdlib=(.*)', SetStdLib),
( ('-?-stdlib', '(.*)'), SetStdLib),
# Flags to pass to native linker
( '(-Wn,.*)', AddLDFlag),
( '-rdynamic', "env.append('LD_FLAGS', '-export-dynamic')"),
# Flags to pass to pnacl-translate
( '-Wt,(.*)', AddTranslatorFlag),
( ('-Xtranslator','(.*)'), AddTranslatorFlag),
# We don't care about -fPIC, but pnacl-ld and pnacl-translate do.
( '-fPIC', "env.set('PIC', '1')"),
# We must include -l, -Xlinker, and -Wl options into the INPUTS
# in the order they appeared. This is the exactly behavior of gcc.
# For example: gcc foo.c -Wl,--start-group -lx -ly -Wl,--end-group
#
( '(-l.+)', "env.append('INPUTS', $0)"),
( ('(-l)','(.+)'), "env.append('INPUTS', $0+$1)"),
( ('-Xlinker','(.*)'), "env.append('INPUTS', '-Xlinker=' + $0)"),
( '(-Wl,.*)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '-O([sz])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')\n"),
( '-O', "env.set('OPT_LEVEL', '1')\n"),
( ('-isystem', '(.*)'),
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( '-isystem(.+)',
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( ('-I', '(.+)'), "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
( '-I(.+)', "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
# NOTE: the -iquote =DIR syntax (substitute = with sysroot) doesn't work.
# Clang just says: ignoring nonexistent directory "=DIR"
( ('-iquote', '(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-iquote(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-idirafter', '(.+)'),
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( '-idirafter(.+)',
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( ('(-include)','(.+)'), AddCCFlag),
( ('(-include.+)'), AddCCFlag),
( '(-g)', AddCCFlag),
( '(-W.*)', AddCCFlag),
( '(-w)', AddCCFlag),
( '(-std=.*)', AddCCFlag),
( '(-ansi)', AddCCFlag),
( ('(-D)','(.*)'), AddCCFlag),
( '(-D.+)', AddCCFlag),
( ('(-U)','(.*)'), AddCCFlag),
( '(-U.+)', AddCCFlag),
( '(-f.*)', AddCCFlag),
( '(-pedantic)', AddCCFlag),
( '(-pedantic-errors)', AddCCFlag),
( '(-g.*)', AddCCFlag),
( '(-v|--v)', "env.append('CC_FLAGS', $0)\n"
"env.set('VERBOSE', '1')"),
( '(-pthreads?)', "env.set('PTHREAD', '1')"),
# No-op: accepted for compatibility in case build scripts pass it.
( '-static', ""),
( ('-B','(.*)'), AddBPrefix),
( ('-B(.+)'), AddBPrefix),
( ('-L','(.+)'), "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '-L(.+)', "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '(-Wp,.*)', AddCCFlag),
( '(-Xpreprocessor .*)', AddCCFlag),
( ('(-Xclang)', '(.*)'), AddCCFlag),
# Accept and ignore default flags
( '-m32', ""),
( '-emit-llvm', ""),
( '(-MG)', AddCCFlag),
( '(-MMD)', AddCCFlag),
( '(-MM?)', "env.append('CC_FLAGS', $0)\n"
"env.set('GCC_MODE', '-E')"),
( '(-MP)', AddCCFlag),
( ('(-MQ)','(.*)'), AddCCFlag),
( '(-MD)', AddCCFlag),
( ('(-MT)','(.*)'), AddCCFlag),
( ('(-MF)','(.*)'), "env.append('CC_FLAGS', $0, pathtools.normalize($1))"),
( ('-x', '(.+)'), HandleDashX),
( '-x(.+)', HandleDashX),
( ('(-mllvm)', '(.+)'), AddCCFlag),
# Ignore these gcc flags
( '(-msse)', ""),
( '(-march=armv7-a)', ""),
( '(-pipe)', ""),
( '(-s)', AddLDFlag),
( '(--strip-all)', AddLDFlag),
( '(--strip-debug)', AddLDFlag),
# Ignore these assembler flags
( '(-Qy)', ""),
( ('(--traditional-format)', '.*'), ""),
( '(-gstabs)', ""),
( '(--gstabs)', ""),
( '(-gdwarf2)', ""),
( '(--gdwarf2)', ""),
( '(--fatal-warnings)', ""),
( '(-meabi=.*)', ""),
( '(-mfpu=.*)', ""),
( '(-mfloat-abi=.+)', AddCCFlag),
# GCC diagnostic mode triggers
( '(-print-.*)', AddDiagnosticFlag),
( '(--print.*)', AddDiagnosticFlag),
( '(-dumpspecs)', AddDiagnosticFlag),
( '(--version)', AddVersionFlag),
# These are preprocessor flags which should be passed to the frontend, but
# should not prevent the usual -i flags (which DIAGNOSTIC mode does)
( '(-d[DIMNU])', AddCCFlag),
( '(-d.*)', AddDiagnosticFlag),
# Catch all other command-line arguments
( '(-.+)', "env.append('UNMATCHED', $0)"),
# Standard input
( '-', AddInputFileStdin),
# Input Files
# Call ForceFileType for all input files at the time they are
# parsed on the command-line. This ensures that the gcc "-x"
# setting is correctly applied.
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))\n"
"filetype.ForceFileType(pathtools.normalize($0))"),
]
def CheckSetup():
  """Abort unless a language (C vs C++) was selected by the wrapper script."""
  if env.has('IS_CXX'):
    return
  Log.Fatal('"pnacl-driver" cannot be used directly. '
            'Use pnacl-clang or pnacl-clang++.')
def DriverOutputTypes(driver_flag, compiling_to_native):
  """Map the gcc mode flag ('', '-E', '-c', '-S') to an output file type.

  The answer depends on whether the driver stops at bitcode or goes all
  the way to native code.
  """
  bitcode_types = {'-E': 'pp', '-c': 'po', '-S': 'll', '': 'pexe'}
  native_types = {'-E': 'pp', '-c': 'o', '-S': 's', '': 'nexe'}
  table = native_types if compiling_to_native else bitcode_types
  return table[driver_flag]
def ReadDriverRevision():
  # Return the toolchain revision recorded in the driver REV file.
  rev_file = env.getone('DRIVER_REV_FILE')
  # Might be an SVN version or a GIT hash (depending on the NaCl src client)
  # NOTE(review): the handle returned by DriverOpen is never closed, and
  # the 'rb' mode implies Python 2 str semantics for the regexes below.
  nacl_ver = DriverOpen(rev_file, 'rb').readlines()[0]
  m = re.search(r'\[SVN\].*/native_client:\s*(\d+)', nacl_ver)
  if m:
    return m.group(1)
  m = re.search(r'\[GIT\].*/native_client.git:\s*(\w+)', nacl_ver)
  if m:
    return m.group(1)
  # fail-fast: if the REV file exists but regex search failed,
  # we need to fix the regex to get nacl-version.
  # (m is always None at this point, so the guard below always fires.)
  if not m:
    Log.Fatal('Failed to parse REV file to get nacl-version.')
def main(argv):
  # Driver entry point: parse the GCC-compatible command line, compile
  # each source input, and (unless -E/-S/-c was given) link the results.
  # NOTE(review): Python 2 code (print statement, xrange below).
  env.update(EXTRA_ENV)
  CheckSetup()
  ParseArgs(argv, CustomPatterns + GCCPatterns)
  # "configure", especially when run as part of a toolchain bootstrap
  # process, will invoke gcc with various diagnostic options and
  # parse the output. In these cases we do not alter the incoming
  # commandline. It is also important to not emit spurious messages.
  if env.getbool('DIAGNOSTIC'):
    if env.getbool('SHOW_VERSION'):
      # Append the NaCl revision to clang's first version line.
      code, stdout, stderr = Run(env.get('CC') + env.get('CC_FLAGS'),
                                 redirect_stdout=subprocess.PIPE)
      out = stdout.split('\n')
      nacl_version = ReadDriverRevision()
      out[0] += ' nacl-version=%s' % nacl_version
      stdout = '\n'.join(out)
      print stdout,
    else:
      Run(env.get('CC') + env.get('CC_FLAGS'))
    return 0
  unmatched = env.get('UNMATCHED')
  if len(unmatched) > 0:
    UnrecognizedOption(*unmatched)
  # If -arch was given, we are compiling directly to native code
  compiling_to_native = GetArch() is not None
  if env.getbool('ALLOW_NATIVE') and not compiling_to_native:
    Log.Fatal("--pnacl-allow-native without -arch is not meaningful.")
  if not env.get('STDLIB'):
    # Default C++ Standard Library.
    SetStdLib('libc++')
  inputs = env.get('INPUTS')
  output = env.getone('OUTPUT')
  if len(inputs) == 0:
    if env.getbool('VERBOSE'):
      # -v can be invoked without any inputs. Runs the original
      # command without modifying the commandline for this case.
      Run(env.get('CC') + env.get('CC_FLAGS'))
      return 0
    else:
      Log.Fatal('No input files')
  gcc_mode = env.getone('GCC_MODE')
  output_type = DriverOutputTypes(gcc_mode, compiling_to_native)
  needs_linking = (gcc_mode == '')
  if env.getbool('NEED_DASH_E') and gcc_mode != '-E':
    Log.Fatal("-E or -x required when input is from stdin")
  # There are multiple input files and no linking is being done.
  # There will be multiple outputs. Handle this case separately.
  if not needs_linking:
    # Filter out flags
    inputs = [f for f in inputs if not IsFlag(f)]
    if output != '' and len(inputs) > 1:
      Log.Fatal('Cannot have -o with -c, -S, or -E and multiple inputs: %s',
                repr(inputs))
    for f in inputs:
      if IsFlag(f):
        continue
      intype = filetype.FileType(f)
      if not filetype.IsSourceType(intype):
        # Reject input types that the requested output cannot be
        # produced from (see SetupChain for the legal phase graph).
        if ((output_type == 'pp' and intype != 'S') or
            (output_type == 'll') or
            (output_type == 'po' and intype != 'll') or
            (output_type == 's' and intype not in ('ll','po','S')) or
            (output_type == 'o' and intype not in ('ll','po','S','s'))):
          Log.Fatal("%s: Unexpected type of file for '%s'",
                    pathtools.touser(f), gcc_mode)
      if output == '':
        f_output = DefaultOutputName(f, output_type)
      else:
        f_output = output
      namegen = TempNameGen([f], f_output)
      CompileOne(f, output_type, namegen, f_output)
    return 0
  # Linking case
  assert(needs_linking)
  assert(output_type in ('pso','so','pexe','nexe'))
  if output == '':
    output = pathtools.normalize('a.out')
  namegen = TempNameGen(inputs, output)
  # Compile all source files (c/c++/ll) to .po
  for i in xrange(0, len(inputs)):
    if IsFlag(inputs[i]):
      continue
    intype = filetype.FileType(inputs[i])
    if filetype.IsSourceType(intype) or intype == 'll':
      inputs[i] = CompileOne(inputs[i], 'po', namegen)
  # Compile all .s/.S to .o
  if env.getbool('ALLOW_NATIVE'):
    for i in xrange(0, len(inputs)):
      if IsFlag(inputs[i]):
        continue
      intype = filetype.FileType(inputs[i])
      if intype in ('s','S'):
        inputs[i] = CompileOne(inputs[i], 'o', namegen)
  # We should only be left with .po and .o and libraries
  for f in inputs:
    if IsFlag(f):
      continue
    intype = filetype.FileType(f)
    if intype in ('o','s','S') or filetype.IsNativeArchive(f):
      if not env.getbool('ALLOW_NATIVE'):
        Log.Fatal('%s: Native object files not allowed in link. '
                  'Use --pnacl-allow-native to override.', pathtools.touser(f))
    assert(intype in ('po','o','so','ldscript') or filetype.IsArchive(f))
  # Fix the user-specified linker arguments
  # (-Xlinker=X and -Wl,X,Y were folded into INPUTS to preserve ordering;
  # unwrap them here before handing everything to pnacl-ld.)
  ld_inputs = []
  for f in inputs:
    if f.startswith('-Xlinker='):
      ld_inputs.append(f[len('-Xlinker='):])
    elif f.startswith('-Wl,'):
      ld_inputs += f[len('-Wl,'):].split(',')
    else:
      ld_inputs.append(f)
  if env.getbool('ALLOW_NATIVE'):
    ld_inputs.append('--pnacl-allow-native')
  # Invoke the linker
  env.set('ld_inputs', *ld_inputs)
  ld_args = env.get('LD_ARGS')
  ld_flags = env.get('LD_FLAGS')
  RunDriver('ld', ld_flags + ld_args + ['-o', output])
  return 0
def IsFlag(f):
  """True when a command-line input token is an option rather than a file."""
  return f[:1] == '-'
def CompileOne(infile, output_type, namegen, output = None):
  # Compile a single input file to the requested output type by building
  # and running a driver chain; returns the path of the produced output.
  # When `output` is None a temporary name is generated from `infile`.
  if output is None:
    output = namegen.TempNameForInput(infile, output_type)
  chain = DriverChain(infile, output, namegen)
  SetupChain(chain, filetype.FileType(infile), output_type)
  chain.run()
  return output
def RunCC(infile, output, mode):
  """Run clang on one input file in the given mode (-E, -S, or -c)."""
  intype = filetype.FileType(infile)
  typespec = filetype.FileTypeToGCCType(intype)
  # C++ headers are wanted either in C++ driver mode or for a forced
  # 'c++' input type.
  want_cxx_headers = (env.get('LANGUAGE') == 'CXX') or (intype == 'c++')
  env.setbool('INCLUDE_CXX_HEADERS', want_cxx_headers)
  source = '-' if IsStdinInput(infile) else infile
  RunWithEnv("${RUN_CC}", infile=source, output=output, mode=mode,
             typespec=typespec)
def RunLLVMAS(infile, output):
  """Assemble LLVM .ll text to bitcode via pnacl-as."""
  source = '-' if IsStdinInput(infile) else infile
  # This is a bitcode only step - so get rid of "-arch xxx" which
  # might be inherited from the current invocation
  RunDriver('as', [source, '-o', output], suppress_inherited_arch_args=True)
def RunNativeAS(infile, output):
  """Assemble native .s to a native object via pnacl-as."""
  source = '-' if IsStdinInput(infile) else infile
  RunDriver('as', [source, '-o', output])
def RunTranslate(infile, output, mode):
  """Translate bitcode to native code via pnacl-translate (gated by flag)."""
  if not env.getbool('ALLOW_TRANSLATE'):
    Log.Fatal('%s: Trying to convert bitcode to an object file before '
              'bitcode linking. This is supposed to wait until '
              'translation. Use --pnacl-allow-translate to override.',
              pathtools.touser(infile))
  # Build a fresh argument list (never mutate the list env.get returns).
  flags = env.get('TRANSLATE_FLAGS')
  args = flags + [mode, '--allow-llvm-bitcode-input', infile, '-o', output]
  if env.getbool('PIC'):
    args = args + ['-fPIC']
  RunDriver('translate', args)
def RunOpt(infile, outfile, pass_list):
  """Run pnacl-opt with pass_list minus any user-disabled passes."""
  disabled = env.get('LLVM_PASSES_TO_DISABLE')
  kept = [pass_option for pass_option in pass_list
          if pass_option not in disabled]
  RunDriver('opt', kept + [infile, '-o', outfile])
def SetupChain(chain, input_type, output_type):
  # Append the sequence of compilation phases needed to get from
  # input_type to output_type onto `chain`. Each stanza advances
  # cur_type one phase; we return as soon as the target type is reached,
  # and abort if no path exists.
  assert(output_type in ('pp','ll','po','s','o'))
  cur_type = input_type
  # source file -> pp
  if filetype.IsSourceType(cur_type) and output_type == 'pp':
    chain.add(RunCC, 'cpp', mode='-E')
    cur_type = 'pp'
  if cur_type == output_type:
    return
  # source file -> ll
  if (filetype.IsSourceType(cur_type) and
      (env.getbool('FORCE_INTERMEDIATE_LL') or output_type == 'll')):
    chain.add(RunCC, 'll', mode='-S')
    cur_type = 'll'
  if cur_type == output_type:
    return
  # ll -> po
  if cur_type == 'll':
    chain.add(RunLLVMAS, 'po')
    cur_type = 'po'
  if cur_type == output_type:
    return
  # source file -> po (we also force native output to go through this phase
  if filetype.IsSourceType(cur_type) and output_type in ('po', 'o', 's'):
    chain.add(RunCC, 'po', mode='-c')
    cur_type = 'po'
  if cur_type == output_type:
    return
  # po -> o
  if (cur_type == 'po' and output_type == 'o'):
    # If we aren't using biased bitcode, then at least -expand-byval
    # must be run to work with the PPAPI shim calling convention.
    if IsPortable():
      chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
    chain.add(RunTranslate, 'o', mode='-c')
    cur_type = 'o'
  if cur_type == output_type:
    return
  # po -> s
  if cur_type == 'po':
    # If we aren't using biased bitcode, then at least -expand-byval
    # must be run to work with the PPAPI shim calling convention.
    if IsPortable():
      chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
    chain.add(RunTranslate, 's', mode='-S')
    cur_type = 's'
  if cur_type == output_type:
    return
  # S -> s  (preprocess assembly with the C preprocessor)
  if cur_type == 'S':
    chain.add(RunCC, 's', mode='-E')
    cur_type = 's'
    if output_type == 'pp':
      return
  if cur_type == output_type:
    return
  # s -> o
  if cur_type == 's' and output_type == 'o':
    chain.add(RunNativeAS, 'o')
    cur_type = 'o'
  if cur_type == output_type:
    return
  Log.Fatal("Unable to compile .%s to .%s", input_type, output_type)
def get_help(argv):
  # Return the help text for this driver. With --help-full, defer to the
  # underlying clang binary's own -help output instead of the summary.
  tool = env.getone('SCRIPT_NAME')
  if '--help-full' in argv:
    # To get ${CC}, etc.
    env.update(EXTRA_ENV)
    code, stdout, stderr = Run('"${CC}" -help',
                              redirect_stdout=subprocess.PIPE,
                              redirect_stderr=subprocess.STDOUT,
                              errexit=False)
    return stdout
  else:
    return """
This is a "GCC-compatible" driver using clang under the hood.
Usage: %s [options] <inputs> ...
BASIC OPTIONS:
  -o <file>             Output to <file>.
  -E                    Only run the preprocessor.
  -S                    Generate bitcode assembly.
  -c                    Generate bitcode object.
  -I <dir>              Add header search path.
  -L <dir>              Add library search path.
  -D<key>[=<val>]       Add definition for the preprocessor.
  -W<id>                Toggle warning <id>.
  -f<feature>           Enable <feature>.
  -Wl,<arg>             Pass <arg> to the linker.
  -Xlinker <arg>        Pass <arg> to the linker.
  -Wt,<arg>             Pass <arg> to the translator.
  -Xtranslator <arg>    Pass <arg> to the translator.
  -Wp,<arg>             Pass <arg> to the preprocessor.
  -Xpreprocessor,<arg>  Pass <arg> to the preprocessor.
  -x <language>         Treat subsequent input files as having type <language>.
  -static               Produce a static executable (the default).
  -Bstatic              Link subsequent libraries statically.
  -Bdynamic             Link subsequent libraries dynamically.
  -fPIC                 Ignored (only used by translator backend)
                        (accepted for compatibility).
  -pipe                 Ignored (for compatibility).
  -O<n>                 Optimation level <n>: 0, 1, 2, 3, 4 or s.
  -g                    Generate complete debug information.
  -gline-tables-only    Generate debug line-information only
                        (allowing for stack traces).
  -flimit-debug-info    Generate limited debug information.
  -save-temps           Keep intermediate compilation results.
  -v                    Verbose output / show commands.
  -h | --help           Show this help.
  --help-full           Show underlying clang driver's help message
                        (warning: not all options supported).
""" % (tool)
| 36.361461 | 84 | 0.575214 |
fa00c6fe83ece6c4f2c78553594c5611ca0520f9 | 406 | py | Python | tests/test_table.py | refgenie/refgenconf | 727fe1f219780b46fe3743a906dcd6238f1734f6 | [
"BSD-2-Clause"
] | 3 | 2019-07-12T16:46:37.000Z | 2019-11-20T20:39:43.000Z | tests/test_table.py | databio/refgenconf | 727fe1f219780b46fe3743a906dcd6238f1734f6 | [
"BSD-2-Clause"
] | 80 | 2019-05-14T20:59:44.000Z | 2020-06-16T17:09:52.000Z | tests/test_table.py | databio/refgenconf | 727fe1f219780b46fe3743a906dcd6238f1734f6 | [
"BSD-2-Clause"
] | 4 | 2019-05-15T15:22:15.000Z | 2020-05-26T14:28:48.000Z | class TestAliasTable:
def test_alias_table_dimensions(self, my_rgc):
assert len(my_rgc.genomes_list()) == my_rgc.genome_aliases_table.row_count
assert len(my_rgc.genome_aliases_table.columns) == 2
class TestAssetTable:
def test_asset_table_dimensions(self, my_rgc):
assert my_rgc.genome_aliases_table.row_count == len(
my_rgc.list_assets_by_genome()
)
| 33.833333 | 82 | 0.724138 |
fef3a2cc60bf0b7c0d25c6aef3f775622d353afd | 885 | py | Python | tests/test_metrics.py | Marcnuth/AutoQuant | b2fbade31a86c16b1287675cb7483afeb0e43663 | [
"MIT"
] | 1 | 2021-11-07T04:01:44.000Z | 2021-11-07T04:01:44.000Z | tests/test_metrics.py | Marcnuth/AutoQuant | b2fbade31a86c16b1287675cb7483afeb0e43663 | [
"MIT"
] | null | null | null | tests/test_metrics.py | Marcnuth/AutoQuant | b2fbade31a86c16b1287675cb7483afeb0e43663 | [
"MIT"
] | 3 | 2021-11-25T15:03:19.000Z | 2021-12-24T02:17:51.000Z | from autoquant.metric import gross_rate_of_return, CAGR, max_drawdown, beta, annualized_rate_of_return, avg_annualized_rate_of_return
from datetime import date
from autoquant import Market
def test_metrics():
    """Spot-check the basic return and drawdown metrics."""
    assert gross_rate_of_return(1, 11) == 10
    # FIX: the original wrote `value - expected < tol`, which is vacuously
    # true whenever the value UNDERSHOOTS the expectation (the difference is
    # negative).  Bound the error in both directions with abs(), matching the
    # pattern already used in test_aror below.
    assert abs(CAGR(1, 11, date(2021, 11, 1), date(2023, 5, 1)) - 4.96) < 0.01
    # A monotonically rising series never draws down.
    assert max_drawdown([1, 2, 3, 4, 5, 6, 7]) == 0
    # Falling 7 -> 1 is a (7 - 1) / 7 ~= 0.857 drawdown.
    assert abs(max_drawdown([7, 6, 5, 4, 3, 2, 1]) - 0.85) < 0.01
    # Falling 10 -> 8 is exactly a 20% drawdown.
    assert abs(max_drawdown([10, 8]) - 0.2) < 0.01
def test_beta():
    """Smoke test: beta() should run without raising for a known ticker."""
    window_start = date(2020, 1, 1)
    window_end = date(2021, 1, 1)
    # No assertion on the value; this only exercises the code path.
    beta(Market.SH, '000002', start=window_start, end=window_end)
def test_aror():
    """Check annualized and average-annualized rates of return for 000002."""
    query = dict(market=Market.SZ, code='000002', start_year=2010, end_year=2021)
    assert abs(annualized_rate_of_return(**query) - 0.087) < 1e-3
    assert abs(avg_annualized_rate_of_return(**query) - 0.135) < 1e-3
| 35.4 | 133 | 0.685876 |
777bfe6d87f543429edf42a166f84a626c881d76 | 293,868 | py | Python | venv/Lib/site-packages/tensorflow/python/training/gen_training_ops.py | golam-shovon/Targated_Advertising | 903b42ec04c98e10fcacd126eb8ba52720ac3cc5 | [
"BSD-4-Clause-UC"
] | 1 | 2019-04-11T13:23:09.000Z | 2019-04-11T13:23:09.000Z | Lib/site-packages/tensorflow/python/training/gen_training_ops.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/tensorflow/python/training/gen_training_ops.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | [
"PSF-2.0"
] | null | null | null | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
def apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the AdaMax algorithm.

  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  v_t <- max(beta2 * v_{t-1}, abs(g))
  variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    m: A mutable `Tensor` of the same type as `var`; from a Variable().
    v: A mutable `Tensor` of the same type as `var`; from a Variable().
    beta1_power: A scalar `Tensor` of the same type as `var`.
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    beta1: Momentum factor; a scalar `Tensor` of the same type as `var`.
    beta2: Momentum factor; a scalar `Tensor` of the same type as `var`.
    epsilon: Ridge term; a scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      the var, m and v tensors are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_ada_max op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  # Build the graph node, then register inputs/attrs for gradient replay.
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyAdaMax", var=var, m=m, v=v, beta1_power=beta1_power, lr=lr,
      beta1=beta1, beta2=beta2, epsilon=epsilon, grad=grad,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyAdaMax", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_ada_max_eager_fallback(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_ada_max op does not support eager execution. Arg 'out' is a ref.")
def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the adadelta scheme.

  accum = rho() * accum + (1 - rho()) * grad.square();
  update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
  update_accum = rho() * update_accum + (1 - rho()) * update.square();
  var -= update;

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    accum_update: A mutable `Tensor` of the same type as `var`; from a
      Variable().
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    rho: Decay factor; a scalar `Tensor` of the same type as `var`.
    epsilon: Constant factor; a scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var, accum and accum_update are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_adadelta op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyAdadelta", var=var, accum=accum, accum_update=accum_update,
      lr=lr, rho=rho, epsilon=epsilon, grad=grad,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyAdadelta", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_adadelta op does not support eager execution. Arg 'out' is a ref.")
def apply_adagrad(var, accum, lr, grad, use_locking=False, update_slots=True, name=None):
  r"""Update '*var' according to the adagrad scheme.

  accum += grad * grad
  var -= lr * grad * (1 / sqrt(accum))

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and accum are protected by a lock.
    update_slots: Optional `bool`, defaults to `True`; whether the accum
      slot is updated.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_adagrad op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  update_slots = _execute.make_bool(
      True if update_slots is None else update_slots, "update_slots")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad,
      use_locking=use_locking, update_slots=update_slots, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"),
           "update_slots", op.get_attr("update_slots"))
  _execute.record_gradient("ApplyAdagrad", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_adagrad_eager_fallback(var, accum, lr, grad, use_locking=False, update_slots=True, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_adagrad op does not support eager execution. Arg 'out' is a ref.")
def apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking=False, name=None):
  r"""Update '*var' according to the proximal adagrad scheme.

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    gradient_accumulator: A mutable `Tensor` of the same type as `var`;
      from a Variable().
    gradient_squared_accumulator: A mutable `Tensor` of the same type as
      `var`; from a Variable().
    grad: The gradient; a `Tensor` of the same type as `var`.
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    l1: L1 regularization; a scalar `Tensor` of the same type as `var`.
    l2: L2 regularization; a scalar `Tensor` of the same type as `var`.
    global_step: Training step number; a scalar `int64` `Tensor`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and the accumulators are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyAdagradDA", var=var, gradient_accumulator=gradient_accumulator,
      gradient_squared_accumulator=gradient_squared_accumulator,
      grad=grad, lr=lr, l1=l1, l2=l2, global_step=global_step,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyAdagradDA", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.")
def apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, use_nesterov=False, name=None):
  r"""Update '*var' according to the Adam algorithm.

  $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
  $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
  $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
  $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    m: A mutable `Tensor` of the same type as `var`; from a Variable().
    v: A mutable `Tensor` of the same type as `var`; from a Variable().
    beta1_power: A scalar `Tensor` of the same type as `var`.
    beta2_power: A scalar `Tensor` of the same type as `var`.
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    beta1: Momentum factor; a scalar `Tensor` of the same type as `var`.
    beta2: Momentum factor; a scalar `Tensor` of the same type as `var`.
    epsilon: Ridge term; a scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var, m and v are protected by a lock.
    use_nesterov: Optional `bool`, defaults to `False`. If `True`, uses the
      nesterov update.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_adam op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  use_nesterov = _execute.make_bool(
      False if use_nesterov is None else use_nesterov, "use_nesterov")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyAdam", var=var, m=m, v=v, beta1_power=beta1_power,
      beta2_power=beta2_power, lr=lr, beta1=beta1, beta2=beta2,
      epsilon=epsilon, grad=grad, use_locking=use_locking,
      use_nesterov=use_nesterov, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"),
           "use_nesterov", op.get_attr("use_nesterov"))
  _execute.record_gradient("ApplyAdam", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_adam_eager_fallback(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, use_nesterov=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_adam op does not support eager execution. Arg 'out' is a ref.")
def apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad, use_locking=False, name=None):
  r"""Update '*var' according to the AddSign update.

  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  update <- (alpha + sign_decay * sign(g) *sign(m)) * g
  variable <- variable - lr_t * update

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    m: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    alpha: A scalar `Tensor` of the same type as `var`.
    sign_decay: A scalar `Tensor` of the same type as `var`.
    beta: A scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and m are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_add_sign op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyAddSign", var=var, m=m, lr=lr, alpha=alpha,
      sign_decay=sign_decay, beta=beta, grad=grad,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyAddSign", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_add_sign_eager_fallback(var, m, lr, alpha, sign_decay, beta, grad, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_add_sign op does not support eager execution. Arg 'out' is a ref.")
def apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second
  moment (i.e., the variance) for normalization, as opposed to regular
  RMSProp, which uses the (uncentered) second moment. This often helps with
  training, but is slightly more expensive in terms of computation and memory.

  mg <- rho * mg_{t-1} + (1-rho) * grad
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
  var <- var - mom

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    mg: A mutable `Tensor` of the same type as `var`; from a Variable().
    ms: A mutable `Tensor` of the same type as `var`; from a Variable().
    mom: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    rho: Decay rate; a scalar `Tensor` of the same type as `var`.
    momentum: A `Tensor` of the same type as `var`.
    epsilon: Ridge term; a scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var, mg, ms and mom are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr,
      rho=rho, momentum=momentum, epsilon=epsilon,
      grad=grad, use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "ApplyCenteredRMSProp", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.")
def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking=False, name=None):
  r"""Update '*var' according to the Ftrl-proximal scheme.

  accum_new = accum + grad * grad
  linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    linear: A mutable `Tensor` of the same type as `var`; from a Variable().
    grad: The gradient; a `Tensor` of the same type as `var`.
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    l1: L1 regularization; a scalar `Tensor` of the same type as `var`.
    l2: L2 regularization; a scalar `Tensor` of the same type as `var`.
    lr_power: Scaling factor; a scalar `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and accum are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_ftrl op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyFtrl", var=var, accum=accum, linear=linear, grad=grad, lr=lr,
      l1=l1, l2=l2, lr_power=lr_power, use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyFtrl", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_ftrl_eager_fallback(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_ftrl op does not support eager execution. Arg 'out' is a ref.")
def apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None):
  r"""Update '*var' according to the Ftrl-proximal scheme.

  grad_with_shrinkage = grad + 2 * l2_shrinkage * var
  accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
  linear += grad_with_shrinkage +
      (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    linear: A mutable `Tensor` of the same type as `var`; from a Variable().
    grad: The gradient; a `Tensor` of the same type as `var`.
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    l1: L1 regularization; a scalar `Tensor` of the same type as `var`.
    l2: L2 regularization; a scalar `Tensor` of the same type as `var`.
    l2_shrinkage: L2 shrinkage regularization; a scalar `Tensor` of the same
      type as `var`.
    lr_power: Scaling factor; a scalar `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and accum are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad, lr=lr,
      l1=l1, l2=l2, l2_shrinkage=l2_shrinkage,
      lr_power=lr_power, use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyFtrlV2", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_ftrl_v2_eager_fallback(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.")
def apply_gradient_descent(var, alpha, delta, use_locking=False, name=None):
  r"""Update '*var' by subtracting 'alpha' * 'delta' from it.

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    alpha: Scaling factor; a scalar `Tensor` of the same type as `var`.
    delta: The change; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, the
      subtraction is protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_gradient_descent op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyGradientDescent", var=var, alpha=alpha, delta=delta,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "ApplyGradientDescent", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_gradient_descent_eager_fallback(var, alpha, delta, use_locking=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_gradient_descent op does not support eager execution. Arg 'out' is a ref.")
def apply_momentum(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None):
  r"""Update '*var' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.

  accum = accum * momentum + grad
  var -= lr * accum

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    momentum: Momentum; a scalar `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and accum are protected by a lock.
    use_nesterov: Optional `bool`, defaults to `False`. If `True`, the tensor
      passed to compute grad will be var - lr * momentum * accum, so in the
      end, the var you get is actually var - lr * momentum * accum.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_momentum op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  use_nesterov = _execute.make_bool(
      False if use_nesterov is None else use_nesterov, "use_nesterov")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
      momentum=momentum, use_locking=use_locking,
      use_nesterov=use_nesterov, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"),
           "use_nesterov", op.get_attr("use_nesterov"))
  _execute.record_gradient("ApplyMomentum", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_momentum_eager_fallback(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None, ctx=None):
  # Graph-only op: the ref-typed 'out' argument cannot be produced eagerly.
  raise RuntimeError(
      "apply_momentum op does not support eager execution. Arg 'out' is a ref.")
def apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad, use_locking=False, name=None):
  r"""Update '*var' according to the AddSign update.

  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
  variable <- variable - lr_t * update

  Args:
    var: A mutable numeric `Tensor`; should be from a Variable().
    m: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scaling factor; a scalar `Tensor` of the same type as `var`.
    logbase: A scalar `Tensor` of the same type as `var`.
    sign_decay: A scalar `Tensor` of the same type as `var`.
    beta: A scalar `Tensor` of the same type as `var`.
    grad: The gradient; a `Tensor` of the same type as `var`.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updates of
      var and m are protected by a lock.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.
  """
  # The op produces a ref-typed output, which eager mode cannot express.
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    raise RuntimeError("apply_power_sign op does not support eager execution. Arg 'out' is a ref.")
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "ApplyPowerSign", var=var, m=m, lr=lr, logbase=logbase,
      sign_decay=sign_decay, beta=beta, grad=grad,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient("ApplyPowerSign", op.inputs, attrs, outputs, name)
  out, = outputs
  return out
def apply_power_sign_eager_fallback(var, m, lr, logbase, sign_decay, beta, grad, use_locking=False, name=None, ctx=None):
  """Eager-mode fallback for apply_power_sign.

  ApplyPowerSign produces a ref-typed output, which eager execution cannot
  represent, so this path unconditionally raises.
  """
  del var, m, lr, logbase, sign_decay, beta, grad, use_locking, name, ctx  # unused
  raise RuntimeError("apply_power_sign op does not support eager execution. Arg 'out' is a ref.")
def apply_proximal_adagrad(var, accum, lr, l1, l2, grad, use_locking=False, name=None):
  r"""Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
  accum += grad * grad
  prox_v = var - lr * grad * (1 / sqrt(accum))
  var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).
  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  _ctx = _context._context
  # Ref-typed output: not representable eagerly, so refuse eager execution.
  if _ctx is not None and _ctx._eager_context.is_eager:
    raise RuntimeError("apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, l2=l2,
        grad=grad, use_locking=use_locking, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "use_locking",
            _op.get_attr("use_locking"))
  # Register the op so gradients can be computed through it.
  _execute.record_gradient(
      "ApplyProximalAdagrad", _inputs_flat, _attrs, _result, name)
  # Exactly one output: unpack it from the list.
  _result, = _result
  return _result
def apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, use_locking=False, name=None, ctx=None):
  """Eager-mode fallback for apply_proximal_adagrad.

  ApplyProximalAdagrad produces a ref-typed output, which eager execution
  cannot represent, so this path unconditionally raises.
  """
  del var, accum, lr, l1, l2, grad, use_locking, name, ctx  # unused
  raise RuntimeError("apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.")
def apply_proximal_gradient_descent(var, alpha, l1, l2, delta, use_locking=False, name=None):
  r"""Update '*var' as FOBOS algorithm with fixed learning rate.
  prox_v = var - alpha * delta
  var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    alpha: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    delta: A `Tensor`. Must have the same type as `var`. The change.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).
  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  _ctx = _context._context
  # Ref-typed output: not representable eagerly, so refuse eager execution.
  if _ctx is not None and _ctx._eager_context.is_eager:
    raise RuntimeError("apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, l2=l2,
        delta=delta, use_locking=use_locking,
        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "use_locking",
            _op.get_attr("use_locking"))
  # Register the op so gradients can be computed through it.
  _execute.record_gradient(
      "ApplyProximalGradientDescent", _inputs_flat, _attrs, _result, name)
  # Exactly one output: unpack it from the list.
  _result, = _result
  return _result
def apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, delta, use_locking=False, name=None, ctx=None):
  """Eager-mode fallback for apply_proximal_gradient_descent.

  ApplyProximalGradientDescent produces a ref-typed output, which eager
  execution cannot represent, so this path unconditionally raises.
  """
  del var, alpha, l1, l2, delta, use_locking, name, ctx  # unused
  raise RuntimeError("apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.")
def apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the RMSProp algorithm.
  Note that in dense implementation of this algorithm, ms and mom will
  update even if the grad is zero, but in this sparse implementation, ms
  and mom will not update in iterations during which the grad is zero.
  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
  var <- var - mom
  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    ms: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    mom: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `var`.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, ms, and mom tensors is protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).
  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  _ctx = _context._context
  # Ref-typed output: not representable eagerly, so refuse eager execution.
  if _ctx is not None and _ctx._eager_context.is_eager:
    raise RuntimeError("apply_rms_prop op does not support eager execution. Arg 'out' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho,
        momentum=momentum, epsilon=epsilon, grad=grad,
        use_locking=use_locking, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "use_locking",
            _op.get_attr("use_locking"))
  # Register the op so gradients can be computed through it.
  _execute.record_gradient(
      "ApplyRMSProp", _inputs_flat, _attrs, _result, name)
  # Exactly one output: unpack it from the list.
  _result, = _result
  return _result
def apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None, ctx=None):
  """Eager-mode fallback for apply_rms_prop.

  ApplyRMSProp produces a ref-typed output, which eager execution cannot
  represent, so this path unconditionally raises.
  """
  del var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking, name, ctx  # unused
  raise RuntimeError("apply_rms_prop op does not support eager execution. Arg 'out' is a ref.")
def resource_apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the AdaMax algorithm.
  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  v_t <- max(beta2 * v_{t-1}, abs(g))
  variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    m: A `Tensor` of type `resource`. Should be from a Variable().
    v: A `Tensor` of type `resource`. Should be from a Variable().
    beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Must be a scalar.
    lr: A `Tensor`. Must have the same type as `beta1_power`.
      Scaling factor. Must be a scalar.
    beta1: A `Tensor`. Must have the same type as `beta1_power`.
      Momentum factor. Must be a scalar.
    beta2: A `Tensor`. Must have the same type as `beta1_power`.
      Momentum factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `beta1_power`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, m, and v tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAdaMax", name, _ctx._post_execution_callbacks, var, m,
        v, beta1_power, lr, beta1, beta2, epsilon, grad, "use_locking",
        use_locking)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_ada_max_eager_fallback(
            var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAdaMax", var=var, m=m, v=v, beta1_power=beta1_power,
        lr=lr, beta1=beta1, beta2=beta2,
        epsilon=epsilon, grad=grad,
        use_locking=use_locking, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_ada_max_eager_fallback(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_ada_max
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Coerce the numeric inputs to eager tensors sharing one dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, lr, beta1, beta2, epsilon, grad], _ctx)
  (beta1_power, lr, beta1, beta2, epsilon, grad) = _inputs_T
  # Variable handles are passed as DT_RESOURCE tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  v = _ops.convert_to_tensor(v, _dtypes.resource)
  _inputs_flat = [var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  # num_outputs=0: the op mutates the resources in place and returns nothing.
  _result = _execute.execute(b"ResourceApplyAdaMax", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
def resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the adadelta scheme.
  accum = rho() * accum + (1 - rho()) * grad.square();
  update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
  update_accum = rho() * update_accum + (1 - rho()) * update.square();
  var -= update;
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    accum_update: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Constant factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var, accum and update_accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAdadelta", name, _ctx._post_execution_callbacks, var,
        accum, accum_update, lr, rho, epsilon, grad, "use_locking",
        use_locking)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_adadelta_eager_fallback(
            var, accum, accum_update, lr, rho, epsilon, grad,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAdadelta", var=var, accum=accum,
        accum_update=accum_update, lr=lr, rho=rho,
        epsilon=epsilon, grad=grad,
        use_locking=use_locking, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_adadelta
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Coerce the numeric inputs to eager tensors sharing one dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, epsilon, grad], _ctx)
  (lr, rho, epsilon, grad) = _inputs_T
  # Variable handles are passed as DT_RESOURCE tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  accum_update = _ops.convert_to_tensor(accum_update, _dtypes.resource)
  _inputs_flat = [var, accum, accum_update, lr, rho, epsilon, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  # num_outputs=0: the op mutates the resources in place and returns nothing.
  _result = _execute.execute(b"ResourceApplyAdadelta", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
def resource_apply_adagrad(var, accum, lr, grad, use_locking=False, update_slots=True, name=None):
  r"""Update '*var' according to the adagrad scheme.
  accum += grad * grad
  var -= lr * grad * (1 / sqrt(accum))
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    update_slots: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAdagrad", name, _ctx._post_execution_callbacks, var,
        accum, lr, grad, "use_locking", use_locking, "update_slots",
        update_slots)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_adagrad_eager_fallback(
            var, accum, lr, grad, use_locking=use_locking,
            update_slots=update_slots, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if update_slots is None:
    update_slots = True
  update_slots = _execute.make_bool(update_slots, "update_slots")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad,
        use_locking=use_locking,
        update_slots=update_slots, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_adagrad_eager_fallback(var, accum, lr, grad, use_locking=False, update_slots=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_adagrad
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if update_slots is None:
    update_slots = True
  update_slots = _execute.make_bool(update_slots, "update_slots")
  # Coerce the numeric inputs to eager tensors sharing one dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad], _ctx)
  (lr, grad) = _inputs_T
  # Variable handles are passed as DT_RESOURCE tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _inputs_flat = [var, accum, lr, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking, "update_slots",
  update_slots)
  # num_outputs=0: the op mutates the resources in place and returns nothing.
  _result = _execute.execute(b"ResourceApplyAdagrad", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
def resource_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking=False, name=None):
  r"""Update '*var' according to the proximal adagrad scheme.
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    gradient_accumulator: A `Tensor` of type `resource`.
      Should be from a Variable().
    gradient_squared_accumulator: A `Tensor` of type `resource`.
      Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    global_step: A `Tensor` of type `int64`.
      Training step number. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAdagradDA", name, _ctx._post_execution_callbacks, var,
        gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2,
        global_step, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_adagrad_da_eager_fallback(
            var, gradient_accumulator, gradient_squared_accumulator, grad, lr,
            l1, l2, global_step, use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAdagradDA", var=var,
        gradient_accumulator=gradient_accumulator,
        gradient_squared_accumulator=gradient_squared_accumulator,
        grad=grad, lr=lr, l1=l1, l2=l2,
        global_step=global_step,
        use_locking=use_locking, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_adagrad_da
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Coerce the numeric inputs to eager tensors sharing one dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2], _ctx)
  (grad, lr, l1, l2) = _inputs_T
  # Variable handles are passed as DT_RESOURCE tensors; the step is int64.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  gradient_accumulator = _ops.convert_to_tensor(gradient_accumulator, _dtypes.resource)
  gradient_squared_accumulator = _ops.convert_to_tensor(gradient_squared_accumulator, _dtypes.resource)
  global_step = _ops.convert_to_tensor(global_step, _dtypes.int64)
  _inputs_flat = [var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  # num_outputs=0: the op mutates the resources in place and returns nothing.
  _result = _execute.execute(b"ResourceApplyAdagradDA", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _result = None
  return _result
def resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, use_nesterov=False, name=None):
  r"""Update '*var' according to the Adam algorithm.
  $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
  $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
  $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
  $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    m: A `Tensor` of type `resource`. Should be from a Variable().
    v: A `Tensor` of type `resource`. Should be from a Variable().
    beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Must be a scalar.
    beta2_power: A `Tensor`. Must have the same type as `beta1_power`.
      Must be a scalar.
    lr: A `Tensor`. Must have the same type as `beta1_power`.
      Scaling factor. Must be a scalar.
    beta1: A `Tensor`. Must have the same type as `beta1_power`.
      Momentum factor. Must be a scalar.
    beta2: A `Tensor`. Must have the same type as `beta1_power`.
      Momentum factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `beta1_power`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, m, and v tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, uses the nesterov update.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAdam", name, _ctx._post_execution_callbacks, var, m, v,
        beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
        "use_locking", use_locking, "use_nesterov", use_nesterov)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_adam_eager_fallback(
            var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
            grad, use_locking=use_locking, use_nesterov=use_nesterov,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAdam", var=var, m=m, v=v, beta1_power=beta1_power,
        beta2_power=beta2_power, lr=lr, beta1=beta1,
        beta2=beta2, epsilon=epsilon, grad=grad,
        use_locking=use_locking,
        use_nesterov=use_nesterov, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_adam_eager_fallback(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, use_nesterov=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_adam
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  # Coerce the numeric inputs to eager tensors sharing one dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], _ctx)
  (beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad) = _inputs_T
  # Variable handles are passed as DT_RESOURCE tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  v = _ops.convert_to_tensor(v, _dtypes.resource)
  _inputs_flat = [var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov",
  use_nesterov)
  # num_outputs=0: the op mutates the resources in place and returns nothing.
  _result = _execute.execute(b"ResourceApplyAdam", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
def resource_apply_adam_with_amsgrad(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the Adam algorithm.
  $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
  $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
  $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
  $$vhat_t := max{vhat_{t-1}, v_t}$$
  $$variable := variable - lr_t * m_t / (\sqrt{vhat_t} + \epsilon)$$
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    m: A `Tensor` of type `resource`. Should be from a Variable().
    v: A `Tensor` of type `resource`. Should be from a Variable().
    vhat: A `Tensor` of type `resource`. Should be from a Variable().
    beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Must be a scalar.
    beta2_power: A `Tensor`. Must have the same type as `beta1_power`.
      Must be a scalar.
    lr: A `Tensor`. Must have the same type as `beta1_power`.
      Scaling factor. Must be a scalar.
    beta1: A `Tensor`. Must have the same type as `beta1_power`.
      Momentum factor. Must be a scalar.
    beta2: A `Tensor`. Must have the same type as `beta1_power`.
      Momentum factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `beta1_power`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, m, and v tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAdamWithAmsgrad", name, _ctx._post_execution_callbacks,
        var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
        grad, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_adam_with_amsgrad_eager_fallback(
            var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2,
            epsilon, grad, use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAdamWithAmsgrad", var=var, m=m, v=v, vhat=vhat,
        beta1_power=beta1_power,
        beta2_power=beta2_power, lr=lr,
        beta1=beta1, beta2=beta2,
        epsilon=epsilon, grad=grad,
        use_locking=use_locking, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_adam_with_amsgrad_eager_fallback(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_adam_with_amsgrad
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Coerce the numeric inputs to eager tensors sharing one dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], _ctx)
  (beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad) = _inputs_T
  # Variable handles are passed as DT_RESOURCE tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  v = _ops.convert_to_tensor(v, _dtypes.resource)
  vhat = _ops.convert_to_tensor(vhat, _dtypes.resource)
  _inputs_flat = [var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  # num_outputs=0: the op mutates the resources in place and returns nothing.
  _result = _execute.execute(b"ResourceApplyAdamWithAmsgrad", 0,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _result = None
  return _result
def resource_apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad, use_locking=False, name=None):
  r"""Update '*var' according to the AddSign update.
  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  update <- (alpha + sign_decay * sign(g) *sign(m)) * g
  variable <- variable - lr_t * update
  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    m: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    alpha: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    sign_decay: A `Tensor`. Must have the same type as `lr`.
      Must be a scalar.
    beta: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and m tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).
  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      # Fast path: dispatch straight through the C API without building a graph.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyAddSign", name, _ctx._post_execution_callbacks, var, m,
        lr, alpha, sign_decay, beta, grad, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return resource_apply_add_sign_eager_fallback(
            var, m, lr, alpha, sign_decay, beta, grad,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyAddSign", var=var, m=m, lr=lr, alpha=alpha,
        sign_decay=sign_decay, beta=beta, grad=grad,
        use_locking=use_locking, name=name)
  # The op has no outputs; the created Operation itself is returned.
  return _op
  # Unreachable generated-template epilogue.
  _result = None
  return _result
def resource_apply_add_sign_eager_fallback(var, m, lr, alpha, sign_decay, beta, grad, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_add_sign.

  Executes the ResourceApplyAddSign op through the Python eager executor
  instead of the C++ fast path. Returns None (the op has no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [lr, alpha, sign_decay, beta, grad], exec_ctx)
  lr, alpha, sign_decay, beta, grad = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  _execute.execute(b"ResourceApplyAddSign", 0,
                   inputs=[var, m, lr, alpha, sign_decay, beta, grad],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second moment
  (i.e., the variance) for normalization, as opposed to regular RMSProp, which
  uses the (uncentered) second moment. This often helps with training, but is
  slightly more expensive in terms of computation and memory.

  Note that in dense implementation of this algorithm, mg, ms, and mom will
  update even if the grad is zero, but in this sparse implementation, mg, ms,
  and mom will not update in iterations during which the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  mean_grad = decay * mean_grad + (1-decay) * gradient
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

  mg <- rho * mg_{t-1} + (1-rho) * grad
  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    mg: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, mg, ms, and mom tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyCenteredRMSProp", name, _ctx._post_execution_callbacks,
        var, mg, ms, mom, lr, rho, momentum, epsilon, grad, "use_locking",
        use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_centered_rms_prop_eager_fallback(
            var, mg, ms, mom, lr, rho, momentum, epsilon, grad,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyCenteredRMSProp node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr,
                                        rho=rho, momentum=momentum,
                                        epsilon=epsilon, grad=grad,
                                        use_locking=use_locking, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_centered_rms_prop.

  Executes the ResourceApplyCenteredRMSProp op through the Python eager
  executor instead of the C++ fast path. Returns None (no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [lr, rho, momentum, epsilon, grad], exec_ctx)
  lr, rho, momentum, epsilon, grad = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  mg = _ops.convert_to_tensor(mg, _dtypes.resource)
  ms = _ops.convert_to_tensor(ms, _dtypes.resource)
  mom = _ops.convert_to_tensor(mom, _dtypes.resource)
  _execute.execute(b"ResourceApplyCenteredRMSProp", 0,
                   inputs=[var, mg, ms, mom, lr, rho, momentum, epsilon, grad],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking=False, name=None):
  r"""Update '*var' according to the Ftrl-proximal scheme.

  accum_new = accum + grad * grad
  linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyFtrl", name, _ctx._post_execution_callbacks, var, accum,
        linear, grad, lr, l1, l2, lr_power, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_ftrl_eager_fallback(
            var, accum, linear, grad, lr, l1, l2, lr_power,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyFtrl node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyFtrl", var=var, accum=accum, linear=linear, grad=grad,
                             lr=lr, l1=l1, l2=l2, lr_power=lr_power,
                             use_locking=use_locking, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_ftrl_eager_fallback(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_ftrl.

  Executes the ResourceApplyFtrl op through the Python eager executor
  instead of the C++ fast path. Returns None (the op has no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [grad, lr, l1, l2, lr_power], exec_ctx)
  grad, lr, l1, l2, lr_power = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  linear = _ops.convert_to_tensor(linear, _dtypes.resource)
  _execute.execute(b"ResourceApplyFtrl", 0,
                   inputs=[var, accum, linear, grad, lr, l1, l2, lr_power],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None):
  r"""Update '*var' according to the Ftrl-proximal scheme.

  grad_with_shrinkage = grad + 2 * l2_shrinkage * var
  accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
  linear += grad_with_shrinkage +
      (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 shrinkage regularization. Must be a scalar.
    l2_shrinkage: A `Tensor`. Must have the same type as `grad`.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyFtrlV2", name, _ctx._post_execution_callbacks, var,
        accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_ftrl_v2_eager_fallback(
            var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyFtrlV2 node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad,
                               lr=lr, l1=l1, l2=l2, l2_shrinkage=l2_shrinkage,
                               lr_power=lr_power, use_locking=use_locking,
                               name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_ftrl_v2_eager_fallback(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_ftrl_v2.

  Executes the ResourceApplyFtrlV2 op through the Python eager executor
  instead of the C++ fast path. Returns None (the op has no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [grad, lr, l1, l2, l2_shrinkage, lr_power], exec_ctx)
  grad, lr, l1, l2, l2_shrinkage, lr_power = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  linear = _ops.convert_to_tensor(linear, _dtypes.resource)
  _execute.execute(b"ResourceApplyFtrlV2", 0,
                   inputs=[var, accum, linear, grad, lr, l1, l2, l2_shrinkage,
                           lr_power],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_gradient_descent(var, alpha, delta, use_locking=False, name=None):
  r"""Update '*var' by subtracting 'alpha' * 'delta' from it.

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    delta: A `Tensor`. Must have the same type as `alpha`. The change.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyGradientDescent", name, _ctx._post_execution_callbacks,
        var, alpha, delta, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_gradient_descent_eager_fallback(
            var, alpha, delta, use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyGradientDescent node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyGradientDescent", var=var, alpha=alpha, delta=delta,
                                        use_locking=use_locking, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_gradient_descent_eager_fallback(var, alpha, delta, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_gradient_descent.

  Executes the ResourceApplyGradientDescent op through the Python eager
  executor instead of the C++ fast path. Returns None (no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager([alpha, delta], exec_ctx)
  alpha, delta = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  _execute.execute(b"ResourceApplyGradientDescent", 0,
                   inputs=[var, alpha, delta],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_keras_momentum(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None):
  r"""Update '*var' according to the momentum scheme. Set use_nesterov = True if you

  want to use Nesterov momentum.

  accum = accum * momentum - lr * grad
  var += accum

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, the tensor passed to compute grad will be
      var + momentum * accum, so in the end, the var you get is actually
      var + momentum * accum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyKerasMomentum", name, _ctx._post_execution_callbacks,
        var, accum, lr, grad, momentum, "use_locking", use_locking,
        "use_nesterov", use_nesterov)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_keras_momentum_eager_fallback(
            var, accum, lr, grad, momentum, use_locking=use_locking,
            use_nesterov=use_nesterov, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyKerasMomentum node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyKerasMomentum", var=var, accum=accum, lr=lr, grad=grad,
                                      momentum=momentum,
                                      use_locking=use_locking,
                                      use_nesterov=use_nesterov, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_keras_momentum_eager_fallback(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_keras_momentum.

  Executes the ResourceApplyKerasMomentum op through the Python eager
  executor instead of the C++ fast path. Returns None (no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [lr, grad, momentum], exec_ctx)
  lr, grad, momentum = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _execute.execute(b"ResourceApplyKerasMomentum", 0,
                   inputs=[var, accum, lr, grad, momentum],
                   attrs=("T", attr_t, "use_locking", use_locking,
                          "use_nesterov", use_nesterov),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_momentum(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None):
  r"""Update '*var' according to the momentum scheme. Set use_nesterov = True if you

  want to use Nesterov momentum.

  accum = accum * momentum + grad
  var -= lr * accum

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, the tensor passed to compute grad will be
      var - lr * momentum * accum, so in the end, the var you get is actually
      var - lr * momentum * accum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyMomentum", name, _ctx._post_execution_callbacks, var,
        accum, lr, grad, momentum, "use_locking", use_locking, "use_nesterov",
        use_nesterov)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_momentum_eager_fallback(
            var, accum, lr, grad, momentum, use_locking=use_locking,
            use_nesterov=use_nesterov, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyMomentum node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
                                 momentum=momentum, use_locking=use_locking,
                                 use_nesterov=use_nesterov, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_momentum_eager_fallback(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_momentum.

  Executes the ResourceApplyMomentum op through the Python eager
  executor instead of the C++ fast path. Returns None (no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [lr, grad, momentum], exec_ctx)
  lr, grad, momentum = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _execute.execute(b"ResourceApplyMomentum", 0,
                   inputs=[var, accum, lr, grad, momentum],
                   attrs=("T", attr_t, "use_locking", use_locking,
                          "use_nesterov", use_nesterov),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad, use_locking=False, name=None):
  r"""Update '*var' according to the AddSign update.

  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
  variable <- variable - lr_t * update

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    m: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    logbase: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    sign_decay: A `Tensor`. Must have the same type as `lr`.
      Must be a scalar.
    beta: A `Tensor`. Must have the same type as `lr`. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and m tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyPowerSign", name, _ctx._post_execution_callbacks, var,
        m, lr, logbase, sign_decay, beta, grad, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_power_sign_eager_fallback(
            var, m, lr, logbase, sign_decay, beta, grad,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyPowerSign node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyPowerSign", var=var, m=m, lr=lr, logbase=logbase,
                                  sign_decay=sign_decay, beta=beta, grad=grad,
                                  use_locking=use_locking, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_power_sign_eager_fallback(var, m, lr, logbase, sign_decay, beta, grad, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_power_sign.

  Executes the ResourceApplyPowerSign op through the Python eager
  executor instead of the C++ fast path. Returns None (no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [lr, logbase, sign_decay, beta, grad], exec_ctx)
  lr, logbase, sign_decay, beta, grad = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  m = _ops.convert_to_tensor(m, _dtypes.resource)
  _execute.execute(b"ResourceApplyPowerSign", 0,
                   inputs=[var, m, lr, logbase, sign_decay, beta, grad],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, use_locking=False, name=None):
  r"""Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.

  accum += grad * grad
  prox_v = var - lr * grad * (1 / sqrt(accum))
  var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `lr`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `lr`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyProximalAdagrad", name, _ctx._post_execution_callbacks,
        var, accum, lr, l1, l2, grad, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_proximal_adagrad_eager_fallback(
            var, accum, lr, l1, l2, grad, use_locking=use_locking, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyProximalAdagrad node to the current graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1,
                                        l2=l2, grad=grad,
                                        use_locking=use_locking, name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, use_locking=False, name=None, ctx=None):
  r"""Eager-mode slow path for resource_apply_proximal_adagrad.

  Executes the ResourceApplyProximalAdagrad op through the Python eager
  executor instead of the C++ fast path. Returns None (no outputs).
  """
  exec_ctx = ctx or _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Infer the common dtype attr T from the numeric inputs.
  attr_t, matched = _execute.args_to_matching_eager(
      [lr, l1, l2, grad], exec_ctx)
  lr, l1, l2, grad = matched
  # Variable handles are always resource tensors.
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _execute.execute(b"ResourceApplyProximalAdagrad", 0,
                   inputs=[var, accum, lr, l1, l2, grad],
                   attrs=("T", attr_t, "use_locking", use_locking),
                   ctx=exec_ctx, name=name)
  return None
def resource_apply_proximal_gradient_descent(var, alpha, l1, l2, delta, use_locking=False, name=None):
  r"""Update '*var' as FOBOS algorithm with fixed learning rate.

  prox_v = var - alpha * delta
  var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `alpha`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `alpha`.
      L2 regularization. Must be a scalar.
    delta: A `Tensor`. Must have the same type as `alpha`. The change.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager fast path: dispatch straight to the C++ fast-path executor.
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyProximalGradientDescent", name,
        _ctx._post_execution_callbacks, var, alpha, l1, l2, delta,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python eager slow path.
      try:
        return resource_apply_proximal_gradient_descent_eager_fallback(
            var, alpha, l1, l2, delta, use_locking=use_locking, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the op name attached for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: add a ResourceApplyProximalGradientDescent node to the graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1,
                                                l2=l2, delta=delta,
                                                use_locking=use_locking,
                                                name=name)
  return _op
  # NOTE(review): lines below are unreachable after `return _op` —
  # artifact of the op-wrapper generator.
  _result = None
  return _result
def resource_apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, delta, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_proximal_gradient_descent
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T; var stays a resource handle.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, l1, l2, delta], _ctx)
  (alpha, l1, l2, delta) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  _inputs_flat = [var, alpha, l1, l2, delta]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  # The op has zero outputs; execute purely for its side effect of
  # updating the resource variable.
  _execute.execute(b"ResourceApplyProximalGradientDescent", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None):
  r"""Update '*var' according to the RMSProp algorithm.

  Note that in dense implementation of this algorithm, ms and mom will
  update even if the grad is zero, but in this sparse implementation, ms
  and mom will not update in iterations during which the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, ms, and mom tensors is protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceApplyRMSProp", name, _ctx._post_execution_callbacks, var, ms,
        mom, lr, rho, momentum, epsilon, grad, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_apply_rms_prop_eager_fallback(
            var, ms, mom, lr, rho, momentum, epsilon, grad,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho,
                                momentum=momentum, epsilon=epsilon, grad=grad,
                                use_locking=use_locking, name=name)
  return _op
def resource_apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_apply_rms_prop
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T; handles stay resources.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], _ctx)
  (lr, rho, momentum, epsilon, grad) = _inputs_T
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  ms = _ops.convert_to_tensor(ms, _dtypes.resource)
  mom = _ops.convert_to_tensor(mom, _dtypes.resource)
  _inputs_flat = [var, ms, mom, lr, rho, momentum, epsilon, grad]
  _attrs = ("T", _attr_T, "use_locking", use_locking)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceApplyRMSProp", 0, inputs=_inputs_flat,
                   attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking=False, name=None):
  r"""var: Should be from a Variable().

  Args:
    var: A `Tensor` of type `resource`.
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    accum_update: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Learning rate. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Constant factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyAdadelta", name, _ctx._post_execution_callbacks,
        var, accum, accum_update, lr, rho, epsilon, grad, indices,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_adadelta_eager_fallback(
            var, accum, accum_update, lr, rho, epsilon, grad, indices,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyAdadelta", var=var, accum=accum,
                                       accum_update=accum_update, lr=lr,
                                       rho=rho, epsilon=epsilon, grad=grad,
                                       indices=indices,
                                       use_locking=use_locking, name=name)
  return _op
def resource_sparse_apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_adadelta
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T, and resolve the
  # index dtype (int32/int64) separately as Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, epsilon, grad], _ctx)
  (lr, rho, epsilon, grad) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  accum_update = _ops.convert_to_tensor(accum_update, _dtypes.resource)
  _inputs_flat = [var, accum, accum_update, lr, rho, epsilon, grad, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceSparseApplyAdadelta", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_adagrad(var, accum, lr, grad, indices, use_locking=False, update_slots=True, name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

  That is for rows we have grad for, we update var and accum as follows:
  accum += grad * grad
  var -= lr * grad * (1 / sqrt(accum))

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    update_slots: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyAdagrad", name, _ctx._post_execution_callbacks,
        var, accum, lr, grad, indices, "use_locking", use_locking,
        "update_slots", update_slots)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_adagrad_eager_fallback(
            var, accum, lr, grad, indices, use_locking=use_locking,
            update_slots=update_slots, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if update_slots is None:
    update_slots = True
  update_slots = _execute.make_bool(update_slots, "update_slots")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad,
                                      indices=indices,
                                      use_locking=use_locking,
                                      update_slots=update_slots, name=name)
  return _op
def resource_sparse_apply_adagrad_eager_fallback(var, accum, lr, grad, indices, use_locking=False, update_slots=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_adagrad
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if update_slots is None:
    update_slots = True
  update_slots = _execute.make_bool(update_slots, "update_slots")
  # Promote the numeric inputs to one common dtype T, and resolve the
  # index dtype (int32/int64) separately as Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad], _ctx)
  (lr, grad) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _inputs_flat = [var, accum, lr, grad, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking, "update_slots", update_slots)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceSparseApplyAdagrad", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking=False, name=None):
  r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    gradient_accumulator: A `Tensor` of type `resource`.
      Should be from a Variable().
    gradient_squared_accumulator: A `Tensor` of type `resource`.
      Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `grad`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    global_step: A `Tensor` of type `int64`.
      Training step number. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyAdagradDA", name, _ctx._post_execution_callbacks,
        var, gradient_accumulator, gradient_squared_accumulator, grad,
        indices, lr, l1, l2, global_step, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_adagrad_da_eager_fallback(
            var, gradient_accumulator, gradient_squared_accumulator, grad,
            indices, lr, l1, l2, global_step, use_locking=use_locking,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyAdagradDA", var=var,
                                        gradient_accumulator=gradient_accumulator,
                                        gradient_squared_accumulator=gradient_squared_accumulator,
                                        grad=grad, indices=indices, lr=lr,
                                        l1=l1, l2=l2, global_step=global_step,
                                        use_locking=use_locking, name=name)
  return _op
def resource_sparse_apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_adagrad_da
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T, and resolve the
  # index dtype (int32/int64) separately as Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2], _ctx)
  (grad, lr, l1, l2) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  gradient_accumulator = _ops.convert_to_tensor(gradient_accumulator, _dtypes.resource)
  gradient_squared_accumulator = _ops.convert_to_tensor(gradient_squared_accumulator, _dtypes.resource)
  global_step = _ops.convert_to_tensor(global_step, _dtypes.int64)
  _inputs_flat = [var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceSparseApplyAdagradDA", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second moment
  (i.e., the variance) for normalization, as opposed to regular RMSProp, which
  uses the (uncentered) second moment. This often helps with training, but is
  slightly more expensive in terms of computation and memory.

  Note that in dense implementation of this algorithm, mg, ms, and mom will
  update even if the grad is zero, but in this sparse implementation, mg, ms,
  and mom will not update in iterations during which the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  mean_grad = decay * mean_grad + (1-decay) * gradient
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    mg: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, mg, ms, and mom tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyCenteredRMSProp", name,
        _ctx._post_execution_callbacks, var, mg, ms, mom, lr, rho, momentum,
        epsilon, grad, indices, "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_centered_rms_prop_eager_fallback(
            var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom,
                                              lr=lr, rho=rho,
                                              momentum=momentum,
                                              epsilon=epsilon, grad=grad,
                                              indices=indices,
                                              use_locking=use_locking,
                                              name=name)
  return _op
def resource_sparse_apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_centered_rms_prop
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T, and resolve the
  # index dtype (int32/int64) separately as Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], _ctx)
  (lr, rho, momentum, epsilon, grad) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  mg = _ops.convert_to_tensor(mg, _dtypes.resource)
  ms = _ops.convert_to_tensor(ms, _dtypes.resource)
  mom = _ops.convert_to_tensor(mom, _dtypes.resource)
  _inputs_flat = [var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceSparseApplyCenteredRMSProp", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking=False, name=None):
  r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.

  That is for rows we have grad for, we update var, accum and linear as follows:
  accum_new = accum + grad * grad
  linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyFtrl", name, _ctx._post_execution_callbacks, var,
        accum, linear, grad, indices, lr, l1, l2, lr_power, "use_locking",
        use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_ftrl_eager_fallback(
            var, accum, linear, grad, indices, lr, l1, l2, lr_power,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyFtrl", var=var, accum=accum, linear=linear,
                                   grad=grad, indices=indices, lr=lr, l1=l1,
                                   l2=l2, lr_power=lr_power,
                                   use_locking=use_locking, name=name)
  return _op
def resource_sparse_apply_ftrl_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_ftrl
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T, and resolve the
  # index dtype (int32/int64) separately as Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, lr_power], _ctx)
  (grad, lr, l1, l2, lr_power) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  linear = _ops.convert_to_tensor(linear, _dtypes.resource)
  _inputs_flat = [var, accum, linear, grad, indices, lr, l1, l2, lr_power]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceSparseApplyFtrl", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None):
  r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.

  That is for rows we have grad for, we update var, accum and linear as follows:
  grad_with_shrinkage = grad + 2 * l2_shrinkage * var
  accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
  linear += grad_with_shrinkage +
      (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
  var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
  accum = accum_new

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    linear: A `Tensor` of type `resource`. Should be from a Variable().
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `grad`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `grad`.
      L2 shrinkage regularization. Must be a scalar.
    l2_shrinkage: A `Tensor`. Must have the same type as `grad`.
    lr_power: A `Tensor`. Must have the same type as `grad`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyFtrlV2", name, _ctx._post_execution_callbacks,
        var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_ftrl_v2_eager_fallback(
            var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage,
            lr_power, use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyFtrlV2", var=var, accum=accum, linear=linear,
                                     grad=grad, indices=indices, lr=lr, l1=l1,
                                     l2=l2, l2_shrinkage=l2_shrinkage,
                                     lr_power=lr_power,
                                     use_locking=use_locking, name=name)
  return _op
def resource_sparse_apply_ftrl_v2_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_ftrl_v2
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # Promote the numeric inputs to one common dtype T, and resolve the
  # index dtype (int32/int64) separately as Tindices.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, l2_shrinkage, lr_power], _ctx)
  (grad, lr, l1, l2, l2_shrinkage, lr_power) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  linear = _ops.convert_to_tensor(linear, _dtypes.resource)
  _inputs_flat = [var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # Zero-output op: executed only for the variable-update side effect.
  _execute.execute(b"ResourceSparseApplyFtrlV2", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_keras_momentum(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.

  That is for rows we have grad for, we update var and accum as follows:

  accum = accum * momentum - lr * grad
  var += accum

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, the tensor passed to compute grad will be
      var + momentum * accum, so in the end, the var you get is actually
      var + momentum * accum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path: dispatch straight to the C++ runtime; drop to the
    # Python slow path only if the fast path cannot handle the inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyKerasMomentum", name,
        _ctx._post_execution_callbacks, var, accum, lr, grad, indices,
        momentum, "use_locking", use_locking, "use_nesterov", use_nesterov)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_keras_momentum_eager_fallback(
            var, accum, lr, grad, indices, momentum, use_locking=use_locking,
            use_nesterov=use_nesterov, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyKerasMomentum", var=var, accum=accum, lr=lr,
                                            grad=grad, indices=indices,
                                            momentum=momentum,
                                            use_locking=use_locking,
                                            use_nesterov=use_nesterov,
                                            name=name)
  return _op
def resource_sparse_apply_keras_momentum_eager_fallback(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_keras_momentum
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  # lr, grad and momentum share the op's "T" attr, so coerce them to one dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], _ctx)
  (lr, grad, momentum) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _inputs_flat = [var, accum, lr, grad, indices, momentum]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking, "use_nesterov", use_nesterov)
  # The op has zero outputs (num_outputs=0); the old code bound the result of
  # execute() and then immediately overwrote it with None — just return None.
  _execute.execute(b"ResourceSparseApplyKerasMomentum", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_momentum(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.

  That is for rows we have grad for, we update var and accum as follows:

  accum = accum * momentum + grad
  var -= lr * accum

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    momentum: A `Tensor`. Must have the same type as `lr`.
      Momentum. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    use_nesterov: An optional `bool`. Defaults to `False`.
      If `True`, the tensor passed to compute grad will be
      var - lr * momentum * accum, so in the end, the var you get is actually
      var - lr * momentum * accum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path; on failure fall back to the slow path, and from there
    # possibly to graph construction (via _SymbolicException).
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyMomentum", name, _ctx._post_execution_callbacks,
        var, accum, lr, grad, indices, momentum, "use_locking", use_locking,
        "use_nesterov", use_nesterov)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_momentum_eager_fallback(
            var, accum, lr, grad, indices, momentum, use_locking=use_locking,
            use_nesterov=use_nesterov, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
                                       indices=indices, momentum=momentum,
                                       use_locking=use_locking,
                                       use_nesterov=use_nesterov, name=name)
  # Removed unreachable `_result = None; return _result` that followed this.
  return _op
def resource_sparse_apply_momentum_eager_fallback(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_momentum
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  # lr, grad and momentum share the op's "T" attr, so coerce them to one dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], _ctx)
  (lr, grad, momentum) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _inputs_flat = [var, accum, lr, grad, indices, momentum]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking, "use_nesterov", use_nesterov)
  # The op has zero outputs (num_outputs=0); the old code bound the result of
  # execute() and then immediately overwrote it with None — just return None.
  _execute.execute(b"ResourceSparseApplyMomentum", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices, use_locking=False, name=None):
  r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

  That is for rows we have grad for, we update var and accum as follows:

  accum += grad * grad
  prox_v = var
  prox_v -= lr * grad * (1 / sqrt(accum))
  var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    accum: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `lr`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `lr`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path; on failure fall back to the slow path, and from there
    # possibly to graph construction (via _SymbolicException).
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyProximalAdagrad", name,
        _ctx._post_execution_callbacks, var, accum, lr, l1, l2, grad, indices,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_proximal_adagrad_eager_fallback(
            var, accum, lr, l1, l2, grad, indices, use_locking=use_locking,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyProximalAdagrad", var=var, accum=accum, lr=lr,
                                              l1=l1, l2=l2, grad=grad,
                                              indices=indices,
                                              use_locking=use_locking,
                                              name=name)
  # Removed unreachable `_result = None; return _result` that followed this.
  return _op
def resource_sparse_apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, indices, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_proximal_adagrad
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # lr, l1, l2 and grad share the op's "T" attr, so coerce them to one dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, l1, l2, grad], _ctx)
  (lr, l1, l2, grad) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  accum = _ops.convert_to_tensor(accum, _dtypes.resource)
  _inputs_flat = [var, accum, lr, l1, l2, grad, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # The op has zero outputs (num_outputs=0); the old code bound the result of
  # execute() and then immediately overwrote it with None — just return None.
  _execute.execute(b"ResourceSparseApplyProximalAdagrad", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices, use_locking=False, name=None):
  r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate.

  That is for rows we have grad for, we update var as follows:

  prox_v = var - alpha * grad
  var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `alpha`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `alpha`.
      L2 regularization. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `alpha`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path; on failure fall back to the slow path, and from there
    # possibly to graph construction (via _SymbolicException).
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyProximalGradientDescent", name,
        _ctx._post_execution_callbacks, var, alpha, l1, l2, grad, indices,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_proximal_gradient_descent_eager_fallback(
            var, alpha, l1, l2, grad, indices, use_locking=use_locking,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyProximalGradientDescent", var=var, alpha=alpha,
                                                      l1=l1, l2=l2, grad=grad,
                                                      indices=indices,
                                                      use_locking=use_locking,
                                                      name=name)
  # Removed unreachable `_result = None; return _result` that followed this.
  return _op
def resource_sparse_apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, grad, indices, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_proximal_gradient_descent
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # alpha, l1, l2 and grad share the op's "T" attr; coerce them to one dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, l1, l2, grad], _ctx)
  (alpha, l1, l2, grad) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  _inputs_flat = [var, alpha, l1, l2, grad, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # The op has zero outputs (num_outputs=0); the old code bound the result of
  # execute() and then immediately overwrote it with None — just return None.
  _execute.execute(b"ResourceSparseApplyProximalGradientDescent", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def resource_sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None):
  r"""Update '*var' according to the RMSProp algorithm.

  Note that in dense implementation of this algorithm, ms and mom will
  update even if the grad is zero, but in this sparse implementation, ms
  and mom will not update in iterations during which the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

  ms <- rho * ms_{t-1} + (1-rho) * grad * grad
  mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
  var <- var - mom

  Args:
    var: A `Tensor` of type `resource`. Should be from a Variable().
    ms: A `Tensor` of type `resource`. Should be from a Variable().
    mom: A `Tensor` of type `resource`. Should be from a Variable().
    lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `lr`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `lr`.
    epsilon: A `Tensor`. Must have the same type as `lr`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `lr`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, ms, and mom tensors is protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager fast path; on failure fall back to the slow path, and from there
    # possibly to graph construction (via _SymbolicException).
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ResourceSparseApplyRMSProp", name, _ctx._post_execution_callbacks,
        var, ms, mom, lr, rho, momentum, epsilon, grad, indices,
        "use_locking", use_locking)
      return _result
    except _core._FallbackException:
      try:
        return resource_sparse_apply_rms_prop_eager_fallback(
            var, ms, mom, lr, rho, momentum, epsilon, grad, indices,
            use_locking=use_locking, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op = _op_def_lib._apply_op_helper(
        "ResourceSparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho,
                                      momentum=momentum, epsilon=epsilon,
                                      grad=grad, indices=indices,
                                      use_locking=use_locking, name=name)
  # Removed unreachable `_result = None; return _result` that followed this.
  return _op
def resource_sparse_apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function resource_sparse_apply_rms_prop
  """
  _ctx = ctx if ctx else _context.context()
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  # lr, rho, momentum, epsilon and grad share the "T" attr; coerce to one dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], _ctx)
  (lr, rho, momentum, epsilon, grad) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  ms = _ops.convert_to_tensor(ms, _dtypes.resource)
  mom = _ops.convert_to_tensor(mom, _dtypes.resource)
  _inputs_flat = [var, ms, mom, lr, rho, momentum, epsilon, grad, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
  use_locking)
  # The op has zero outputs (num_outputs=0); the old code bound the result of
  # execute() and then immediately overwrote it with None — just return None.
  _execute.execute(b"ResourceSparseApplyRMSProp", 0,
                   inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
  return None
def sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking=False, name=None):
  r"""Sparse update of '*var', '*accum' and '*accum_update' per Adadelta.

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    accum_update: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay factor. Must be a scalar.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Constant factor. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  eager_ctx = _context._context
  if eager_ctx is not None and eager_ctx._eager_context.is_eager:
    # The op produces a ref output, which has no eager representation.
    raise RuntimeError("sparse_apply_adadelta op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize attrs, build the node, record for gradient replay.
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyAdadelta", var=var, accum=accum,
      accum_update=accum_update, lr=lr, rho=rho, epsilon=epsilon, grad=grad,
      indices=indices, use_locking=use_locking, name=name)
  outputs = list(op.outputs)
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyAdadelta", op.inputs, attrs, outputs, name)
  (result,) = outputs
  return result
def sparse_apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking=False, name=None, ctx=None):
  """Eager fallback stub: always raises, since the op's ref output cannot be represented in eager mode."""
  raise RuntimeError("sparse_apply_adadelta op does not support eager execution. Arg 'out' is a ref.")
def sparse_apply_adagrad(var, accum, lr, grad, indices, use_locking=False, update_slots=True, name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme.

  That is for rows we have grad for, we update var and accum as follows:
  $$accum += grad * grad$$
  $$var -= lr * grad * (1 / sqrt(accum))$$

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    update_slots: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  eager_ctx = _context._context
  if eager_ctx is not None and eager_ctx._eager_context.is_eager:
    # The op produces a ref output, which has no eager representation.
    raise RuntimeError("sparse_apply_adagrad op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize attrs, build the node, record for gradient replay.
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  update_slots = _execute.make_bool(
      True if update_slots is None else update_slots, "update_slots")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad,
      indices=indices, use_locking=use_locking, update_slots=update_slots,
      name=name)
  outputs = list(op.outputs)
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"), "update_slots",
           op.get_attr("update_slots"))
  _execute.record_gradient(
      "SparseApplyAdagrad", op.inputs, attrs, outputs, name)
  (result,) = outputs
  return result
def sparse_apply_adagrad_eager_fallback(var, accum, lr, grad, indices, use_locking=False, update_slots=True, name=None, ctx=None):
  """Eager fallback stub: always raises, since the op's ref output cannot be represented in eager mode."""
  raise RuntimeError("sparse_apply_adagrad op does not support eager execution. Arg 'out' is a ref.")
def sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking=False, name=None):
  r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme.

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `var`.
      Learning rate. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    global_step: A `Tensor` of type `int64`.
      Training step number. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, updating of the var and accum tensors will be protected by
      a lock; otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  eager_ctx = _context._context
  if eager_ctx is not None and eager_ctx._eager_context.is_eager:
    # The op produces a ref output, which has no eager representation.
    raise RuntimeError("sparse_apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize attrs, build the node, record for gradient replay.
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyAdagradDA", var=var,
      gradient_accumulator=gradient_accumulator,
      gradient_squared_accumulator=gradient_squared_accumulator,
      grad=grad, indices=indices, lr=lr, l1=l1, l2=l2,
      global_step=global_step, use_locking=use_locking, name=name)
  outputs = list(op.outputs)
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyAdagradDA", op.inputs, attrs, outputs, name)
  (result,) = outputs
  return result
def sparse_apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking=False, name=None, ctx=None):
  """Eager fallback stub: always raises, since the op's ref output cannot be represented in eager mode."""
  raise RuntimeError("sparse_apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.")
def sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None):
  r"""Update '*var' according to the centered RMSProp algorithm.

  The centered RMSProp algorithm uses an estimate of the centered second moment
  (i.e., the variance) for normalization, as opposed to regular RMSProp, which
  uses the (uncentered) second moment. This often helps with training, but is
  slightly more expensive in terms of computation and memory.

  Note that in dense implementation of this algorithm, mg, ms, and mom will
  update even if the grad is zero, but in this sparse implementation, mg, ms,
  and mom will not update in iterations during which the grad is zero.

  mean_square = decay * mean_square + (1-decay) * gradient ** 2
  mean_grad = decay * mean_grad + (1-decay) * gradient
  Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)

  $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$
  $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$
  $$var <- var - mom$$

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    mg: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    ms: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    mom: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    rho: A `Tensor`. Must have the same type as `var`.
      Decay rate. Must be a scalar.
    momentum: A `Tensor`. Must have the same type as `var`.
    epsilon: A `Tensor`. Must have the same type as `var`.
      Ridge term. Must be a scalar.
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var, ms and mom.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var, mg, ms, and mom tensors is
      protected by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  eager_ctx = _context._context
  if eager_ctx is not None and eager_ctx._eager_context.is_eager:
    # The op produces a ref output, which has no eager representation.
    raise RuntimeError("sparse_apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize attrs, build the node, record for gradient replay.
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr,
      rho=rho, momentum=momentum, epsilon=epsilon, grad=grad,
      indices=indices, use_locking=use_locking, name=name)
  outputs = list(op.outputs)
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyCenteredRMSProp", op.inputs, attrs, outputs, name)
  (result,) = outputs
  return result
def sparse_apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None, ctx=None):
  """Eager fallback stub: always raises, since the op's ref output cannot be represented in eager mode."""
  raise RuntimeError("sparse_apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.")
def sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking=False, name=None):
  r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.

  That is for rows we have grad for, we update var, accum and linear as follows:
  $$accum_new = accum + grad * grad$$
  $$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$
  $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$
  $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$
  $$accum = accum_{new}$$

  Args:
    var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a Variable().
    accum: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    linear: A mutable `Tensor`. Must have the same type as `var`.
      Should be from a Variable().
    grad: A `Tensor`. Must have the same type as `var`. The gradient.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into the first dimension of var and accum.
    lr: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    l1: A `Tensor`. Must have the same type as `var`.
      L1 regularization. Must be a scalar.
    l2: A `Tensor`. Must have the same type as `var`.
      L2 regularization. Must be a scalar.
    lr_power: A `Tensor`. Must have the same type as `var`.
      Scaling factor. Must be a scalar.
    use_locking: An optional `bool`. Defaults to `False`.
      If `True`, updating of the var and accum tensors will be protected
      by a lock; otherwise the behavior is undefined, but may exhibit less
      contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `var`.
  """
  eager_ctx = _context._context
  if eager_ctx is not None and eager_ctx._eager_context.is_eager:
    # The op produces a ref output, which has no eager representation.
    raise RuntimeError("sparse_apply_ftrl op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize attrs, build the node, record for gradient replay.
  use_locking = _execute.make_bool(
      False if use_locking is None else use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyFtrl", var=var, accum=accum, linear=linear, grad=grad,
      indices=indices, lr=lr, l1=l1, l2=l2, lr_power=lr_power,
      use_locking=use_locking, name=name)
  outputs = list(op.outputs)
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyFtrl", op.inputs, attrs, outputs, name)
  (result,) = outputs
  return result
def sparse_apply_ftrl_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking=False, name=None, ctx=None):
  """Eager fallback stub: always raises, since the op's ref output cannot be represented in eager mode."""
  raise RuntimeError("sparse_apply_ftrl op does not support eager execution. Arg 'out' is a ref.")
def sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None):
  r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme.

  For each row present in `indices`, var, accum and linear are updated as:
    grad_with_shrinkage = grad + 2 * l2_shrinkage * var
    accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
    linear += grad_with_shrinkage +
              (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
    var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
    accum = accum_new

  Args:
    var: A mutable `Tensor` from a Variable(); any numeric TF dtype.
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    linear: A mutable `Tensor` of the same type as `var`; from a Variable().
    grad: A `Tensor` of the same type as `var`. The gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var and accum.
    lr: Scalar `Tensor` of the same type as `var`. Scaling factor.
    l1: Scalar `Tensor` of the same type as `var`. L1 regularization.
    l2: Scalar `Tensor` of the same type as `var`. L2 shrinkage regularization.
    l2_shrinkage: Scalar `Tensor` of the same type as `var`.
    lr_power: Scalar `Tensor` of the same type as `var`. Scaling factor.
    use_locking: Optional `bool`, defaults to `False`. If `True`, the update
      is protected by a lock; otherwise behavior is undefined but may show
      less contention.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.

  Raises:
    RuntimeError: if eager execution is enabled (the op's `out` output is a
      reference, which is graph-only).
  """
  context_state = _context._context
  if context_state is not None and context_state._eager_context.is_eager:
    raise RuntimeError("sparse_apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize the optional bool attr, then add the op node.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad,
      indices=indices, lr=lr, l1=l1, l2=l2, l2_shrinkage=l2_shrinkage,
      lr_power=lr_power, use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  flat_inputs = op.inputs
  # Attr tuple layout must mirror the registered OpDef for gradient replay.
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyFtrlV2", flat_inputs, attrs, outputs, name)
  single_result, = outputs
  return single_result
def sparse_apply_ftrl_v2_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, name=None, ctx=None):
  """Eager fallback for SparseApplyFtrlV2; always fails.

  The op emits a reference output (`out`), which is only valid in graph
  mode, so any eager invocation is rejected.

  Raises:
    RuntimeError: always.
  """
  message = "sparse_apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref."
  raise RuntimeError(message)
def sparse_apply_momentum(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None):
  r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme.

  Set use_nesterov = True if you want to use Nesterov momentum.
  For each row present in `indices`, var and accum are updated as:
    accum = accum * momentum + grad
    var -= lr * accum

  Args:
    var: A mutable `Tensor` from a Variable(); any numeric TF dtype.
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scalar `Tensor` of the same type as `var`. Learning rate.
    grad: A `Tensor` of the same type as `var`. The gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var and accum.
    momentum: Scalar `Tensor` of the same type as `var`. Momentum.
    use_locking: Optional `bool`, defaults to `False`. If `True`, the update
      is protected by a lock; otherwise behavior is undefined but may show
      less contention.
    use_nesterov: Optional `bool`, defaults to `False`. If `True`, the tensor
      passed to compute grad will be var - lr * momentum * accum, so in the
      end, the var you get is actually var - lr * momentum * accum.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.

  Raises:
    RuntimeError: if eager execution is enabled (the op's `out` output is a
      reference, which is graph-only).
  """
  context_state = _context._context
  if context_state is not None and context_state._eager_context.is_eager:
    raise RuntimeError("sparse_apply_momentum op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize optional bool attrs, then add the op node.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  if use_nesterov is None:
    use_nesterov = False
  use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad,
      indices=indices, momentum=momentum, use_locking=use_locking,
      use_nesterov=use_nesterov, name=name)
  outputs = op.outputs[:]
  flat_inputs = op.inputs
  # Attr tuple layout must mirror the registered OpDef for gradient replay.
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"), "use_nesterov",
           op.get_attr("use_nesterov"))
  _execute.record_gradient(
      "SparseApplyMomentum", flat_inputs, attrs, outputs, name)
  single_result, = outputs
  return single_result
def sparse_apply_momentum_eager_fallback(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None, ctx=None):
  """Eager fallback for SparseApplyMomentum; always fails.

  The op emits a reference output (`out`), which is only valid in graph
  mode, so any eager invocation is rejected.

  Raises:
    RuntimeError: always.
  """
  message = "sparse_apply_momentum op does not support eager execution. Arg 'out' is a ref."
  raise RuntimeError(message)
def sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices, use_locking=False, name=None):
  r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.

  For each row present in `indices`, var and accum are updated as:
    accum += grad * grad
    prox_v = var
    prox_v -= lr * grad * (1 / sqrt(accum))
    var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}

  Args:
    var: A mutable `Tensor` from a Variable(); any numeric TF dtype.
    accum: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scalar `Tensor` of the same type as `var`. Learning rate.
    l1: Scalar `Tensor` of the same type as `var`. L1 regularization.
    l2: Scalar `Tensor` of the same type as `var`. L2 regularization.
    grad: A `Tensor` of the same type as `var`. The gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var and accum.
    use_locking: Optional `bool`, defaults to `False`. If True, the update
      is protected by a lock; otherwise behavior is undefined but may show
      less contention.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.

  Raises:
    RuntimeError: if eager execution is enabled (the op's `out` output is a
      reference, which is graph-only).
  """
  context_state = _context._context
  if context_state is not None and context_state._eager_context.is_eager:
    raise RuntimeError("sparse_apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize the optional bool attr, then add the op node.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1,
      l2=l2, grad=grad, indices=indices, use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  flat_inputs = op.inputs
  # Attr tuple layout must mirror the registered OpDef for gradient replay.
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyProximalAdagrad", flat_inputs, attrs, outputs, name)
  single_result, = outputs
  return single_result
def sparse_apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, indices, use_locking=False, name=None, ctx=None):
  """Eager fallback for SparseApplyProximalAdagrad; always fails.

  The op emits a reference output (`out`), which is only valid in graph
  mode, so any eager invocation is rejected.

  Raises:
    RuntimeError: always.
  """
  message = "sparse_apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref."
  raise RuntimeError(message)
def sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices, use_locking=False, name=None):
  r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate.

  For each row present in `indices`, var is updated as:
    prox_v = var - alpha * grad
    var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}

  Args:
    var: A mutable `Tensor` from a Variable(); any numeric TF dtype.
    alpha: Scalar `Tensor` of the same type as `var`. Scaling factor.
    l1: Scalar `Tensor` of the same type as `var`. L1 regularization.
    l2: Scalar `Tensor` of the same type as `var`. L2 regularization.
    grad: A `Tensor` of the same type as `var`. The gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var and accum.
    use_locking: Optional `bool`, defaults to `False`. If True, the
      subtraction is protected by a lock; otherwise behavior is undefined
      but may show less contention.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.

  Raises:
    RuntimeError: if eager execution is enabled (the op's `out` output is a
      reference, which is graph-only).
  """
  context_state = _context._context
  if context_state is not None and context_state._eager_context.is_eager:
    raise RuntimeError("sparse_apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize the optional bool attr, then add the op node.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1,
      l2=l2, grad=grad, indices=indices, use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  flat_inputs = op.inputs
  # Attr tuple layout must mirror the registered OpDef for gradient replay.
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyProximalGradientDescent", flat_inputs, attrs, outputs, name)
  single_result, = outputs
  return single_result
def sparse_apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, grad, indices, use_locking=False, name=None, ctx=None):
  """Eager fallback for SparseApplyProximalGradientDescent; always fails.

  The op emits a reference output (`out`), which is only valid in graph
  mode, so any eager invocation is rejected.

  Raises:
    RuntimeError: always.
  """
  message = "sparse_apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref."
  raise RuntimeError(message)
def sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None):
  r"""Update '*var' according to the RMSProp algorithm.

  Note that in dense implementation of this algorithm, ms and mom will
  update even if the grad is zero, but in this sparse implementation, ms
  and mom will not update in iterations during which the grad is zero.

    mean_square = decay * mean_square + (1-decay) * gradient ** 2
    Delta = learning_rate * gradient / sqrt(mean_square + epsilon)

    ms <- rho * ms_{t-1} + (1-rho) * grad * grad
    mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

  Args:
    var: A mutable `Tensor` from a Variable(); any numeric TF dtype.
    ms: A mutable `Tensor` of the same type as `var`; from a Variable().
    mom: A mutable `Tensor` of the same type as `var`; from a Variable().
    lr: Scalar `Tensor` of the same type as `var`. Scaling factor.
    rho: Scalar `Tensor` of the same type as `var`. Decay rate.
    momentum: A `Tensor` of the same type as `var`.
    epsilon: Scalar `Tensor` of the same type as `var`. Ridge term.
    grad: A `Tensor` of the same type as `var`. The gradient.
    indices: An `int32` or `int64` `Tensor`; a vector of indices into the
      first dimension of var, ms and mom.
    use_locking: Optional `bool`, defaults to `False`. If `True`, updating
      of the var, ms, and mom tensors is protected by a lock; otherwise
      behavior is undefined but may show less contention.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` with the same type as `var`.

  Raises:
    RuntimeError: if eager execution is enabled (the op's `out` output is a
      reference, which is graph-only).
  """
  context_state = _context._context
  if context_state is not None and context_state._eager_context.is_eager:
    raise RuntimeError("sparse_apply_rms_prop op does not support eager execution. Arg 'out' is a ref.")
  # Graph mode: normalize the optional bool attr, then add the op node.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, op = _op_def_lib._apply_op_helper(
      "SparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho,
      momentum=momentum, epsilon=epsilon, grad=grad, indices=indices,
      use_locking=use_locking, name=name)
  outputs = op.outputs[:]
  flat_inputs = op.inputs
  # Attr tuple layout must mirror the registered OpDef for gradient replay.
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "use_locking", op.get_attr("use_locking"))
  _execute.record_gradient(
      "SparseApplyRMSProp", flat_inputs, attrs, outputs, name)
  single_result, = outputs
  return single_result
def sparse_apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None, ctx=None):
  """Eager fallback for SparseApplyRMSProp; always fails.

  The op emits a reference output (`out`), which is only valid in graph
  mode, so any eager invocation is rejected.

  Raises:
    RuntimeError: always.
  """
  message = "sparse_apply_rms_prop op does not support eager execution. Arg 'out' is a ref."
  raise RuntimeError(message)
def _InitOpDefLibrary(op_list_proto_bytes):
  """Build an OpDefLibrary from a serialized OpList proto.

  Parses the bytes into an `OpList`, registers every op with the global op
  registry, and returns a library object that can apply those ops.

  Args:
    op_list_proto_bytes: serialized `OpList` protocol buffer.

  Returns:
    An `OpDefLibrary` populated with the parsed op definitions.
  """
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "ApplyAdaMax"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "m"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "v"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "beta1_power"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "beta1"
# type_attr: "T"
# }
# input_arg {
# name: "beta2"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyAdadelta"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum_update"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyAdagrad"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "update_slots"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "ApplyAdagradDA"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "gradient_accumulator"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "gradient_squared_accumulator"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "global_step"
# type: DT_INT64
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyAdam"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "m"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "v"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "beta1_power"
# type_attr: "T"
# }
# input_arg {
# name: "beta2_power"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "beta1"
# type_attr: "T"
# }
# input_arg {
# name: "beta2"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyAddSign"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "m"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "sign_decay"
# type_attr: "T"
# }
# input_arg {
# name: "beta"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyCenteredRMSProp"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "mg"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "ms"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "mom"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyFtrl"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "linear"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyFtrlV2"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "linear"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "l2_shrinkage"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyGradientDescent"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "delta"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyMomentum"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyPowerSign"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "m"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "logbase"
# type_attr: "T"
# }
# input_arg {
# name: "sign_decay"
# type_attr: "T"
# }
# input_arg {
# name: "beta"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyProximalAdagrad"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyProximalGradientDescent"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "delta"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ApplyRMSProp"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "ms"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "mom"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "ResourceApplyAdaMax"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "m"
# type: DT_RESOURCE
# }
# input_arg {
# name: "v"
# type: DT_RESOURCE
# }
# input_arg {
# name: "beta1_power"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "beta1"
# type_attr: "T"
# }
# input_arg {
# name: "beta2"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyAdadelta"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum_update"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyAdagrad"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "update_slots"
# type: "bool"
# default_value {
# b: true
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyAdagradDA"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "gradient_accumulator"
# type: DT_RESOURCE
# }
# input_arg {
# name: "gradient_squared_accumulator"
# type: DT_RESOURCE
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "global_step"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyAdam"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "m"
# type: DT_RESOURCE
# }
# input_arg {
# name: "v"
# type: DT_RESOURCE
# }
# input_arg {
# name: "beta1_power"
# type_attr: "T"
# }
# input_arg {
# name: "beta2_power"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "beta1"
# type_attr: "T"
# }
# input_arg {
# name: "beta2"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyAdamWithAmsgrad"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "m"
# type: DT_RESOURCE
# }
# input_arg {
# name: "v"
# type: DT_RESOURCE
# }
# input_arg {
# name: "vhat"
# type: DT_RESOURCE
# }
# input_arg {
# name: "beta1_power"
# type_attr: "T"
# }
# input_arg {
# name: "beta2_power"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "beta1"
# type_attr: "T"
# }
# input_arg {
# name: "beta2"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyAddSign"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "m"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "sign_decay"
# type_attr: "T"
# }
# input_arg {
# name: "beta"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyCenteredRMSProp"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "mg"
# type: DT_RESOURCE
# }
# input_arg {
# name: "ms"
# type: DT_RESOURCE
# }
# input_arg {
# name: "mom"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyFtrl"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "linear"
# type: DT_RESOURCE
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyFtrlV2"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "linear"
# type: DT_RESOURCE
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "l2_shrinkage"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyGradientDescent"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "delta"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyKerasMomentum"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyMomentum"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyPowerSign"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "m"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "logbase"
# type_attr: "T"
# }
# input_arg {
# name: "sign_decay"
# type_attr: "T"
# }
# input_arg {
# name: "beta"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyProximalAdagrad"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyProximalGradientDescent"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "delta"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceApplyRMSProp"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "ms"
# type: DT_RESOURCE
# }
# input_arg {
# name: "mom"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyAdadelta"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum_update"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyAdagrad"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "update_slots"
# type: "bool"
# default_value {
# b: true
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyAdagradDA"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "gradient_accumulator"
# type: DT_RESOURCE
# }
# input_arg {
# name: "gradient_squared_accumulator"
# type: DT_RESOURCE
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "global_step"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyCenteredRMSProp"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "mg"
# type: DT_RESOURCE
# }
# input_arg {
# name: "ms"
# type: DT_RESOURCE
# }
# input_arg {
# name: "mom"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyFtrl"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "linear"
# type: DT_RESOURCE
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyFtrlV2"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "linear"
# type: DT_RESOURCE
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "l2_shrinkage"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyKerasMomentum"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyMomentum"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyProximalAdagrad"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "accum"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyProximalGradientDescent"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "ResourceSparseApplyRMSProp"
# input_arg {
# name: "var"
# type: DT_RESOURCE
# }
# input_arg {
# name: "ms"
# type: DT_RESOURCE
# }
# input_arg {
# name: "mom"
# type: DT_RESOURCE
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# is_stateful: true
# }
# op {
# name: "SparseApplyAdadelta"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum_update"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyAdagrad"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "update_slots"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "SparseApplyAdagradDA"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "gradient_accumulator"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "gradient_squared_accumulator"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "global_step"
# type: DT_INT64
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyCenteredRMSProp"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "mg"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "ms"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "mom"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyFtrl"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "linear"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyFtrlV2"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "linear"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "l2_shrinkage"
# type_attr: "T"
# }
# input_arg {
# name: "lr_power"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyMomentum"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "use_nesterov"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyProximalAdagrad"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "accum"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyProximalGradientDescent"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "alpha"
# type_attr: "T"
# }
# input_arg {
# name: "l1"
# type_attr: "T"
# }
# input_arg {
# name: "l2"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "SparseApplyRMSProp"
# input_arg {
# name: "var"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "ms"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "mom"
# type_attr: "T"
# is_ref: true
# }
# input_arg {
# name: "lr"
# type_attr: "T"
# }
# input_arg {
# name: "rho"
# type_attr: "T"
# }
# input_arg {
# name: "momentum"
# type_attr: "T"
# }
# input_arg {
# name: "epsilon"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tindices"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# is_ref: true
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "use_locking"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\304\001\n\013ApplyAdaMax\022\013\n\003var\"\001T\200\001\001\022\t\n\001m\"\001T\200\001\001\022\t\n\001v\"\001T\200\001\001\022\020\n\013beta1_power\"\001T\022\007\n\002lr\"\001T\022\n\n\005beta1\"\001T\022\n\n\005beta2\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\265\001\n\rApplyAdadelta\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\024\n\014accum_update\"\001T\200\001\001\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\240\001\n\014ApplyAdagrad\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014update_slots\022\004bool\032\002(\001\n\340\001\n\016ApplyAdagradDA\022\013\n\003var\"\001T\200\001\001\022\034\n\024gradient_accumulator\"\001T\200\001\001\022$\n\034gradient_squared_accumulator\"\001T\200\001\001\022\t\n\004grad\"\001T\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\017\n\013global_step\030\t\032\013\n\003out\"\001T\200\001\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\356\001\n\tApplyAdam\022\013\n\003var\"\001T\200\001\001\022\t\n\001m\"\001T\200\001\001\022\t\n\001v\"\001T\200\001\001\022\020\n\013beta1_power\"\001T\022\020\n\013beta2_power\"\001T\022\007\n\002lr\"\001T\022\n\n\005beta1\"\001T\022\n\n\005beta2\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\n\252\001\n\014ApplyAddSign\022\013\n\003var\"\001T\200\001\001\022\t\n\001m\"\001T\200\001\001\022\007\n\002lr\"\001T\022\n\n\005alpha\"\001T\022\017\n\nsign_decay\"\001T\022\t\n\004beta\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\313\001\n\024ApplyCenteredRMSProp\022\013\n\003var\"\001T\200\001\001\022\n\n\002mg\"\001T\200\001\001\022\n\n\002ms\"\001T\200\001\001\022\013\n\003mom\"\001T\200\001\001\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\264\001\n\tApplyFtrl\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\016\n\006linear\"\001T\200\001\001\022\t\n\004grad\"\001T\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\r\n\010lr_power\"\001T\032\013\n\003out\"\001T\200\001\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\311\001\n\013ApplyFtrlV2\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\016\n\006linear\"\001T\200\001\001\022\t\n\004grad\"\001T\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\021\n\014l2_shrinkage\"\001T\022\r\n\010lr_power\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\203\001\n\024ApplyGradientDescent\022\013\n\003var\"\001T\200\001\001\022\n\n\005alpha\"\001T\022\n\n\005delta\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\260\001\n\rApplyMomentum\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\r\n\010momentum\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\n\256\001\n\016ApplyPowerSign\022\013\n\003var\"\001T\200\001\001\022\t\n\001m\"\001T\200\001\001\022\007\n\002lr\"\001T\022\014\n\007logbase\"\001T\022\017\n\nsign_decay\"\001T\022\t\n\004beta\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\240\001\n\024ApplyProximalAdagrad\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\235\001\n\034ApplyProximalGradientDescent\022\013\n\003var\"\001T\200\001\001\022\n\n\005alpha\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\n\n\005delta\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\267\001\n\014ApplyRMSProp\022\013\n\003var\"\001T\200\001\001\022\n\n\002ms\"\001T\200\001\001\022\013\n\003mom\"\001T\200\001\001\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\n\266\001\n\023ResourceApplyAdaMax\022\007\n\003var\030\024\022\005\n\001m\030\024\022\005\n\001v\030\024\022\020\n\013beta1_power\"\001T\022\007\n\002lr\"\001T\022\n\n\005beta1\"\001T\022\n\n\005beta2\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\247\001\n\025ResourceApplyAdadelta\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\020\n\014accum_update\030\024\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\226\001\n\024ResourceApplyAdagrad\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014update_slots\022\004bool\032\002(\001\210\001\001\n\322\001\n\026ResourceApplyAdagradDA\022\007\n\003var\030\024\022\030\n\024gradient_accumulator\030\024\022 \n\034gradient_squared_accumulator\030\024\022\t\n\004grad\"\001T\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\017\n\013global_step\030\t\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\340\001\n\021ResourceApplyAdam\022\007\n\003var\030\024\022\005\n\001m\030\024\022\005\n\001v\030\024\022\020\n\013beta1_power\"\001T\022\020\n\013beta2_power\"\001T\022\007\n\002lr\"\001T\022\n\n\005beta1\"\001T\022\n\n\005beta2\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\210\001\001\n\333\001\n\034ResourceApplyAdamWithAmsgrad\022\007\n\003var\030\024\022\005\n\001m\030\024\022\005\n\001v\030\024\022\010\n\004vhat\030\024\022\020\n\013beta1_power\"\001T\022\020\n\013beta2_power\"\001T\022\007\n\002lr\"\001T\022\n\n\005beta1\"\001T\022\n\n\005beta2\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\240\001\n\024ResourceApplyAddSign\022\007\n\003var\030\024\022\005\n\001m\030\024\022\007\n\002lr\"\001T\022\n\n\005alpha\"\001T\022\017\n\nsign_decay\"\001T\022\t\n\004beta\"\001T\022\t\n\004grad\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\271\001\n\034ResourceApplyCenteredRMSProp\022\007\n\003var\030\024\022\006\n\002mg\030\024\022\006\n\002ms\030\024\022\007\n\003mom\030\024\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\246\001\n\021ResourceApplyFtrl\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\n\n\006linear\030\024\022\t\n\004grad\"\001T\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\r\n\010lr_power\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\273\001\n\023ResourceApplyFtrlV2\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\n\n\006linear\030\024\022\t\n\004grad\"\001T\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\021\n\014l2_shrinkage\"\001T\022\r\n\010lr_power\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n}\n\034ResourceApplyGradientDescent\022\007\n\003var\030\024\022\n\n\005alpha\"\001T\022\n\n\005delta\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\253\001\n\032ResourceApplyKerasMomentum\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\r\n\010momentum\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\210\001\001\n\246\001\n\025ResourceApplyMomentum\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\r\n\010momentum\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\210\001\001\n\244\001\n\026ResourceApplyPowerSign\022\007\n\003var\030\024\022\005\n\001m\030\024\022\007\n\002lr\"\001T\022\014\n\007logbase\"\001T\022\017\n\nsign_decay\"\001T\022\t\n\004beta\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\226\001\n\034ResourceApplyProximalAdagrad\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\t\n\004grad\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\227\001\n$ResourceApplyProximalGradientDescent\022\007\n\003var\030\024\022\n\n\005alpha\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\n\n\005delta\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\251\001\n\024ResourceApplyRMSProp\022\007\n\003var\030\024\022\006\n\002ms\030\024\022\007\n\003mom\030\024\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\334\001\n\033ResourceSparseApplyAdadelta\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\020\n\014accum_update\030\024\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\313\001\n\032ResourceSparseApplyAdagrad\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014update_slots\022\004bool\032\002(\001\210\001\001\n\207\002\n\034ResourceSparseApplyAdagradDA\022\007\n\003var\030\024\022\030\n\024gradient_accumulator\030\024\022 \n\034gradient_squared_accumulator\030\024\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\017\n\013global_step\030\t\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\356\001\n\"ResourceSparseApplyCenteredRMSProp\022\007\n\003var\030\024\022\006\n\002mg\030\024\022\006\n\002ms\030\024\022\007\n\003mom\030\024\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\333\001\n\027ResourceSparseApplyFtrl\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\n\n\006linear\030\024\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\r\n\010lr_power\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\360\001\n\031ResourceSparseApplyFtrlV2\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\n\n\006linear\030\024\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\021\n\014l2_shrinkage\"\001T\022\r\n\010lr_power\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\340\001\n ResourceSparseApplyKerasMomentum\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\r\n\010momentum\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\210\001\001\n\333\001\n\033ResourceSparseApplyMomentum\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\r\n\010momentum\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\210\001\001\n\313\001\n\"ResourceSparseApplyProximalAdagrad\022\007\n\003var\030\024\022\t\n\005accum\030\024\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\313\001\n*ResourceSparseApplyProximalGradientDescent\022\007\n\003var\030\024\022\n\n\005alpha\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\336\001\n\032ResourceSparseApplyRMSProp\022\007\n\003var\030\024\022\006\n\002ms\030\024\022\007\n\003mom\030\024\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\210\001\001\n\352\001\n\023SparseApplyAdadelta\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\024\n\014accum_update\"\001T\200\001\001\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\032\013\n\003out\"\001T\200\001\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\325\001\n\022SparseApplyAdagrad\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014update_slots\022\004bool\032\002(\001\n\225\002\n\024SparseApplyAdagradDA\022\013\n\003var\"\001T\200\001\001\022\034\n\024gradient_accumulator\"\001T\200\001\001\022$\n\034gradient_squared_accumulator\"\001T\200\001\001\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\017\n\013global_step\030\t\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\200\002\n\032SparseApplyCenteredRMSProp\022\013\n\003var\"\001T\200\001\001\022\n\n\002mg\"\001T\200\001\001\022\n\n\002ms\"\001T\200\001\001\022\013\n\003mom\"\001T\200\001\001\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\032\013\n\003out\"\001T\200\001\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\351\001\n\017SparseApplyFtrl\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\016\n\006linear\"\001T\200\001\001\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\r\n\010lr_power\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\376\001\n\021SparseApplyFtrlV2\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\016\n\006linear\"\001T\200\001\001\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\021\n\014l2_shrinkage\"\001T\022\r\n\010lr_power\"\001T\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\345\001\n\023SparseApplyMomentum\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\007\n\002lr\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\022\r\n\010momentum\"\001T\032\013\n\003out\"\001T\200\001\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\"\030\n\014use_nesterov\022\004bool\032\002(\000\n\325\001\n\032SparseApplyProximalAdagrad\022\013\n\003var\"\001T\200\001\001\022\r\n\005accum\"\001T\200\001\001\022\007\n\002lr\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\321\001\n\"SparseApplyProximalGradientDescent\022\013\n\003var\"\001T\200\001\001\022\n\n\005alpha\"\001T\022\007\n\002l1\"\001T\022\007\n\002l2\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000\n\354\001\n\022SparseApplyRMSProp\022\013\n\003var\"\001T\200\001\001\022\n\n\002ms\"\001T\200\001\001\022\013\n\003mom\"\001T\200\001\001\022\007\n\002lr\"\001T\022\010\n\003rho\"\001T\022\r\n\010momentum\"\001T\022\014\n\007epsilon\"\001T\022\t\n\004grad\"\001T\022\023\n\007indices\"\010Tindices\032\013\n\003out\"\001T\200\001\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\027\n\013use_locking\022\004bool\032\002(\000")
| 36.830179 | 23,622 | 0.609195 |
16355f52eea0add171343a06f37239cbd62c3878 | 7,892 | py | Python | external/eospy/eospy/keys.py | unification-com/haiku-node-prototype | ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b | [
"MIT"
] | 3 | 2018-06-15T18:02:05.000Z | 2018-07-06T02:32:18.000Z | external/eospy/eospy/keys.py | unification-com/haiku-node-prototype | ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b | [
"MIT"
] | 4 | 2018-08-17T06:51:34.000Z | 2018-08-17T08:39:24.000Z | external/eospy/eospy/keys.py | unification-com/haiku-node-prototype | ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b | [
"MIT"
] | null | null | null | import base58
import os
import ecdsa
import re
from binascii import hexlify, unhexlify
from .utils import sha256, ripemd160, str_to_hex, hex_to_int
import hashlib
import time
import struct
def check_wif(key) :
if isinstance(key, str) :
try :
EOSKey(key)
return True
except Exception as ex:
pass
return False
class EOSKey :
def __init__(self, private_str='') :
''' '''
if private_str :
private_key, format, key_type = self._parse_key(private_str)
self._sk = ecdsa.SigningKey.from_string(unhexlify(private_key), curve=ecdsa.SECP256k1)
else :
prng = self._create_entropy()
self._sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, entropy=prng)
self._vk = self._sk.get_verifying_key()
def __str__(self) :
return self.to_public()
def _parse_key(self, private_str) :
''' '''
match = re.search('^PVT_([A-Za-z0-9]+)_([A-Za-z0-9]+)$', private_str)
if not match :
# legacy WIF - format
version_key = self._check_decode(private_str, 'sha256x2')
# ensure first 2 chars == 0x80
version = int(version_key[0:2],16)
if not version == 0x80 :
raise ValueError('Expected version 0x80, instead got {0}', version)
private_key = version_key[2:]
key_type = 'K1'
format = 'WIF'
else :
key_type, key_string = match.groups()
private_key = self._check_decode(key_string, key_type)
format = 'PVT'
return (private_key, format, key_type)
def _create_entropy(self) :
''' '''
ba = bytearray(os.urandom(32))
seed = sha256(ba)
return ecdsa.util.PRNG(seed)
def _check_encode(self, key_buffer, key_type=None) :
''' '''
if isinstance(key_buffer, bytes) :
key_buffer = key_buffer.decode()
check = key_buffer
if key_type == 'sha256x2' :
first_sha = sha256(unhexlify(check))
chksum = sha256(unhexlify(first_sha))[:8]
else :
if key_type :
check += hexlify(bytearray(key_type,'utf-8')).decode()
chksum = ripemd160(unhexlify(check))[:8]
return base58.b58encode(unhexlify(key_buffer+chksum))
def _check_decode(self, key_string, key_type=None) :
''' '''
buffer = hexlify(base58.b58decode(key_string)).decode()
chksum = buffer[-8:]
key = buffer[:-8]
if key_type == 'sha256x2' :
# legacy
first_sha = sha256(unhexlify(key))
newChk = sha256(unhexlify(first_sha))[:8]
else :
check = key
if key_type :
check += hexlify(bytearray(key_type, 'utf-8')).decode()
newChk = ripemd160(unhexlify(check))[:8]
#print('newChk: '+newChk)
if chksum != newChk :
raise ValueError('checksums do not match: {0} != {1}'.format(chksum, newChk))
return key
def _recover_key(self, digest, signature, i) :
''' Recover the public key from the sig
http://www.secg.org/sec1-v2.pdf
'''
curve = ecdsa.SECP256k1.curve
G = ecdsa.SECP256k1.generator
order = ecdsa.SECP256k1.order
yp = (i %2)
r, s = ecdsa.util.sigdecode_string(signature, order)
x = r + (i // 2 ) * order
alpha = ((x * x * x) + (curve.a() * x) + curve.b()) % curve.p()
beta = ecdsa.numbertheory.square_root_mod_prime(alpha, curve.p())
y = beta if (beta - yp) % 2 == 0 else curve.p() - beta
# generate R
R = ecdsa.ellipticcurve.Point(curve, x, y, order)
e = ecdsa.util.string_to_number(digest)
# compute Q
Q = ecdsa.numbertheory.inverse_mod(r, order) * (s * R + (-e % order) * G)
# verify message
if not ecdsa.VerifyingKey.from_public_point(Q, curve=ecdsa.SECP256k1).verify_digest(signature, digest,
sigdecode=ecdsa.util.sigdecode_string) :
return None
return ecdsa.VerifyingKey.from_public_point(Q, curve=ecdsa.SECP256k1)
def _recovery_pubkey_param(self, digest, signature) :
''' Use to derive a number that will allow for the easy recovery
of the public key from the signature
'''
for i in range(0,4) :
p = self._recover_key(digest, signature, i)
if (p.to_string() == self._vk.to_string() ) :
return i
def _compress_pubkey(self) :
''' '''
order = self._sk.curve.generator.order()
p = self._vk.pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
hex_data = bytearray(chr(2 + (p.y() & 1)), 'utf-8')
compressed = hexlify(hex_data + x_str).decode()
return compressed
def to_public(self) :
''' '''
cmp = self._compress_pubkey()
return 'EOS' + self._check_encode(cmp).decode()
def to_wif(self) :
''' '''
pri_key = '80' + hexlify(self._sk.to_string()).decode()
return self._check_encode(pri_key, 'sha256x2').decode()
def sign(self, digest) :
''' '''
cnt = 0
# convert digest to hex string
digest = unhexlify(digest)
while 1 :
cnt +=1
if not cnt % 10 :
print('Still searching for a signature. Tried {} times.'.format(cnt))
# get deterministic k
k = ecdsa.rfc6979.generate_k( self._sk.curve.generator.order(),
self._sk.privkey.secret_multiplier,
hashlib.sha256,
bytearray(sha256(digest + struct.pack('d', time.time())), 'utf-8') # use time to randomize
)
# sign the message
sigder = self._sk.sign_digest(digest, sigencode=ecdsa.util.sigencode_der, k=k)
# reformat sig
r, s = ecdsa.util.sigdecode_der(sigder, self._sk.curve.generator.order())
sig = ecdsa.util.sigencode_string(r, s, self._sk.curve.generator.order())
# ensure signature is canonical
if isinstance(sigder[5],int) :
lenR = sigder[3]
else :
lenR = str_to_hex(sigder[3])
if isinstance(sigder[5 + lenR], int) :
lenS = sigder[5 + lenR]
else :
lenS = str_to_hex(sigder[5 + lenR])
if lenR is 32 and lenS is 32 :
# derive recover parameter
i = self._recovery_pubkey_param(digest, sig)
# compressed
i += 4
# compact
i += 27
break
# pack
sigstr = struct.pack('<B', i) + sig
# encode
return 'SIG_K1_' + self._check_encode(hexlify(sigstr), 'K1').decode()
    def verify(self, encoded_sig, digest) :
        ''' Verify a "SIG_K1_..." signature string against a
            hex-encoded digest.

            Raises TypeError for an unsupported curve prefix; otherwise
            returns the result of ecdsa digest verification.
        '''
        # remove SIG_ prefix
        encoded_sig = encoded_sig[4:]
        # remove curve prefix
        curvePre = encoded_sig[:3].strip('_')
        if curvePre != 'K1' :
            raise TypeError('Unsupported curve prefix {}'.format(curvePre))

        decoded_sig = self._check_decode(encoded_sig[3:], curvePre)
        # first 2 hex chars are the recovery byte; undo the +4 (compressed)
        # and +27 (compact) offsets that sign() added
        recover_param = hex_to_int(decoded_sig[:2]) - 4 - 27
        # use sig
        sig = decoded_sig[2:]
        # verify sig by recovering the public key and checking the digest
        p = self._recover_key(unhexlify(digest), unhexlify(sig), recover_param)
        return p.verify_digest(unhexlify(sig), unhexlify(digest), sigdecode=ecdsa.util.sigdecode_string)
| 38.125604 | 132 | 0.542448 |
2dc69b5f4275014944d2c32708689e9b9af13f42 | 103 | py | Python | Chapter03/forking_process.py | ibiscum/Learning-Concurrency-in-Python | d3f0320ad2a80c46b37de331bf335b80df0d3ed9 | [
"MIT"
] | null | null | null | Chapter03/forking_process.py | ibiscum/Learning-Concurrency-in-Python | d3f0320ad2a80c46b37de331bf335b80df0d3ed9 | [
"MIT"
] | null | null | null | Chapter03/forking_process.py | ibiscum/Learning-Concurrency-in-Python | d3f0320ad2a80c46b37de331bf335b80df0d3ed9 | [
"MIT"
] | null | null | null | import time
import multiprocess
def my_process():
print("My Process Starting")
time.sleep(5)
| 12.875 | 32 | 0.708738 |
2b23102b80e801618580ecb628d9431c2efc066b | 15,018 | py | Python | examples/stencil_grid/stencil_kernel.py | zanellia/ctree | d53d29d972ac06b60d8fc4fb81f5553d2ffbfda0 | [
"BSD-2-Clause"
] | 16 | 2015-01-11T21:10:26.000Z | 2021-09-28T11:39:56.000Z | examples/stencil_grid/stencil_kernel.py | zanellia/ctree | d53d29d972ac06b60d8fc4fb81f5553d2ffbfda0 | [
"BSD-2-Clause"
] | 12 | 2015-01-01T01:55:49.000Z | 2019-06-23T16:41:12.000Z | examples/stencil_grid/stencil_kernel.py | mbdriscoll/ctree | e15538ecdf3aaa3bb2f210701d29334fd5e4ec40 | [
"BSD-2-Clause"
] | 4 | 2015-07-23T20:48:36.000Z | 2021-08-02T12:17:37.000Z | """
This version was taken from the stencil_specializer project and has all asp
stuff removed in order to work on a direct c-tree llvm implementation
The main driver, intercepts the kernel() call and invokes the other components.
Stencil kernel classes are subclassed from the StencilKernel class
defined here. At initialization time, the text of the kernel() method
is parsed into a Python AST, then converted into a StencilModel by
stencil_python_front_end. The kernel() function is replaced by
shadow_kernel(), which intercepts future calls to kernel().
During each call to kernel(), stencil_unroll_neighbor_iter is called
to unroll neighbor loops, stencil_convert is invoked to convert the
model to C++, and an external compiler tool is invoked to generate a
binary which then efficiently completes executing the call. The binary
is cached for future calls.
"""
import numpy
import math
import inspect
import ast
# from examples.stencil_grid.stencil_python_front_end import *
# from examples.stencil_grid.stencil_unroll_neighbor_iter import *
# from examples.stencil_grid.stencil_optimize_cpp import *
# from examples.stencil_grid.stencil_convert import *
from copy import deepcopy
import logging
logging.basicConfig(level=20)
from ctree.transformations import PyBasicConversions
from ctree.jit import LazySpecializedFunction
from ctree.c.types import *
from ctree.frontend import get_ast
from ctree.visitors import NodeTransformer, NodeVisitor
from ctree.c.nodes import *
from ctree.omp.nodes import *
# logging.basicConfig(filename='tmp.txt',
# filemode='w',
# format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
# datefmt='%H:%M:%S',
# level=20)
class StencilConvert(LazySpecializedFunction):
    """JIT specializer that lowers a Python stencil kernel AST to C,
    applying tuner-driven loop unrolling (blocking is stubbed out)."""

    def __init__(self, func, entry_point, input_grids, output_grid, constants):
        self.input_grids = input_grids
        self.output_grid = output_grid
        self.constants = constants
        super(StencilConvert, self).__init__(get_ast(func))

    def args_to_subconfig(self, args):
        """Summarize call arguments as (len, dtype, ndim, shape) tuples
        so specializations are cached per argument configuration."""
        conf = ()
        for arg in args:
            conf += ((len(arg), arg.dtype, arg.ndim, arg.shape),)
        return conf

    def get_tuning_driver(self):
        """Return a brute-force tuner over block/unroll exponents."""
        from ctree.tune import (
            BruteForceTuningDriver,
            IntegerParameter,
            MinimizeTime
        )

        params = [IntegerParameter("block_factor", 4, 8),
                  IntegerParameter("unroll_factor", 1, 4)]
        return BruteForceTuningDriver(params, MinimizeTime())

    def transform(self, tree, program_config):
        """Convert the Python AST to a C AST."""
        # build the C signature from the argument summary tuples
        param_types = []
        for arg in program_config[0]:
            param_types.append(NdPointer(arg[1], arg[2], arg[3]))
        kernel_sig = FuncType(Void(), param_types)

        # tuner parameters are exponents: actual factor is a power of two
        tune_cfg = program_config[1]
        # block_factor = 2**tune_cfg['block_factor']
        unroll_factor = 2**tune_cfg['unroll_factor']

        for transformer in [StencilTransformer(self.input_grids,
                                               self.output_grid,
                                               self.constants
                                               ),
                            PyBasicConversions()]:
            tree = transformer.visit(tree)

        # unroll the innermost loop of the generated nest
        first_For = tree.find(For)
        inner_For = FindInnerMostLoop().find(first_For)
        # self.block(inner_For, first_For, block_factor)
        self.unroll(inner_For, unroll_factor)

        # remove self param
        # TODO: Better way to do this?
        params = tree.find(FunctionDecl, name="kernel").params
        params.pop(0)

        self.gen_array_macro_definition(tree, params)
        entry_point = tree.find(FunctionDecl, name="kernel")
        entry_point.set_typesig(kernel_sig)
        return tree, entry_point.get_type().as_ctype()

    def gen_array_macro_definition(self, tree, arg_names):
        """Emit a _<name>_array_macro #define per grid that linearizes an
        N-d index using the grid's strides."""
        first_for = tree.find(For)
        for index, arg in enumerate(self.input_grids + (self.output_grid,)):
            defname = "_%s_array_macro" % arg_names[index]
            params = ','.join(["_d"+str(x) for x in range(arg.dim)])
            params = "(%s)" % params
            # last dimension is contiguous; earlier dims are scaled by
            # their element stride
            calc = "((_d%d)" % (arg.dim - 1)
            for x in range(arg.dim - 1):
                dim = str(int(arg.data.strides[x]/arg.data.itemsize))
                calc += "+((_d%s) * %s)" % (str(x), dim)
            calc += ")"
            first_for.insert_before(Define(defname+params, calc))

    def unroll(self, for_node, factor):
        """Unroll for_node by `factor`, appending a remainder loop for
        the leftover iterations when the trip count doesn't divide."""
        # Determine the leftover iterations after unrolling
        initial = for_node.init.right.value
        end = for_node.test.right.value
        leftover_begin = int((end - initial + 1) / factor) * factor + initial
        new_end = leftover_begin - 1
        new_incr = AddAssign(SymbolRef(for_node.incr.arg.name), factor)
        new_body = for_node.body[:]
        # append factor-1 shifted copies of the body (copy k reads index+k)
        for x in range(1, factor):
            new_extension = deepcopy(for_node.body)
            new_extension = map(UnrollReplacer(for_node.init.left.name,
                                               x).visit, new_extension)
            new_body.extend(new_extension)

        # remainder loop starts where the unrolled loop stops
        leftover_For = For(Assign(for_node.init.left,
                                  Constant(leftover_begin)),
                           for_node.test,
                           for_node.incr,
                           for_node.body)
        for_node.test = LtE(for_node.init.left.name, new_end)
        for_node.incr = new_incr
        for_node.body = new_body
        if not leftover_begin >= end:
            for_node.body.append(leftover_For)

    #def block(self, tree, factor):
class FindInnerMostLoop(NodeVisitor):
    """Visitor that returns the innermost For node of a loop nest."""

    def __init__(self):
        self.inner_most = None

    def find(self, node):
        """Return the innermost For contained in (or equal to) node."""
        self.visit(node)
        return self.inner_most

    def visit_For(self, node):
        # record this loop; any nested For visited later overwrites it
        self.inner_most = node
        # BUGFIX: map() is lazy under Python 3, so children were never
        # actually visited; iterate explicitly to recurse.
        for child in node.body:
            self.visit(child)
class UnrollReplacer(NodeTransformer):
    """Shift every reference to a given loop variable by a constant.

    When a loop body is duplicated during unrolling, copy k has its
    loop-index references rewritten from ``v`` to ``v + incr``.
    """

    def __init__(self, loopvar, incr):
        self.loopvar = loopvar
        self.incr = incr
        self.in_new_scope = False
        self.inside_for = False
        super(UnrollReplacer, self).__init__()

    def visit_SymbolRef(self, node):
        if node.name != self.loopvar:
            # unrelated symbol: emit a fresh node so the copies don't alias
            return SymbolRef(node.name)
        return Add(node, Constant(self.incr))
class StencilTransformer(NodeTransformer):
    """Lower the Python stencil kernel AST to C-style loop nests.

    ``interior_points`` loops become a perfect loop nest over the grid's
    interior (annotated with OpenMP pragmas); ``neighbors`` loops are
    fully unrolled, one body copy per neighbor offset.

    BUGFIX: all string comparisons below use ``==`` instead of ``is``;
    the identity form only worked via CPython's string interning and
    emits SyntaxWarning on modern interpreters.
    """

    def __init__(self, input_grids, output_grid, constants):
        # TODO: Give these wrapper classes?
        self.input_grids = input_grids
        self.output_grid = output_grid
        self.ghost_depth = output_grid.ghost_depth
        self.next_fresh_var = 0
        self.output_index = None
        self.neighbor_grid_name = None
        self.kernel_target = None
        self.offset_list = None
        self.var_list = []
        self.input_dict = {}
        self.constants = constants
        super(StencilTransformer, self).__init__()

    def visit_FunctionDef(self, node):
        # map kernel parameters (after self) to grids: all but the last
        # are inputs, the last is the output grid
        for index, arg in enumerate(node.args.args[1:]):
            # PYTHON3 vs PYTHON2
            if hasattr(arg, 'arg'):
                arg = arg.arg
            else:
                arg = arg.id
            if index < len(self.input_grids):
                self.input_dict[arg] = self.input_grids[index]
            else:
                self.output_grid_name = arg
        node.body = list(map(self.visit, node.body))
        return node

    def gen_fresh_var(self):
        """Return a new unique loop-variable name (x1, x2, ...)."""
        self.next_fresh_var += 1
        return "x%d" % self.next_fresh_var

    def visit_For(self, node):
        if type(node.iter) is ast.Call and \
                type(node.iter.func) is ast.Attribute:
            if node.iter.func.attr == 'interior_points':
                # build one C for-loop per grid dimension, iterating over
                # the interior (ghost cells excluded)
                dim = len(self.output_grid.shape)
                self.kernel_target = node.target.id
                curr_node = None
                ret_node = None
                for d in range(dim):
                    target = SymbolRef(self.gen_fresh_var())
                    self.var_list.append(target.name)
                    for_loop = For(
                        Assign(SymbolRef(target.name, Int()),
                               Constant(self.ghost_depth)),
                        LtE(target,
                            Constant(
                                self.output_grid.shape[d] -
                                self.ghost_depth - 1)
                            ),
                        PostInc(target),
                        [])
                    if d == 0:
                        ret_node = for_loop
                    else:
                        curr_node.body = [for_loop]
                        # parallelize the second-to-last loop, mark the
                        # innermost loop's iterations independent
                        if d == dim - 2:
                            curr_node.body.insert(0, OmpParallelFor())
                        elif d == dim - 1:
                            curr_node.body.insert(0, OmpIvDep())
                    curr_node = for_loop
                # precompute the linearized output index once per point
                self.output_index = self.gen_fresh_var()
                pt = [SymbolRef(x) for x in self.var_list]
                macro = self.gen_array_macro(self.output_grid_name, pt)
                curr_node.body = [Assign(SymbolRef(self.output_index, Int()),
                                         macro)]
                for elem in map(self.visit, node.body):
                    if type(elem) == list:
                        curr_node.body.extend(elem)
                    else:
                        curr_node.body.append(elem)
                self.kernel_target = None
                return ret_node
            elif node.iter.func.attr == 'neighbors':
                # fully unroll: emit one copy of the body per neighbor
                # offset around the zero point
                neighbors_id = node.iter.args[1].n
                grid_name = node.iter.func.value.id
                grid = self.input_dict[grid_name]
                zero_point = tuple([0 for x in range(grid.dim)])
                self.neighbor_target = node.target.id
                self.neighbor_grid_name = grid_name
                body = []
                statement = node.body[0]
                for x in grid.neighbors(zero_point, neighbors_id):
                    self.offset_list = list(x)
                    for statement in node.body:
                        body.append(self.visit(deepcopy(statement)))
                self.neighbor_target = None
                return body
        return node

    # Handle array references
    def visit_Subscript(self, node):
        grid_name = node.value.id
        target = node.slice.value
        if isinstance(target, ast.Name):
            target = target.id
            if target == self.kernel_target:
                if grid_name == self.output_grid_name:
                    # output grid: reuse the precomputed linear index
                    return ArrayRef(SymbolRef(self.output_grid_name),
                                    SymbolRef(self.output_index))
                elif grid_name in self.input_dict:
                    # input grid at the current interior point
                    grid = self.input_dict[grid_name]
                    pt = list(map(lambda x: SymbolRef(x), self.var_list))
                    index = self.gen_array_macro(grid_name, pt)
                    return ArrayRef(SymbolRef(grid_name), index)
            elif grid_name == self.neighbor_grid_name:
                # neighbor reference: current point plus the unrolled offset
                pt = list(map(lambda x, y: Add(SymbolRef(x), SymbolRef(y)),
                              self.var_list, self.offset_list))
                index = self.gen_array_macro(grid_name, pt)
                return ArrayRef(SymbolRef(grid_name), index)
        elif isinstance(target, ast.Call):
            return ArrayRef(SymbolRef(grid_name), self.visit(target))
        return node

    def visit_Call(self, node):
        if node.func.id == 'distance':
            # distance from the zero point to the current neighbor offset
            # is a compile-time constant once the loop is unrolled
            zero_point = tuple([0 for _ in range(len(self.offset_list))])
            return Constant(int(self.distance(zero_point, self.offset_list)))
        elif node.func.id == 'int':
            return Cast(Int(), self.visit(node.args[0]))
        node.args = list(map(self.visit, node.args))
        return node

    def distance(self, x, y):
        """Euclidean distance between points x and y."""
        return math.sqrt(sum([(x[i]-y[i])**2 for i in range(0, len(x))]))

    def gen_array_macro(self, arg, point):
        """Call the per-grid indexing macro emitted by StencilConvert."""
        name = "_%s_array_macro" % arg
        return FunctionCall(SymbolRef(name), point)

    def visit_AugAssign(self, node):
        # TODO: Handle all types?
        value = self.visit(node.value)
        # HACK to get this to work, PyBasicConversions will skip this AugAssign node
        # TODO Figure out why
        value = PyBasicConversions().visit(value)
        if type(node.op) is ast.Add:
            return AddAssign(self.visit(node.target), value)
        if type(node.op) is ast.Sub:
            return SubAssign(self.visit(node.target), value)

    def visit_Assign(self, node):
        target = PyBasicConversions().visit(self.visit(node.targets[0]))
        value = PyBasicConversions().visit(self.visit(node.value))
        return Assign(target, value)

    def visit_Name(self, node):
        # fold user-registered constants into literals
        if node.id in self.constants.keys():
            return Constant(self.constants[node.id])
        return node
# may want to make this inherit from something else...
class StencilKernel(object):
    """Base class for user-defined stencil kernels.

    Subclasses must define a kernel() method. On construction, kernel()
    is replaced by shadow_kernel(), which JIT-specializes and runs a
    compiled version on every call (unless pure_python is set).
    """

    def __init__(self, with_cilk=False):
        # we want to raise an exception if there is no kernel()
        # method defined; hasattr is the direct check (the previous
        # dir(self).index(...) round-trip did the same thing indirectly)
        if not hasattr(self, "kernel"):
            raise Exception("No kernel method defined.")

        # the Python kernel itself serves as the model to be specialized
        self.model = self.kernel

        self.pure_python = False
        self.pure_python_kernel = self.kernel
        self.should_unroll = True
        self.should_cacheblock = False
        self.block_size = 1

        # replace kernel with shadow version
        self.kernel = self.shadow_kernel

        self.specialized_sizes = None
        self.with_cilk = with_cilk
        self.constants = {}

    def shadow_kernel(self, *args):
        """Run the kernel, (re)specializing first if the argument shapes
        changed since the last call. Falls back to the original Python
        implementation when pure_python is True."""
        if self.pure_python:
            return self.pure_python_kernel(*args)

        if not self.specialized_sizes or\
                self.specialized_sizes != [y.shape for y in args]:
            # last argument is the output grid, the rest are inputs
            self.specialized = StencilConvert(
                self.model, "kernel", args[0:-1], args[-1], self.constants)
            self.specialized_sizes = [arg.shape for arg in args]

        with Timer() as t:
            self.specialized(*[arg.data for arg in args])
        self.specialized.report(time=t)
import time
class Timer:
    """Context manager measuring elapsed wall-clock time in seconds.

    Usage: ``with Timer() as t: ...`` then read ``t.interval``.
    """

    def __enter__(self):
        # time.clock() was deprecated in 3.3 and removed in 3.8;
        # perf_counter() is its documented replacement.
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start
| 38.409207 | 96 | 0.583233 |
cc16e36d108c0b762fb5424a885b8ba7348ef602 | 6,647 | py | Python | monet/graph.py | stjordanis/MONeT-1 | 98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af | [
"MIT"
] | 161 | 2020-10-28T02:21:50.000Z | 2022-03-11T05:06:16.000Z | monet/graph.py | kiminh/MONeT | 83302c12e8fd3d1c8b2496928c843f0e84226cc8 | [
"MIT"
] | 4 | 2020-10-28T02:27:43.000Z | 2021-03-31T00:04:43.000Z | monet/graph.py | kiminh/MONeT | 83302c12e8fd3d1c8b2496928c843f0e84226cc8 | [
"MIT"
] | 15 | 2020-10-28T02:32:12.000Z | 2021-12-23T13:20:23.000Z | import torch
from functools import lru_cache
class Node:
    """Base graph node; records only the tensor shape it produces."""

    def __init__(self, shape):
        self.shape = shape
class Input(Node):
    """Graph node representing a model input tensor."""

    def __repr__(self):
        return '<Input {}>'.format(list(self.shape))
class Param(Node):
    """Graph node representing a learnable parameter tensor."""

    def __repr__(self):
        return '<Param {}>'.format(list(self.shape))
class ComputeNode(Node):
    """Graph node for a traced ATen operation.

    Arguments are wrapped either as ComputeNode.V (a literal value) or
    ComputeNode.D (a dependency on another graph node by index).
    """

    class Arg:
        pass

    class V(Arg):
        """Constant (literal) argument."""

        def __init__(self, v):
            self.value = v

        def __repr__(self):
            return '<V %s>' % str(self.value)

    class D(Arg):
        """Dependency argument: refers to graph node ``index``."""

        def __init__(self, index, requires_grad=False):
            self.index = index
            self.requires_grad = requires_grad

        def __repr__(self):
            return '<D %d %d>' % (self.index, self.requires_grad)

    def __init__(self, shape, nodeid, op, args, has_backward, is_depthwise=False):
        super().__init__(shape)
        self._op = op
        self._args = args
        self.id = nodeid
        self._has_backward = has_backward
        self._is_depthwise = is_depthwise
        # per-instance lazy cache for the dependencies property
        self._dependencies = None

    # NOTE: these properties were previously wrapped in functools.lru_cache,
    # which keys the cache on ``self`` and therefore keeps every node alive
    # for the cache's lifetime (flake8-bugbear B019) -- and caching a plain
    # attribute read buys nothing. The caches are removed; behavior is
    # unchanged.
    @property
    def op(self):
        return self._op

    @property
    def args(self):
        return self._args

    @property
    def has_backward(self):
        return self._has_backward

    @property
    def is_depthwise(self):
        return self._is_depthwise

    @property
    def dependencies(self):
        """(index, requires_grad) pairs for every D-type argument."""
        if self._dependencies is None:
            self._dependencies = [(a.index, a.requires_grad)
                                  for a in self._args if isinstance(a, self.D)]
        return self._dependencies

    def __repr__(self):
        return '<Op %s %s>' % (str(self._op), str(list(self.shape)))
class Graph:
    """Dataflow graph of a model's forward pass, built by JIT-tracing."""

    def __init__(self):
        self._nodes = []
        self._outputs = []

    def _add_node(self, node):
        # returns the node's index, which doubles as its graph id
        self._nodes.append(node)
        return len(self._nodes)-1

    def _add_input(self, shape):
        return self._add_node(Input(shape))

    def _add_param(self, shape):
        return self._add_node(Param(shape))

    def _add_op(self, shape, op, args, has_backward=False, is_depthwise=False):
        nodeid = len(self._nodes)
        return self._add_node(ComputeNode(shape, nodeid, op, args, has_backward, is_depthwise))

    def _add_output(self, output_id):
        self._outputs.append(output_id)

    @property
    def nodes(self):
        return self._nodes

    @classmethod
    def create(cls, model, input_shape=(3, 224, 224)):
        """Trace `model` with a dummy batch and build a Graph from the
        resulting TorchScript IR.

        The batch size 23 is a sentinel: any dimension equal to 23 in a
        traced shape is rewritten to -1 (meaning "variable batch").
        """
        # create a graph of the forward pass
        # JIT trace the model
        args = (torch.ones((23,) + input_shape),)
        graph, torch_out = torch.jit._get_trace_graph(model, args, _force_outplace=False, _return_inputs_states=False)
        torch._C._jit_pass_constant_propagation(graph)
        torch._C._jit_pass_inline(graph)
        torch._C._jit_pass_dce(graph)
        torch._C._jit_pass_lint(graph)
        params = torch.jit._unique_state_dict(model)
        assert len(list(graph.inputs())) == len(args) + len(params)
        node_id = {}
        r = cls()
        # graph inputs are the real args followed by the parameters
        arg_and_param_shape = [list(a.shape) for a in args] + [list(p.shape) for p in params.values()]
        for k, i in enumerate(graph.inputs()):
            if k < len(args):
                node_id[i.unique()] = r._add_input([-1]+arg_and_param_shape[k][1:])
            else:
                node_id[i.unique()] = r._add_param(arg_and_param_shape[k])
        const = {}

        # Track connected nodes in the graph: debug names reachable from
        # the model input (aten::size edges are ignored). Ops whose output
        # is in this set are considered to need a backward pass.
        track = set()
        track.add("input.1")
        for node in graph.nodes():
            if node.kind()!="aten::size":
                for ip in node.inputs():
                    if ip.debugName() in track or "input" in ip.debugName():
                        track.add(node.output().debugName())
            if "input" in node.output().debugName():
                track.add(node.output().debugName())

        # list_contents maps a ListConstruct/TupleConstruct output id to
        # the ids of its elements, so list args can be flattened inline
        list_contents = {}
        for n in graph.nodes():
            assert n.kind() != 'prim::GetAttr'
            if n.kind() == 'prim::Constant':
                const[n.output().unique()] = n['value'] if n.hasAttribute('value') else None
            elif len(n.kind()) > 6 and n.kind()[:6] == 'aten::':
                args = []
                for i in n.inputs():
                    iu = i.unique()
                    if iu in list_contents:
                        iu_list = list_contents[iu]
                    else:
                        iu_list = [iu]
                    for iu in iu_list:
                        if iu in const:
                            args.append(ComputeNode.V(const[iu]))
                        elif iu in node_id:
                            if i.debugName() not in track and (not isinstance(r._nodes[node_id[iu]], Input)) and (not isinstance(r._nodes[node_id[iu]], Param)): # Doing this for addmm and transpose
                                # untracked intermediate: re-wire the op to
                                # depend directly on the producer's inputs,
                                # which must all be Inputs/Params
                                for ii in i.node().inputs():
                                    iiu = ii.unique()
                                    assert (isinstance(r._nodes[node_id[iiu]], Input) or isinstance(r._nodes[node_id[iiu]], Param)) == True
                                    args.append(ComputeNode.D(node_id[iiu], ii.requires_grad()))
                            else:
                                args.append(ComputeNode.D(node_id[iu], i.requires_grad()))
                        else:
                            raise ValueError('Nodes %s disconnected' % repr(i))
                has_backward = False
                if n.output().debugName() in track:
                    has_backward = True
                # Identify depthwise conv: groups (arg 8) > 1 and equal to
                # the input's channel dimension
                is_depthwise = False
                if n.kind() == "aten::_convolution":
                    assert isinstance(args[8], ComputeNode.V)
                    if args[8].value > 1 and args[8].value == r.nodes[args[0].index].shape[1]:
                        is_depthwise = True
                # Add node to graph (batch-size sentinel 23 becomes -1)
                node_id[n.output().unique()] = r._add_op([s if s != 23 else -1 for s in n.output().type().sizes()],
                                                         n.kind(), args, has_backward, is_depthwise)
            elif n.kind() in ['prim::ListConstruct', 'prim::TupleConstruct']:
                list_contents[n.output().unique()] = [i.unique() for i in n.inputs()]
            else:
                print('Unknown OP', n.kind(), n)
        # Identify outputs
        for op in graph.outputs():
            if op.node().kind()[:6] == 'aten::':
                r._add_output(node_id[op.unique()])
            elif op.node().kind() == 'prim::TupleConstruct':
                # tuple output: register each element as a graph output
                for i in op.node().inputs():
                    r._add_output(node_id[i.unique()])
        return r
| 35.92973 | 197 | 0.531368 |
eedf083efa2a3dfc8c3bd3e271315707a901d0bb | 8,641 | py | Python | text2array/iterators.py | kmkurn/text2array | 910c349fcbd85299b137c132f218a26860211a6b | [
"Apache-2.0"
] | null | null | null | text2array/iterators.py | kmkurn/text2array | 910c349fcbd85299b137c132f218a26860211a6b | [
"Apache-2.0"
] | null | null | null | text2array/iterators.py | kmkurn/text2array | 910c349fcbd85299b137c132f218a26860211a6b | [
"Apache-2.0"
] | 1 | 2021-02-27T08:53:59.000Z | 2021-02-27T08:53:59.000Z | # Copyright 2019 Kemal Kurniawan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from random import Random
from typing import Any, Callable, Iterable, Iterator, Optional, Sequence, Sized
import statistics as stat
import warnings
from . import Batch, Sample
class BatchIterator(Iterable[Batch], Sized):
    """Iterator that produces batches of samples.

    Example:

        >>> from text2array import BatchIterator
        >>> samples = [
        ...     {'ws': ['a']},
        ...     {'ws': ['a', 'b']},
        ...     {'ws': ['b', 'b']},
        ... ]
        >>> iter_ = BatchIterator(samples, batch_size=2)
        >>> for b in iter_:
        ...     print(list(b))
        ...
        [{'ws': ['a']}, {'ws': ['a', 'b']}]
        [{'ws': ['b', 'b']}]

    Args:
        samples (~typing.Iterable[Sample]): Iterable of samples to batch.
        batch_size: Maximum number of samples in each batch.

    Note:
        When ``samples`` is an instance of `~typing.Sized`, this iterator can
        be passed to `len` to get the number of batches. Otherwise, a `TypeError`
        is raised.
    """

    def __init__(self, samples: Iterable[Sample], batch_size: int = 1) -> None:
        if batch_size <= 0:
            raise ValueError("batch size must be greater than 0")
        self._samples = samples
        self._bsz = batch_size

    @property
    def batch_size(self) -> int:
        return self._bsz

    def __len__(self) -> int:
        # ceiling division: all full batches plus one partial batch if any
        return -(-len(self._samples) // self._bsz)  # type: ignore

    def __iter__(self) -> Iterator[Batch]:
        source = iter(self._samples)
        done = False
        while not done:
            batch = Batch()
            for _ in range(self._bsz):
                try:
                    batch.append(next(source))
                except StopIteration:
                    done = True
                    break
            # the final pull may produce an empty batch; skip it
            if batch:
                yield batch
class ShuffleIterator(Iterable[Any], Sized):
    r"""Iterator that shuffles a sequence of items before iterating.

    When ``key`` is not given, this iterator performs ordinary shuffling using
    `random.shuffle`. Otherwise, a noisy sorting is performed. The items are
    sorted ascending by the value of the given key, plus some random noise
    :math:`\epsilon \sim` Uniform :math:`(-z, z)`, where :math:`z` equals ``scale``
    times the standard deviation of key values. This formulation means that ``scale``
    regulates how noisy the sorting is. The larger it is, the more noisy the sorting
    becomes, i.e. it resembles random shuffling more closely. In an extreme case
    where ``scale=0``, this method just sorts the items by ``key``. This method is
    useful when working with text data, where we want to shuffle the dataset and also
    minimize padding by ensuring that sentences of similar lengths are not too far apart.

    With fewer than two items, key-based shuffling leaves the sequence unchanged.

    Example:

        >>> from random import Random
        >>> from text2array import ShuffleIterator
        >>> samples = [
        ...     {'ws': ['a', 'b', 'b']},
        ...     {'ws': ['a']},
        ...     {'ws': ['a', 'a', 'b', 'b', 'b', 'b']},
        ... ]
        >>> iter_ = ShuffleIterator(samples, key=lambda s: len(s['ws']), rng=Random(1234))
        >>> for s in iter_:
        ...     print(s)
        ...
        {'ws': ['a']}
        {'ws': ['a', 'a', 'b', 'b', 'b', 'b']}
        {'ws': ['a', 'b', 'b']}

    Args:
        items (~typing.Sequence[Any]): Sequence of items to shuffle and iterate over.
        key (typing.Callable[[Any], int]): Callable to get the key value of an item.
        scale: Value to regulate the noise of the sorting. Must not be negative.
        rng: Random number generator to use for shuffling. Set this to ensure reproducibility.
            If not given, an instance of `~random.Random` with the default seed is used.
    """

    def __init__(
        self,
        items: Sequence[Any],
        key: Optional[Callable[[Any], int]] = None,
        scale: float = 1.0,
        rng: Optional[Random] = None,
    ) -> None:
        if scale < 0:
            raise ValueError("scale cannot be less than 0")
        if rng is None:  # pragma: no cover
            rng = Random()

        self._items = items
        self._key = key
        self._scale = scale
        self._rng = rng

    def __len__(self) -> int:
        return len(self._items)

    def __iter__(self) -> Iterator[Sample]:
        # reorder in place first, then hand out a plain iterator
        if self._key is None:
            self._shuffle()
        else:
            self._shuffle_by_key()
        return iter(self._items)

    def _shuffle(self) -> None:
        self._items = list(self._items)
        self._rng.shuffle(self._items)

    def _shuffle_by_key(self) -> None:
        assert self._key is not None
        # BUGFIX: stat.stdev raises StatisticsError for fewer than two data
        # points; with 0 or 1 items there is nothing to reorder anyway.
        if len(self._items) < 2:
            return
        std = stat.stdev(self._key(s) for s in self._items)
        # noise amplitude is scale * stdev of the key values
        z = self._scale * std
        noises = [self._rng.uniform(-z, z) for _ in range(len(self._items))]
        indices = list(range(len(self._items)))
        indices.sort(key=lambda i: self._key(self._items[i]) + noises[i])  # type: ignore
        shuf_items = [self._items[i] for i in indices]
        self._items = shuf_items
class BucketIterator(Iterable[Batch], Sized):
    """Iterator that batches together samples from the same bucket.

    Example:

        >>> from text2array import BucketIterator
        >>> samples = [
        ...     {'ws': ['a']},
        ...     {'ws': ['a', 'b']},
        ...     {'ws': ['b']},
        ...     {'ws': ['c']},
        ...     {'ws': ['b', 'b']},
        ... ]
        >>> iter_ = BucketIterator(samples, key=lambda s: len(s['ws']), batch_size=2)
        >>> for b in iter_:
        ...     print(list(b))
        ...
        [{'ws': ['a']}, {'ws': ['b']}]
        [{'ws': ['c']}]
        [{'ws': ['a', 'b']}, {'ws': ['b', 'b']}]

    Args:
        samples (~typing.Iterable[Sample]): Iterable of samples to batch.
        key (typing.Callable[[Sample], Any]): Callable to get the bucket key of a sample.
        batch_size: Maximum number of samples in each batch.
        shuffle_bucket: Whether to shuffle every bucket before batching.
        rng: Random number generator to use for shuffling. Set this to ensure reproducibility.
            If not given, an instance of `~random.Random` with the default seed is used.
        sort_bucket: Whether to sort every bucket before batching. When both ``shuffle_bucket``
            and ``sort_bucket`` is ``True``, sorting will be ignored (but don't rely on this
            behavior).
        sort_bucket_by (typing.Callable[[Sample], Any]): Callable acting as the sort key
            if ``sort_bucket=True``.

    Note:
        When ``samples`` is an instance of `~typing.Sized`, this iterator can
        be passed to `len` to get the number of batches. Otherwise, a `TypeError`
        is raised.
    """

    def __init__(
        self,
        samples: Iterable[Sample],
        key: Callable[[Sample], Any],
        batch_size: int = 1,
        shuffle_bucket: bool = False,
        rng: Optional[Random] = None,
        sort_bucket: bool = False,
        sort_bucket_by: Optional[Callable[[Sample], Any]] = None,
    ) -> None:
        if rng is None:  # pragma: no cover
            rng = Random()
        if shuffle_bucket and sort_bucket:
            warnings.warn(
                "Both shuffle_bucket and sort_bucket is True; sort_bucket will have no effect"
            )

        self._bsz = batch_size
        self._shuf = shuffle_bucket
        self._rng = rng

        # group samples into buckets by their key, preserving insertion order
        grouped = defaultdict(list)
        for sample in samples:
            grouped[key(sample)].append(sample)
        self._buckets = grouped.values()
        if sort_bucket:
            for bucket in self._buckets:
                bucket.sort(key=sort_bucket_by)

    @property
    def batch_size(self):
        return self._bsz

    def __len__(self):
        total = 0
        for bucket in self._buckets:
            total += len(BatchIterator(bucket, self._bsz))
        return total

    def __iter__(self):
        for bucket in self._buckets:
            if self._shuf:
                self._rng.shuffle(bucket)
            for batch in BatchIterator(bucket, self._bsz):
                yield batch
| 35.126016 | 95 | 0.580257 |
2b43eb1dbe6d09ada6337047d2da34382852a6ee | 1,392 | py | Python | Server_Client/client.py | mattgarbecki/RoundnetPositionTracker | 8e54c5fdb91e497184b66c9ff09e0e356d4d0e37 | [
"MIT"
] | null | null | null | Server_Client/client.py | mattgarbecki/RoundnetPositionTracker | 8e54c5fdb91e497184b66c9ff09e0e356d4d0e37 | [
"MIT"
] | null | null | null | Server_Client/client.py | mattgarbecki/RoundnetPositionTracker | 8e54c5fdb91e497184b66c9ff09e0e356d4d0e37 | [
"MIT"
] | 1 | 2020-05-13T23:51:06.000Z | 2020-05-13T23:51:06.000Z | import requests
import socket
import astropy.io.misc.asdf.tags.time.tests.test_time
import sensordroid
def getDataPoint():
sensordroid
return
def runClient(address, port):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = (address, port)
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
while True:
# CHANGE MESSAGE TO BE THE DATA THATS SENDING
message = input()
if message == "quit":
print("Closing client")
sock.sendall("quit".encode("UTF-8"))
break
message = message + "%" + str(time.time())
sock.sendall(message.encode("UTF-8"))
sock.close()
def manageClient():
LINK = "http://localhost:3500"
URL = LINK + "/address"
# PIPE UI INPUT HERE FOR GAME NAME
NAME = input("game name: ")
PARAMS = {'name':NAME}
r = requests.get(url = URL, params = PARAMS)
if r.status_code != 200:
print("ERROR: No Connection")
quit()
if r.text == "NONAME" or r.text == "INVALID":
print("ERROR: gamename not found")
quit()
DATA = r.text.strip("[]").split(",")
runClient(DATA[0].strip("\""), int(DATA[1]))
if __name__ == "__main__":
manageClient()
| 24.857143 | 66 | 0.604167 |
b7bef69db510c9e9ac644ec846c76e072b675620 | 2,263 | py | Python | slack_differ.py | robobario/slack_differ | 60ca4389a9faf36b5050847f88adcfc32775122e | [
"MIT"
] | null | null | null | slack_differ.py | robobario/slack_differ | 60ca4389a9faf36b5050847f88adcfc32775122e | [
"MIT"
] | null | null | null | slack_differ.py | robobario/slack_differ | 60ca4389a9faf36b5050847f88adcfc32775122e | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sh
from os import listdir, mkdir
from os.path import join, realpath, dirname, isdir, isfile
import json
import sys
print "running slack_differ"
dname = dirname(realpath(__file__))
print "dirname : " + dname
script_dir = join(dname,'scripts')
print "script_dir : " + script_dir
if not isdir(script_dir):
print "no script dir found"
scripts = []
else:
scripts = [f for f in listdir(script_dir) if f.endswith(".json")]
for script in scripts:
try:
script_metadata = json.load(open(join(script_dir, script)))
old_file = join(dname, script + ".old")
new_file = join(dname, script + ".new")
new_out = open(new_file,'w')
command = sh.Command(join(script_dir,script_metadata["script"]))
print("running " + join(script_dir,script_metadata["script"]))
command(_out=new_out)
if not isfile(old_file):
sh.cp(new_file,old_file)
diff = sh.diff(old_file,new_file,_ok_code=[0,1])
if len(diff) > 0:
message = str(open(new_file).read())
payload = {
"channel":script_metadata["channel"],
"username":script_metadata["user"],
"text": script_metadata["title"],
"icon_emoji":script_metadata["emoji"],
"attachments" : [
{
"color":"good",
"fields":[
{
"title":"new value",
"value":message,
"short":False
}
]
}
]
}
payload = "payload=" + json.dumps(payload)
sh.mv(new_file,old_file)
sh.curl(["curl", "-X", "POST" ,"--data-urlencode" ,payload,script_metadata["slack_url"]])
except Exception as e:
print "failed on " + script +" :" + str(e)
# "example" mode: scaffold a sample script + metadata pair in scripts/
if len(sys.argv) > 1 and sys.argv[1] == "example":
    print("initialising example")
    if not isdir(script_dir):
        mkdir(script_dir)
    # BUGFIX: use "with" so the files are closed and their contents are
    # actually flushed to disk (they were previously left open)
    with open(join(script_dir,"example.sh"),'w') as script:
        script.write("#!/bin/bash\necho 'hi'")
    meta = {
        "channel":"#example",
        "user":"examplebot",
        "title":"script title",
        "emoji":":metal:",
        "slack_url":"https://hook.slack.com/XXX/YYY",
        "script":"example.sh"
    }
    with open(join(script_dir,"example.json"),'w') as metafile:
        metafile.write(json.dumps(meta))
| 31.873239 | 95 | 0.592134 |
6002076e36db07619287501f4e07304697ad3b4d | 282 | py | Python | fms/fms/doctype/announcement/announcement.py | sagar30051991/KF-FMS | e6e41343e2f90b09c73ef4f7a7ec35fd7be81a18 | [
"MIT"
] | null | null | null | fms/fms/doctype/announcement/announcement.py | sagar30051991/KF-FMS | e6e41343e2f90b09c73ef4f7a7ec35fd7be81a18 | [
"MIT"
] | null | null | null | fms/fms/doctype/announcement/announcement.py | sagar30051991/KF-FMS | e6e41343e2f90b09c73ef4f7a7ec35fd7be81a18 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, New Indictrans Technology Pvt Ltd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Announcement(Document):
	"""Frappe DocType controller for Announcement; no behavior beyond the Document base."""
	pass
| 25.636364 | 72 | 0.787234 |
faf3274822022da014b74e39ab2416be71ca1782 | 1,584 | py | Python | src/modtorsiflex/__init__.py | cathedralpkg/TorsiFlex | 35f59b8813befdaac7a1c985216a7ead3012fb53 | [
"Unlicense"
] | 1 | 2022-02-02T01:04:36.000Z | 2022-02-02T01:04:36.000Z | src/modtorsiflex/__init__.py | cathedralpkg/TorsiFlex | 35f59b8813befdaac7a1c985216a7ead3012fb53 | [
"Unlicense"
] | null | null | null | src/modtorsiflex/__init__.py | cathedralpkg/TorsiFlex | 35f59b8813befdaac7a1c985216a7ead3012fb53 | [
"Unlicense"
] | 3 | 2021-12-28T14:21:32.000Z | 2022-03-14T13:18:07.000Z | '''
---------------------------
Licensing and Distribution
---------------------------
Program name: TorsiFlex
Version : 2021.3
License : MIT/x11
Copyright (c) 2021, David Ferro Costas (david.ferro@usc.es) and
Antonio Fernandez Ramos (qf.ramos@usc.es)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : modtorsiflex |
| Sub-module : __init__ |
| Last Update: 2021/11/22 (Y/M/D) |
| Main Author: David Ferro-Costas |
*----------------------------------*
Just an __init__.py so that this directory is importable as a Python module
'''
| 36.837209 | 78 | 0.688131 |
8bc98ef72a8c42cb4393d39ec6ac3e02d7565424 | 17,497 | py | Python | plugins/modules/oci_dns_resolver_actions.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_dns_resolver_actions.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_dns_resolver_actions.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_dns_resolver_actions
short_description: Perform actions on a Resolver resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Resolver resource in Oracle Cloud Infrastructure
- For I(action=change_compartment), moves a resolver into a different compartment along with its protected default view and any endpoints.
Zones in the default view are not moved. Requires a `PRIVATE` scope query parameter.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
resolver_id:
description:
- The OCID of the target resolver.
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment into which the resolver, along with
its protected default view and resolver endpoints, should be moved.
type: str
required: true
scope:
description:
- Specifies to operate only on resources that have a matching DNS scope.
type: str
choices:
- "GLOBAL"
- "PRIVATE"
action:
description:
- The action to perform on the Resolver.
type: str
required: true
choices:
- "change_compartment"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on resolver
oci_dns_resolver_actions:
# required
resolver_id: "ocid1.resolver.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
action: change_compartment
# optional
scope: GLOBAL
"""
RETURN = """
resolver:
description:
- Details of the Resolver resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the owning compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
attached_vcn_id:
description:
- The OCID of the attached VCN.
returned: on success
type: str
sample: "ocid1.attachedvcn.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The display name of the resolver.
returned: on success
type: str
sample: display_name_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "**Example:** `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "**Example:** `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
id:
description:
- The OCID of the resolver.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- "The date and time the resource was created in \\"YYYY-MM-ddThh:mm:ssZ\\" format
with a Z offset, as defined by RFC 3339."
- "**Example:** `2016-07-22T17:23:59:60Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- "The date and time the resource was last updated in \\"YYYY-MM-ddThh:mm:ssZ\\"
format with a Z offset, as defined by RFC 3339."
- "**Example:** `2016-07-22T17:23:59:60Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the resource.
returned: on success
type: str
sample: ACTIVE
_self:
description:
- The canonical absolute URL of the resource.
returned: on success
type: str
sample: _self_example
default_view_id:
description:
- The OCID of the default view.
returned: on success
type: str
sample: "ocid1.defaultview.oc1..xxxxxxEXAMPLExxxxxx"
is_protected:
description:
- A Boolean flag indicating whether or not parts of the resource are unable to be explicitly managed.
returned: on success
type: bool
sample: true
endpoints:
description:
- Read-only array of endpoints for the resolver.
returned: on success
type: complex
contains:
name:
description:
- The name of the resolver endpoint. Must be unique, case-insensitive, within the resolver.
returned: on success
type: str
sample: name_example
endpoint_type:
description:
- The type of resolver endpoint. VNIC is currently the only supported type.
returned: on success
type: str
sample: VNIC
forwarding_address:
description:
- An IP address from which forwarded queries may be sent. For VNIC endpoints, this IP address must be part
of the subnet and will be assigned by the system if unspecified when isForwarding is true.
returned: on success
type: str
sample: forwarding_address_example
is_forwarding:
description:
- A Boolean flag indicating whether or not the resolver endpoint is for forwarding.
returned: on success
type: bool
sample: true
is_listening:
description:
- A Boolean flag indicating whether or not the resolver endpoint is for listening.
returned: on success
type: bool
sample: true
listening_address:
description:
- An IP address to listen to queries on. For VNIC endpoints this IP address must be part of the
subnet and will be assigned by the system if unspecified when isListening is true.
returned: on success
type: str
sample: listening_address_example
compartment_id:
description:
- The OCID of the owning compartment. This will match the resolver that the resolver endpoint is under
and will be updated if the resolver's compartment is changed.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- "The date and time the resource was created in \\"YYYY-MM-ddThh:mm:ssZ\\" format
with a Z offset, as defined by RFC 3339."
- "**Example:** `2016-07-22T17:23:59:60Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- "The date and time the resource was last updated in \\"YYYY-MM-ddThh:mm:ssZ\\"
format with a Z offset, as defined by RFC 3339."
- "**Example:** `2016-07-22T17:23:59:60Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the resource.
returned: on success
type: str
sample: ACTIVE
_self:
description:
- The canonical absolute URL of the resource.
returned: on success
type: str
sample: _self_example
subnet_id:
description:
- The OCID of a subnet. Must be part of the VCN that the resolver is attached to.
returned: on success
type: str
sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
attached_views:
description:
- The attached views. Views are evaluated in order.
returned: on success
type: complex
contains:
view_id:
description:
- The OCID of the view.
returned: on success
type: str
sample: "ocid1.view.oc1..xxxxxxEXAMPLExxxxxx"
rules:
description:
- Rules for the resolver. Rules are evaluated in order.
returned: on success
type: complex
contains:
client_address_conditions:
description:
- A list of CIDR blocks. The query must come from a client within one of the blocks in order for the rule action
to apply.
returned: on success
type: list
sample: []
qname_cover_conditions:
description:
- A list of domain names. The query must be covered by one of the domains in order for the rule action to apply.
returned: on success
type: list
sample: []
action:
description:
- "The action determines the behavior of the rule. If a query matches a supplied condition, the action will
apply. If there are no conditions on the rule, all queries are subject to the specified action.
* `FORWARD` - Matching requests will be forwarded from the source interface to the destination address."
returned: on success
type: str
sample: FORWARD
destination_addresses:
description:
- IP addresses to which queries should be forwarded. Currently limited to a single address.
returned: on success
type: list
sample: []
source_endpoint_name:
description:
- Case-insensitive name of an endpoint, that is a sub-resource of the resolver, to use as the forwarding
interface. The endpoint must have isForwarding set to true.
returned: on success
type: str
sample: source_endpoint_name_example
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"attached_vcn_id": "ocid1.attachedvcn.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "ACTIVE",
"_self": "_self_example",
"default_view_id": "ocid1.defaultview.oc1..xxxxxxEXAMPLExxxxxx",
"is_protected": true,
"endpoints": [{
"name": "name_example",
"endpoint_type": "VNIC",
"forwarding_address": "forwarding_address_example",
"is_forwarding": true,
"is_listening": true,
"listening_address": "listening_address_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "ACTIVE",
"_self": "_self_example",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
}],
"attached_views": [{
"view_id": "ocid1.view.oc1..xxxxxxEXAMPLExxxxxx"
}],
"rules": [{
"client_address_conditions": [],
"qname_cover_conditions": [],
"action": "FORWARD",
"destination_addresses": [],
"source_endpoint_name": "source_endpoint_name_example"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.dns import DnsClient
from oci.dns.models import ChangeResolverCompartmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ResolverActionsHelperGen(OCIActionsHelperBase):
    """Generated helper implementing resolver actions for oci_dns_resolver_actions.

    Supported actions:
        change_compartment
    """

    @staticmethod
    def get_module_resource_id_param():
        # Name of the Ansible module parameter carrying the resource OCID.
        return "resolver_id"

    def get_module_resource_id(self):
        # OCID of the resolver this invocation operates on.
        return self.module.params.get("resolver_id")

    def get_get_fn(self):
        # SDK "get" function the base class uses to fetch resource state.
        return self.client.get_resolver

    def get_resource(self):
        # Fetch the current resolver (wrapped in retry/backoff).
        return oci_common_utils.call_with_backoff(
            self.client.get_resolver, resolver_id=self.module.params.get("resolver_id"),
        )

    def change_compartment(self):
        """Move the resolver into the compartment given in the module params."""
        # Convert the flat module params into the SDK details model.
        action_details = oci_common_utils.convert_input_data_to_model_class(
            self.module.params, ChangeResolverCompartmentDetails
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.change_resolver_compartment,
            call_fn_args=(),
            call_fn_kwargs=dict(
                resolver_id=self.module.params.get("resolver_id"),
                change_resolver_compartment_details=action_details,
                scope=self.module.params.get("scope"),
            ),
            # NONE waiter: the call returns without polling a lifecycle state.
            waiter_type=oci_wait_utils.NONE_WAITER_KEY,
            operation="{0}_{1}".format(
                self.module.params.get("action").upper(),
                oci_common_utils.ACTION_OPERATION_KEY,
            ),
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_action_desired_states(
                self.module.params.get("action")
            ),
        )
# Optional user-supplied customization class; get_custom_class falls back to a
# no-op base when no custom class is registered.
ResolverActionsHelperCustom = get_custom_class("ResolverActionsHelperCustom")


class ResourceHelper(ResolverActionsHelperCustom, ResolverActionsHelperGen):
    # Custom overrides (if any) take precedence over the generated helper via MRO.
    pass
def main():
    """Module entry point: build the argument spec, dispatch the requested action, exit."""
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=False, supports_wait=False
    )
    module_args.update(
        dict(
            resolver_id=dict(aliases=["id"], type="str", required=True),
            compartment_id=dict(type="str", required=True),
            scope=dict(type="str", choices=["GLOBAL", "PRIVATE"]),
            action=dict(type="str", required=True, choices=["change_compartment"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    # Fail fast when the OCI SDK failed to import (flag set at module load time).
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_helper = ResourceHelper(
        module=module,
        resource_type="resolver",
        service_client_class=DnsClient,
        namespace="dns",
    )

    result = resource_helper.perform_action(module.params.get("action"))

    module.exit_json(**result)
# Run the module when Ansible executes this file directly.
if __name__ == "__main__":
    main()
| 39.675737 | 151 | 0.563125 |
7024726e6c75726b1d6913cb263146591311183c | 418 | py | Python | __Training__/Python - HackerRank/2. Basic Data Types/Finding the percentage.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Training__/Python - HackerRank/2. Basic Data Types/Finding the percentage.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Training__/Python - HackerRank/2. Basic Data Types/Finding the percentage.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/finding-the-percentage/problem
n = int(input())
# 3
student_marks = {}
for _ in range(n):
name, *scores = input().split()
# Krishna 67 68 69
# Arjun 70 98 63
# Malika 52 56 60
scores = list(map(float, scores))
student_marks[name] = sum(scores) / len(scores)
query_name = input()
# Malika
print("{:.2f}".format(student_marks[query_name]))
# 56.00
| 17.416667 | 70 | 0.643541 |
e87f922c17b9089a645b1b05a99fd83e86d74ecf | 2,065 | py | Python | holmes/validators/blacklist.py | scorphus/holmes-api | 6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59 | [
"MIT"
] | null | null | null | holmes/validators/blacklist.py | scorphus/holmes-api | 6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59 | [
"MIT"
] | null | null | null | holmes/validators/blacklist.py | scorphus/holmes-api | 6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from holmes.utils import get_domain_from_url, is_valid
from holmes.validators.base import Validator
from holmes.utils import _
class BlackListValidator(Validator):
    """Flags pages whose hyperlinks point at domains on the configured black list."""

    @classmethod
    def get_blacklist_parsed_value(cls, value):
        """Render the offending URLs as numbered HTML links for the violation report."""
        return ', '.join([
            '<a href="%s" target="_blank">Link #%s</a>' % (url, index)
            for index, url in enumerate(value)
        ])

    @classmethod
    def get_violation_definitions(cls):
        """Describe the single violation this validator can raise."""
        return {
            'blacklist.domains': {
                'title': _('Domain Blacklist'),
                'description': _('Some links are blacklisted: %s'),
                'value_parser': cls.get_blacklist_parsed_value,
                'category': _('SEO'),
                'generic_description': _(
                    'Detected domain blacklisted hyperlinks. '
                    'Links with this violation are those that have anchors '
                    'to websites added in Holmes\'s Black List configuration.'
                ),
                'unit': 'list'
            }
        }

    @classmethod
    def get_default_violations_values(cls, config):
        """Default violation preference: the black list taken from the app config."""
        return {
            'blacklist.domains': {
                'value': config.BLACKLIST_DOMAIN,
                'description': config.get_description('BLACKLIST_DOMAIN')
            }
        }

    def validate(self):
        """Add a 'blacklist.domains' violation when any page link targets a blacklisted domain."""
        blacklist_domains = self.get_violation_pref('blacklist.domains')

        domains = []

        # get_links() returns None when no link facts were collected for the
        # page; guard so the for-loop below does not raise TypeError.
        links = self.get_links() or []

        for link in links:
            href = link.get('href')
            if not is_valid(href):
                continue

            link_domain, link_domain_url = get_domain_from_url(href)
            if link_domain in blacklist_domains:
                domains.append(href)

        if domains:
            # 100 points per offending link.
            self.add_violation(
                key='blacklist.domains',
                value=domains,
                points=100 * len(domains)
            )

    def get_links(self):
        """Return the page's collected anchors, or None when the fact is absent."""
        return self.review.data.get('page.all_links', None)
| 29.927536 | 78 | 0.550121 |
5943feb9a555751ad97da8b8829466a0b0e894d1 | 2,262 | py | Python | transitland/test_operator.py | transit-land/onestop-id-python-client | d03d8759d0758803519c51c6970213946a4078d4 | [
"MIT"
] | null | null | null | transitland/test_operator.py | transit-land/onestop-id-python-client | d03d8759d0758803519c51c6970213946a4078d4 | [
"MIT"
] | null | null | null | transitland/test_operator.py | transit-land/onestop-id-python-client | d03d8759d0758803519c51c6970213946a4078d4 | [
"MIT"
] | null | null | null | """Test Operator."""
import unittest
import util
from operator import Operator
class TestOperator(unittest.TestCase):
    """Unit tests for the transitland Operator entity built from the example GTFS feed."""

    def setUp(self):
        # Expected Onestop export of the example feed's operator.
        self.expect = util.example_export()

    def _sanity(self, entity):
        """Perform sanity checks! After bootstrap_gtfs or from_json..."""
        # More extensive checks, since json export includes nearly everything.
        assert entity.geohash() == '9qs'
        assert entity.onestop() == self.expect['onestopId']
        assert len(entity.identifiers()) == 1
        # Routes
        assert len(entity.routes()) == 5
        expect = ['r-9qsczp-40', 'r-9qt1-50',
            'r-9qsb-20', 'r-9qscy-30', 'r-9qscy-10']
        for i in entity.routes():
            assert i.onestop() in expect
            assert len(i.identifiers()) == 1
        # Stops
        assert len(entity.stops()) == 9
        for i in entity.stops():
            assert i.onestop() in self.expect['serves']
            assert len(i.identifiers()) == 1

    def test_init(self):
        # Constructing an empty Operator must not raise.
        entity = Operator()

    def test_geohash(self):
        entity = util.example_feed().operator(self.expect['onestopId'])
        assert entity.geohash() == '9qs'

    def test_from_json(self):
        # Round-trip: export to json, rebuild via from_json, re-run sanity checks.
        # (Removed an unused duplicate util.example_feed() call that built a
        # second, never-used feed.)
        entity = util.example_feed().operator(self.expect['onestopId'])
        roundtrip = Operator.from_json(entity.json())
        self._sanity(roundtrip)

    def test_json(self):
        entity = util.example_feed().operator(self.expect['onestopId'])
        data = entity.json()
        for k in ['name','onestopId','type']:
            assert data[k] == self.expect[k]
        assert len(data['features']) == 14

    def test_routes(self):
        entity = util.example_feed().operator(self.expect['onestopId'])
        assert len(entity.routes()) == 5

    def test_route(self):
        entity = util.example_feed().operator(self.expect['onestopId'])
        for i in entity.routes():
            assert entity.route(i.onestop())
        # Unknown route keys must raise.
        with self.assertRaises(ValueError):
            entity.route('none')

    def test_stops(self):
        entity = util.example_feed().operator(self.expect['onestopId'])
        assert len(entity.stops()) == 9

    def test_stop(self):
        entity = util.example_feed().operator(self.expect['onestopId'])
        for i in entity.stops():
            assert entity.stop(i.onestop())
        # Unknown stop keys must raise.
        with self.assertRaises(ValueError):
            entity.stop('none')
| 31.859155 | 74 | 0.651636 |
cca0cfd65ae7e00b14583c214174fdf2b0828e7f | 633 | py | Python | vcx/wrappers/python3/setup.py | evernym/indy-sdk | 714d449353518f929d9787d3156af785e2a42ccb | [
"Apache-2.0"
] | 5 | 2018-04-09T12:26:28.000Z | 2019-06-12T01:45:30.000Z | vcx/wrappers/python3/setup.py | evernym/indy-sdk | 714d449353518f929d9787d3156af785e2a42ccb | [
"Apache-2.0"
] | 9 | 2019-01-22T22:31:54.000Z | 2019-04-11T21:45:09.000Z | vcx/wrappers/python3/setup.py | evernym/indy-sdk | 714d449353518f929d9787d3156af785e2a42ccb | [
"Apache-2.0"
] | 19 | 2018-04-25T16:08:43.000Z | 2022-01-11T10:18:38.000Z | from setuptools import setup, find_packages
import os
PKG_VERSION = os.environ.get('PACKAGE_VERSION') or '0.2.4'
PKG_NAME = os.environ.get('PACKAGE_NAME') or 'python3-wrapper-vcx'
def get_version():
    """Return the package version from the VCX_VERSION environment variable.

    Falls back to '0.2.0' when the variable is unset. Uses os.environ.get
    instead of try/except KeyError for the idiomatic single-lookup form.
    """
    return os.environ.get('VCX_VERSION', '0.2.0')
# NOTE(review): version comes from VCX_VERSION via get_version(); the
# PKG_VERSION constant above (PACKAGE_VERSION env var) is computed but never
# used here — confirm which variable is the intended source of truth.
setup(
    name=PKG_NAME,
    version=get_version(),
    description='Python 3 wrapper for libcxs',
    long_description='None...for now',
    author='Devin Fisher, Ryan Marsh, Mark Hadley, Doug Wightman',
    author_email='ryan.marsh@evernym.com',
    include_package_data=True,
    packages=find_packages(exclude=['demo', 'tests'])
)
| 26.375 | 66 | 0.696682 |
67d3173dc87d11acdf55f9134ba7a7426c31319b | 12,769 | py | Python | src/platform_vision/scripts/platform_vision/SlaveCameraController.py | ahmohamed1/activeStereoVisionPlatform | 6c928ca242e4de68c7b15a8748bff1d9f7fa1382 | [
"MIT"
] | null | null | null | src/platform_vision/scripts/platform_vision/SlaveCameraController.py | ahmohamed1/activeStereoVisionPlatform | 6c928ca242e4de68c7b15a8748bff1d9f7fa1382 | [
"MIT"
] | null | null | null | src/platform_vision/scripts/platform_vision/SlaveCameraController.py | ahmohamed1/activeStereoVisionPlatform | 6c928ca242e4de68c7b15a8748bff1d9f7fa1382 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
import cv2
import sys
import numpy as np
import math
import argparse
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Vector3, Pose2D
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Int64
from std_msgs.msg import Float64
from std_msgs.msg import Bool
import os.path
import platform_vision.PNCC as PNCC
import platform_vision.BaseFeatureMatching as BaseFeatureMatching
from platform_controller.vergincyDepthClass import DrawTrackingSystem
VERBOSE = True
DEBUG = True
class SlaveCameraController:
    def __init__(self, activeTilitController=False,algorithmToUse= 'PNCC', scaleDown = 0):
        """Wire up ROS topics, the display window, and the chosen matching algorithm.

        activeTilitController: also drive the slave tilt motor when True.
        algorithmToUse: 'PNCC' (pyramid template matching) or a feature
            matcher key ('kaze', 'FLANN', 'Brute').
        scaleDown: integer divisor applied to the 2048x1080 frames (0 = full size).
        """
        self.drawTrackingSystem = DrawTrackingSystem()
        # Display window for the slave camera's tracking output.
        cv2.namedWindow('Slave Camera', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Slave Camera', (640,420))
        cv2.moveWindow('Slave Camera', 1000, 600)
        # Pan command publisher plus stereo image / template-size subscriptions.
        self.motorPublisher = rospy.Publisher('/right/pan/move', Float64, queue_size=2)
        self.left_image_sub = rospy.Subscriber('/stereo/left/image_raw', Image, self.left_image_callback)
        self.right_image_sub = rospy.Subscriber('/stereo/right/image_raw', Image, self.right_image_callback)
        self.templateSizeSub = rospy.Subscriber('/templateSize', Pose2D, self.templateSizeCallBack)
        self.bridge = CvBridge()
        self.OnTargetPublisher = rospy.Publisher('/right/onTarget', Bool, queue_size=1)
        self.fileExcite = False  # whether a capture filename already exists (see saveImage)
        self.activeTilitController = activeTilitController
        if self.activeTilitController:
            self.slaveTiltMotorPub = rospy.Publisher('/right/tilt/move', Float64, queue_size=2)
            self.motorMinLimitTilt = -37
            self.motorMaxLimitTilt = 37
        self.left_image = None
        self.right_image = None
        self.savenumber = 0
        self.motorCountForTerminate = 0
        # Define the pyramid algorithm: working resolution, template size,
        # motor dead-bands and pyramid depth depend on the downscale factor.
        self.ScaleDown = scaleDown
        if self.ScaleDown > 0:
            # self.imageSize = np.array([920, 640])
            self.imageSize = np.array([2048/self.ScaleDown , 1080/self.ScaleDown])
            self.templateSize = 80
            self.thresholdMotorController = np.array([20,6])  # [coarse, fine] pixel dead-bands
            pyramidLevel = 4
            self.scaleTemplate = 0.5
        else:
            self.imageSize = np.array([2048 , 1080])
            self.templateSize = 200
            self.thresholdMotorController = np.array([50,10])
            pyramidLevel = 7
            self.scaleTemplate = 1.0
        self.algorithmToUse = algorithmToUse
        self.featueMatchingAlgorithmState = False
        if self.algorithmToUse == 'PNCC':
            self.fastMatchingPyramid = PNCC.FastMatchingPyramid(self.imageSize, pyramidLevel=pyramidLevel,
                                    windowSize=self.templateSize, grayImage = False,
                                    showImage = True,drawDifferencesInImage= True,
                                    operatingName = 'Slave ')
        elif self.algorithmToUse == 'kaze' or self.algorithmToUse == 'FLANN' or self.algorithmToUse == 'Brute':
            self.featueMatchingAlgorithmState = True
            self.trackingFeature = BaseFeatureMatching.BaseFeatureMatching()
        # Controller gains and absolute pan travel limits (degrees, presumably).
        self.exponatialGain = [0.0025, 0.0035]
        self.mapExponatialValue = [0.3, 0.35]
        self.motorMinLimit = -75
        self.motorMaxLimit = 75
        self.currentPos = [0.0, 0.0]  # [pan, tilt] last commanded positions
        self.stepDistance = 0.0001
        self.motorPos = [Float64(), Float64()]  # reusable [pan, tilt] messages
        # Publish the zero position repeatedly so late-joining subscribers get it.
        i = 0
        r = rospy.Rate(10) # 10hz
        while (i < 5):
            self.motorPos[0].data = self.currentPos[0]
            self.motorPos[1].data = self.currentPos[1]
            # set the motor to the zero position
            self.motorPublisher.publish(self.motorPos[0])
            self.slaveTiltMotorPub.publish(self.motorPos[1])
            r.sleep()
            i +=1
        self.currentPos[1] = 8
        # NOTE(review): i is already 5 here, so this loop never executes and the
        # tilt offset of 8 is never published — confirm whether i should reset.
        while (i < 5):
            self.motorPos[1].data = self.currentPos[1]
            # set the motor to the zero position
            self.slaveTiltMotorPub.publish(self.motorPos[1])
            r.sleep()
            i +=1
        # sleep for 0.5 seconds
        rospy.sleep(.5)
        self.terminateButton = 1
def __del__(self):
for i in range(10):
self.currentPos = [0.0, 0.0]
self.motorPublisher.publish(self.motorPos[0])
self.slaveTiltMotorPub.publish(self.motorPos[1])
    def saveImage(self, templateImage):
        """Save the current template plus both camera frames as numbered JPEGs.

        Scans /home/abdulla/dev/Data/ for the first unused index so existing
        captures are never overwritten.
        """
        self.savenumber += 1
        tempImgStr = '/home/abdulla/dev/Data/' + str(self.savenumber) + 'template.jpg'
        leftImgStr = '/home/abdulla/dev/Data/' + str(self.savenumber) + 'left.jpg'
        rightImgStr = '/home/abdulla/dev/Data/' + str(self.savenumber) + 'right.jpg'
        # Keep bumping the index until a free template filename is found.
        self.fileExcite = os.path.isfile(tempImgStr)
        while self.fileExcite:
            self.savenumber += 1
            tempImgStr = '/home/abdulla/dev/Data/' + str(self.savenumber) + 'template.jpg'
            self.fileExcite = os.path.isfile(tempImgStr)
        print (self.savenumber)
        cv2.imwrite(tempImgStr, templateImage)
        cv2.imwrite(rightImgStr, self.right_image)
        cv2.imwrite(leftImgStr, self.left_image)
        print ('Image saved')
    def my_mouse_callback(self, event,x,y,flags,param):
        """OpenCV mouse handler: left click saves the current template/frames,
        right click increments the terminate counter (trackObject exits at 2)."""
        if event==cv2.EVENT_LBUTTONDOWN:
            if self.algorithmToUse == 'PNCC':
                self.saveImage(self.fastMatchingPyramid.getTemplate())
            pass
        if event==cv2.EVENT_RBUTTONDOWN:
            self.terminateButton += 1
def moveToZero(self, pan=0.0, tilt=0.0):
print('Motor move to ZERO position!!!')
for i in range(10):
self.motorPos[0].data = pan
self.motorPos[1].data = tilt
self.currentPos = [0.0, 0.0]
# set the motor to the zero position
self.motorPublisher.publish(self.motorPos[0])
self.slaveTiltMotorPub.publish(self.motorPos[1])
    def moveMotor(self,value):
        """Step the pan motor to reduce the given horizontal pixel error.

        Returns True when the error is inside the inner dead-band (pan axis
        considered centred on target), False otherwise.
        """
        state = False
        speed = 0
        # Exponential control law: step size grows with the error magnitude,
        # direction opposes the error sign.
        speed = np.sign(-value) * math.exp(abs(value)*self.exponatialGain[0])*self.mapExponatialValue[0]
        if abs(value) > self.thresholdMotorController[0] :
            # Coarse regime: exponential step, published only inside travel limits.
            self.currentPos[0] += speed
            self.motorPos[0].data = self.currentPos[0]
            # print("Motor speed: ", self.currentPos)
            if self.currentPos[0] < self.motorMaxLimit and self.currentPos[0] > self.motorMinLimit :
                self.motorPublisher.publish(self.motorPos[0])
        elif abs(value) < self.thresholdMotorController[0] and abs(value) > self.thresholdMotorController[1]:
            # Fine regime: small proportional correction (no limit check here).
            self.currentPos[0] -= value * 0.001
            self.motorPos[0].data = self.currentPos[0]
            # print("Motor speed: ", self.currentPos)
            self.motorPublisher.publish(self.motorPos[0])
        else:
            # Inside the inner dead-band: pan axis is on target.
            # print ("Pan Center Position")
            state = True
        return state
    def TiltMoveMotor(self,value):
        """Step the tilt motor to reduce the given vertical pixel error.

        Mirrors moveMotor but uses the tilt gains/limits. Returns True when
        the error is inside the inner dead-band (tilt axis centred).
        """
        state = False
        TiltSpeed = 0
        # Exponential control law, direction opposing the error sign.
        TiltSpeed = np.sign(-value) * math.exp(abs(value)*self.exponatialGain[1])*self.mapExponatialValue[1]
        if abs(value) > self.thresholdMotorController[0] :
            # Coarse regime: exponential step, published only inside tilt limits.
            self.currentPos[1] += TiltSpeed
            self.motorPos[1].data = self.currentPos[1]
            # print("Motor speed: ", self.TiltCurrentPos)
            if self.currentPos[1] < self.motorMaxLimitTilt and self.currentPos[1] > self.motorMinLimitTilt:
                self.slaveTiltMotorPub.publish(self.motorPos[1])
        elif abs(value) < self.thresholdMotorController[0] and abs(value) > self.thresholdMotorController[1]:
            # Fine regime: small proportional correction.
            self.currentPos[1] -= value * 0.001
            self.motorPos[1].data = self.currentPos[1]
            # print("Motor speed: ", self.TiltCurrentPos)
            self.slaveTiltMotorPub.publish(self.motorPos[1])
        else:
            # Inside the inner dead-band: tilt axis is on target.
            # print ("Tilt Center Position")
            state = True
        return state
    def convertROSToCV(self, data):
        """Convert a ROS Image message to a BGR OpenCV array, resizing when ScaleDown is set.

        NOTE: implicitly returns None when the CvBridge conversion fails.
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
            if self.ScaleDown:
                # Downscale to the working resolution chosen in __init__.
                return cv2.resize(cv_image, (self.imageSize[0], self.imageSize[1]))
            else:
                return cv_image
        except CvBridgeError, e:
            print e
    def left_image_callback(self, data):
        # Cache the latest (master) left-camera frame as an OpenCV image.
        self.left_image = self.convertROSToCV(data)
    def right_image_callback(self, data):
        # Cache the latest (slave) right-camera frame as an OpenCV image.
        self.right_image = self.convertROSToCV(data)
    def templateSizeCallBack(self, data):
        """Update the matcher's template size from a /templateSize Pose2D message."""
        # templateSize = data.data
        # self.moveToZero(pan=5, tilt=10)
        if self.ScaleDown > 0:
            # Scale the incoming full-resolution size down to the working resolution.
            templateSize = [data.x/self.ScaleDown, data.y/self.ScaleDown]
        else:
            templateSize = [data.x, data.y]
        if self.algorithmToUse == 'PNCC':
            # self.fastMatchingPyramid.setTemplateSize(int((templateSize*self.scaleTemplate)/1.8))
            self.fastMatchingPyramid.setTemplateSize2D(templateSize)
    def computeTheCenterUsingDifferentAlgorithm(self, template,image):
        """Locate the master-camera template inside the slave image.

        Returns the matched centre point, or None when no algorithm branch ran
        or no match was produced. Also updates the 'Slave Camera' window.
        """
        centerPoint = None
        if self.algorithmToUse == 'PNCC':
            # The template is cut from the centre of the master frame.
            self.fastMatchingPyramid.createTemplate(template, self.imageSize/2)
            # cv2.imshow("template image", self.fastMatchingPyramid.getTemplate())
            # print("1")
            _img, centerPoint = self.fastMatchingPyramid.trackObject(image)
            # print("2")
            cv2.imshow('Slave Camera', _img)
        elif self.algorithmToUse == 'feature' or self.featueMatchingAlgorithmState:
            # Crop a square patch around the master image centre for feature matching.
            Size = self.templateSize
            template = template[self.imageSize[1]/2-Size:self.imageSize[1]/2+Size, self.imageSize[0]/2-Size:self.imageSize[0]/2+Size ]
            # _img, centerPoint = self.trackingFeature.BruteForceMatchingwithSIFTDescriptorsandRatioTest(template, image)
            _img, centerPoint = self.trackingFeature.algorithmDictionary[self.algorithmToUse](template, image) # kaze, FLANN, Brute
            # _img, centerPoint = self.trackingFeature.kaze_match(template, image)
            cv2.imshow('Slave Camera', _img)
        return centerPoint
    def trackObject(self, visualAttention = False):
        """Main tracking loop: verge the slave camera onto the master's target.

        Publishes /right/onTarget (True once both axes stay centred for >35
        iterations) and returns the triangulated [x, y, z] position when the
        loop ends (two right-clicks in the window, or 'q' to quit outright).
        """
        if visualAttention:
            self.motorCountForTerminate =0;
        rate = rospy.Rate(60)  # loop at 60 Hz
        cv2.setMouseCallback('Slave Camera', self.my_mouse_callback)
        panMotorstate = None
        tiltMotorState = None
        while not rospy.is_shutdown():
            rate.sleep()
            # Publish the coordinate
            self.drawTrackingSystem.calculateThePosition()
            # print [x,y,z]
            if self.left_image is not None and self.right_image is not None:
                # Match the master template in the slave frame and convert the
                # offset from centre into pan/tilt motor commands.
                centerPoint = self.computeTheCenterUsingDifferentAlgorithm(self.left_image,self.right_image )
                differences = self.calculateDifferences(centerPoint)
                panMotorstate = self.moveMotor(differences[0])
                if self.activeTilitController:
                    tiltMotorState = self.TiltMoveMotor(differences[1])
                ikey = cv2.waitKey(3)
                if ikey == ord('q'):
                    # Re-centre the motors, then hard-exit the process.
                    self.moveToZero()
                    exit()
                elif ikey == ord('s'):
                    if self.algorithmToUse == 'PNCC':
                        # self.saveImage(self.fastMatchingPyramid.getTemplate())
                        pass
                # Right mouse button (see my_mouse_callback) arms termination.
                if self.terminateButton == 2 :
                    break
                # if visualAttention == True:
                boolState = Bool()
                boolState.data = False
                self.OnTargetPublisher.publish(boolState)
                if panMotorstate and tiltMotorState:
                    # Both axes inside their dead-bands; require a sustained
                    # streak before declaring the target centred.
                    self.motorCountForTerminate += 1
                    # print("count: " , self.motorCountForTerminate)
                    if self.motorCountForTerminate > 35:
                        # print('Target centered')
                        self.motorCountForTerminate = 0
                        boolState.data = True
                        self.OnTargetPublisher.publish(boolState)
                        # break
        [x,y,z] = self.drawTrackingSystem.calculateThePosition()
        return [x,y,z]
def calculateDifferences(self, centerPoint):
if centerPoint is not None:
return centerPoint - self.imageSize/2
else:
return np.array([0 , 0])
def buildPyramid(self, image, maxleval):
"""Build image pyramid for level [0,...,maxlevel]
"""
imgpyr = [image]
aux = image
for i in range(0,maxleval):
aux = cv2.pyrDown(aux)
imgpyr.append(aux)
imgpyr.reverse()
return imgpyr
| 41.457792 | 134 | 0.608662 |
19f9dec0a5382290dfe8ea07a917e0029022e9b7 | 2,249 | py | Python | jupyterlab/PreviewSearchPage.py | Larz60p/MakerProjectApril2019 | 2fd4d68aa66c1f4ad3b01f6a9589a078319280d7 | [
"MIT"
] | 1 | 2019-04-25T22:53:52.000Z | 2019-04-25T22:53:52.000Z | jupyterlab/PreviewSearchPage.py | Larz60p/MakerProjectApril2019 | 2fd4d68aa66c1f4ad3b01f6a9589a078319280d7 | [
"MIT"
] | null | null | null | jupyterlab/PreviewSearchPage.py | Larz60p/MakerProjectApril2019 | 2fd4d68aa66c1f4ad3b01f6a9589a078319280d7 | [
"MIT"
] | null | null | null | # PreviesSearchPage.py
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import BusinessPaths
import time
import PrettifyPage
import CreateDict
import json
import sys
class PreviewSearchPage:
def __init__(self):
self.bpath = BusinessPaths.BusinessPaths()
self.pp = PrettifyPage.PrettifyPage()
self.cd = CreateDict.CreateDict()
self.analyze_page()
def start_browser(self):
caps = webdriver.DesiredCapabilities().FIREFOX
caps["marionette"] = True
self.browser = webdriver.Firefox(capabilities=caps)
def stop_browser(self):
self.browser.close()
def save_page(self, filename):
soup = BeautifulSoup(self.browser.page_source, "lxml")
with filename.open('w') as fp:
fp.write(self.pp.prettify(soup, 2))
def analyze_page(self):
self.start_browser()
self.get_search_page('Andover')
self.stop_browser()
def get_search_page(self, searchitem):
# pick city with multiple pages
url = self.bpath.base_url
self.browser.get(url)
time.sleep(2)
print(f'Main Page URL: {self.browser.current_url}')
self.browser.find_element(By.XPATH, '/html/body/div[2]/div[4]/div/form/div/div/span[1]/select/option[3]').click()
searchbox = self.browser.find_element(By.XPATH, '//*[@id="query"]')
searchbox.clear()
searchbox.send_keys(searchitem)
self.browser.find_element(By.XPATH, '/html/body/div[2]/div[4]/div/form/div/div/span[3]/button').click()
time.sleep(2)
print(f'Results Page 1 URL: {self.browser.current_url}')
# get page 2
# find next page button and click
self.browser.find_element(By.XPATH, '/html/body/div[2]/div/div[2]/div[3]/div[2]/div/span[1]/a/icon').click()
time.sleep(2)
print(f'Results Page 2 URL: {self.browser.current_url}')
# Get url of a detail page
self.browser.find_element(By.XPATH, '/html/body/div[2]/div/div[2]/table/tbody/tr[1]/td[1]/a').click()
time.sleep(2)
print(f'Detail Page URL: {self.browser.current_url}')
if __name__ == '__main__':
PreviewSearchPage()
| 34.075758 | 121 | 0.649622 |
d78f98c344ff3704bf6b81bef097dee15b204be1 | 6,805 | py | Python | zeitsprung/scraping.py | munterfinger/zeitsprung | 8b1a539069cd0d5508b5ce419fc2ba8c26aaecf2 | [
"MIT"
] | null | null | null | zeitsprung/scraping.py | munterfinger/zeitsprung | 8b1a539069cd0d5508b5ce419fc2ba8c26aaecf2 | [
"MIT"
] | 109 | 2020-10-03T16:41:30.000Z | 2021-09-16T21:03:34.000Z | zeitsprung/scraping.py | munterfinger/zeitsprung | 8b1a539069cd0d5508b5ce419fc2ba8c26aaecf2 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from datetime import datetime, timezone
from io import BytesIO
from json import loads
from pathlib import Path
from pydub import AudioSegment
from requests import get
from time import sleep
from typing import Union
from zeitsprung.base import Base
from zeitsprung.database import SQLiteEngine
class Scraper(Base):
    """Class for scraping and preprocessing the data from the 'www.zeitsprung.fm' website."""
    def __init__(self, data_folder: str, update_interval: int = 24*60*60,
                 reset: bool = False, verbose: bool = True) -> None:
        """
        Class constructor for the Scraper class.
        Parameters
        ----------
        data_folder : str
            Folder to store the database and audio files. Is created or if existing, the files will bind to this.
        update_interval : int, default 24*60*60
            Interval (in seconds) to wait before retrying after the last published episode is fetched.
        reset : bool, default False
            Re-run the schema setup on an existing database.
        verbose : bool, default True
            Print messages about the activities conducted by a class instance.
        """
        super().__init__(verbose)
        self.created_at = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
        self.data_folder = Path(data_folder)
        self.db = SQLiteEngine(self.data_folder / 'zeitsprung.db')
        self.update_interval = update_interval
        self.verbose = verbose
        # NOTE(review): the "reset" branch re-runs setup_schema() but does not
        # delete the existing database file — confirm setup_schema drops tables.
        if (self.data_folder / 'zeitsprung.db').exists() and reset:
            self._print(f"Overwriting existing directory structure in '{data_folder}'.")
            Path(data_folder).mkdir(parents=True, exist_ok=True)
            (Path(data_folder) / 'audio').mkdir(parents=True, exist_ok=True)
            self.db.setup_schema()
        elif (self.data_folder / 'zeitsprung.db').exists() and not reset:
            self._print(f"Binding to existing directory structure in '{data_folder}'.")
        else:
            self._print(f"Creating directory structure in '{data_folder}'.")
            Path(data_folder).mkdir(parents=True, exist_ok=True)
            (Path(data_folder) / 'audio').mkdir(parents=True, exist_ok=True)
            self.db.setup_schema()
        # Resume scraping after the last episode already stored in the database.
        self.current_episode = self.db.query_last_episode_id()
    def __str__(self) -> str:
        """
        Print function of the class.
        Returns
        -------
        str
            A string, which describes the class instance.
        """
        return f"Scraper created at '{self.created_at}' with db connection to " \
               f"'{self.db.db_file}', current episode is 'ZS{self.current_episode}'."
    def get_episode_meta(self, i: int) -> Union[list, None]:
        """
        Gets the episode's meta data (title, description, publication and modified at date)
        from the episode page.
        Parameters
        ----------
        i : int
            Number of the episode.
        Returns
        -------
        list:
            [id, published, modified, title, subtitle, description, url, download_url]
            or None when the episode page does not exist (yet).
        """
        # Episode numbers below 10 are zero-padded in the page URL (zs01 ... zs09).
        url = f"https://www.zeitsprung.fm/podcast/zs{'0'+str(i) if i < 10 else str(i)}/"
        self._print(f'Requesting meta data of episode {i}: {url}')
        html_doc = get(url)
        if html_doc.status_code == 200:
            soup = BeautifulSoup(html_doc.content, 'html.parser')
            # First <script> tag holds the JSON-LD metadata ('@graph' entries).
            script_content = loads(soup.find("script").contents[0])
            title = soup.find('title').get_text(strip=True).split(":")
            return [
                i,
                datetime.fromisoformat(self.search_key('datePublished', script_content['@graph'])),
                datetime.fromisoformat(self.search_key('dateModified', script_content['@graph'])),
                title[0],
                title[1][1:],
                soup.find("meta", {"property": "og:description"}).get('content'),
                soup.find("meta", {"property": "og:url"}).get('content'),
                None if soup.find("ul", {"class": "episode_download_list"}) is None else soup.find(
                    "ul", {"class": "episode_download_list"}).find_all('a')[0].get('href')
            ]
        else: # html_doc.status_code == 404:
            return None
    @staticmethod
    def search_key(key, dict_obj):
        """Return the value of *key* from the first entry in *dict_obj* that contains it."""
        for entry in dict_obj:
            if key in entry:
                return entry[key]
    def get_episode_audio(self, url: str) -> Union[AudioSegment, None]:
        """
        Downloads the audio of a specified episode.
        Parameters
        ----------
        url : str
            URL to download the audio from (may be None when no download is offered).
        Returns
        -------
        AudioSegment:
            The audio of the episode, or None when no URL was given.
        """
        if url is not None:
            self._print(f"Fetching audio file from {url}")
            audio_mp3 = BytesIO(get(url, allow_redirects=True).content)
            audio = AudioSegment.from_file(audio_mp3)
            return audio
        else:
            self._print('No audio file available for this episode.')
            return None
    def save_episode_audio(self, audio: AudioSegment, file_name: str) -> None:
        """
        Save the audio file of an episode as a '.wav' file.
        Parameters
        ----------
        audio : AudioSegment
            Audio file to save.
        file_name : str
            File name with path, where the file should be saved to.
        Returns
        -------
        None
        """
        self._print(f"Exporting audio sequence to file '{file_name}'")
        audio.export(file_name, format="wav")
    def run(self) -> None:
        """
        Start the scraper, which will download the meta data and audio files of all not yet existing episodes in the
        database, then poll for new episodes every `update_interval` seconds.
        Returns
        -------
        None
        """
        while True:
            meta_row = self.get_episode_meta(self.current_episode + 1)
            if meta_row is not None:
                self.db.insert_meta_row(meta_row)
                # meta_row[7] is the episode's audio download URL (may be None).
                audio = self.get_episode_audio(meta_row[7])
                if audio is not None:
                    audio_row = [
                        self.current_episode + 1,
                        self.data_folder / 'audio' / f'{str(self.current_episode + 1).zfill(3)}.wav',
                        round(audio.duration_seconds),
                        audio.frame_rate,
                        audio.frame_width
                    ]
                    self.save_episode_audio(audio, audio_row[1])
                    self.db.insert_audio_row(audio_row)
                self.current_episode += 1
            else:
                # Next episode not published yet: back off and retry later.
                self._print(f"Episode not yet published, pausing for {int(self.update_interval/(60*60))} hours.")
                sleep(self.update_interval)
| 36.390374 | 116 | 0.574137 |
e5d9ec84f3a938f06ee5218780c541b0b6a8fb9a | 1,789 | py | Python | decrypt_oracle/setup.py | Alan01252/aws-encryption-sdk-python | 67cf41715b9841d374be07e39fd9ded9fb824280 | [
"Apache-2.0"
] | null | null | null | decrypt_oracle/setup.py | Alan01252/aws-encryption-sdk-python | 67cf41715b9841d374be07e39fd9ded9fb824280 | [
"Apache-2.0"
] | null | null | null | decrypt_oracle/setup.py | Alan01252/aws-encryption-sdk-python | 67cf41715b9841d374be07e39fd9ded9fb824280 | [
"Apache-2.0"
] | null | null | null | """API Gateway + Lambda decryption oracle using the AWS Encryption SDK for Python."""
import os
import re
from setuptools import find_packages, setup
VERSION_RE = re.compile(r"""__version__ = ['"]([0-9.]+)['"]""")
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*args):
    """Read and return the complete contents of a file located relative to this script.

    The path components in *args* are joined onto HERE (the setup.py directory).
    """
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(os.path.join(HERE, *args)) as f:
        return f.read()
def get_version():
    """Extract the package version string from the module's __init__.py."""
    init_contents = read("src", "aws_encryption_sdk_decrypt_oracle", "__init__.py")
    match = VERSION_RE.search(init_contents)
    return match.group(1)
def get_requirements():
    """Return the pinned requirements as a list of one line per requirement."""
    raw = read("requirements-actual.txt")
    return list(raw.strip().splitlines())
# Package metadata: builds the decrypt-oracle distribution from src/ with the
# version read from the package __init__ and dependencies pinned in
# requirements-actual.txt.
setup(
    name="aws-encryption-sdk-decrypt-oracle",
    packages=find_packages("src"),
    package_dir={"": "src"},
    version=get_version(),
    author="Amazon Web Services",
    maintainer="Amazon Web Services",
    author_email="aws-cryptools@amazon.com",
    url="https://github.com/awslabs/aws-encryption-sdk-python",
    description="API Gateway + Lambda decryption oracle using the AWS Encryption SDK for Python",
    long_description=read("README.rst"),
    keywords="aws-encryption-sdk aws kms encryption",
    license="Apache License 2.0",
    install_requires=get_requirements(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Security",
        "Topic :: Security :: Cryptography",
    ],
)
| 33.12963 | 97 | 0.667412 |
59270d72e76d7b6eb00ef832efda884d1036031a | 5,243 | py | Python | models/train_classifier.py | dakcicek/disaster_response | c1abd82a22fa8be912eea362664106c60acc4f3d | [
"MIT"
] | null | null | null | models/train_classifier.py | dakcicek/disaster_response | c1abd82a22fa8be912eea362664106c60acc4f3d | [
"MIT"
] | null | null | null | models/train_classifier.py | dakcicek/disaster_response | c1abd82a22fa8be912eea362664106c60acc4f3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# # ML Pipeline Preparation
import nltk
nltk.download('punkt')
nltk.download('wordnet')
import re
import sys
import pickle
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords'])
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk import word_tokenize, sent_tokenize
from sklearn.pipeline import Pipeline
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import learning_curve,GridSearchCV,RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import make_scorer
from time import time
from nltk.stem import SnowballStemmer
from string import punctuation
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.ensemble import AdaBoostClassifier
def load_data(database_filepath):
    """
    Load the DisasterResponse table from the given SQLite database.

    Input:
        database_filepath: path to the SQLite database file
    Output:
        X: message text series (features)
        y: one-hot category dataframe (targets)
    """
    # load data from database
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('DisasterResponse', engine)
    # define features and target; the first four columns are metadata
    # (id/message/original/genre), the rest are the category labels
    X = df.message
    y = df.iloc[:, 4:]
    return X, y
def tokenize(text):
    """Normalize, tokenize and lemmatize a message string.

    Input: Text data
    Output: List of lowercased, lemmatized tokens with punctuation removed
    """
    # drop punctuation characters before tokenizing
    stripped = ''.join(ch for ch in text if ch not in punctuation)
    words = word_tokenize(stripped)
    lemmatizer = WordNetLemmatizer()
    # lemmatize, normalize case, and remove leading/trailing white space
    return [lemmatizer.lemmatize(word).lower().strip() for word in words]
def build_pipeline():
    """Build a grid-searched multi-output text-classification model.

    The pipeline is bag-of-words -> tf-idf -> one AdaBoost classifier per
    category column; GridSearchCV tunes the AdaBoost hyper-parameters.
    """
    print('Building pipeline..')
    # model pipeline
    grid_search_pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize, stop_words='english')),
        ('tfidf', TfidfTransformer(norm='l2')),
        ('clf', MultiOutputClassifier(AdaBoostClassifier()))
    ])
    # hyper-parameter grid for the per-category AdaBoost estimators
    parameters = {
        'clf__estimator__n_estimators': [5, 10, 50],
        'clf__estimator__learning_rate': [0.5, 1],
    }
    grid_search_clf = GridSearchCV(grid_search_pipeline, parameters)
    return grid_search_clf
def evaluate_model(model, X_test, Y_test):
    """
    Prints the classification report for the given model and test data
    Input:
        model: trained model
        X_test: test data for the predication
        Y_test: true test labels for the X_test data
    Output:
        None
    """
    y_pred = model.predict(X_test)
    #Accuracy over all
    print('Overall Accuracy: {}'.format(accuracy_score(Y_test.iloc[:, 1:].values, np.array([x[1:] for x in y_pred]))))
    # Calculate the accuracy for each of them.
    for idx, column in enumerate(Y_test.columns):
        truth = Y_test.iloc[:, idx].values
        predicted = y_pred[:, idx]
        print("Category Name:", column, "\n", classification_report(truth, predicted))
        print('Accuracy of %25s: %.2f' % (column, accuracy_score(truth, predicted)))
def save_model(model, model_file_name):
    """Serialize the trained model to disk as a pickle file.

    Input:
        model: any picklable object (here the fitted GridSearchCV model)
        model_file_name: destination file path
    """
    # Use a context manager so the file handle is flushed and closed
    # (the original passed an anonymous open() handle that was never closed).
    with open(model_file_name, 'wb') as fp:
        pickle.dump(model, fp)
def main():
    """CLI entry point: load data, train, evaluate and persist the classifier.

    Expects two positional arguments: the SQLite database path and the
    destination pickle file path.
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n DATABASE: {}'.format(database_filepath))
        X, Y = load_data(database_filepath)
        # 80/20 train/test split
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        print('Building model...')
        model = build_pipeline()
        print('Training model...')
        model.fit(X_train, Y_train)
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test)
        print('Saving model...\n MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
    main()
| 32.565217 | 128 | 0.692924 |
12514602e85629e507f4571067c346e3dc161f50 | 6,641 | py | Python | utils/tag_util.py | LindgeW/BiaffineNER | 0ae179e9ff731362f6c8ba6d0b24485ad45e8bbf | [
"Apache-2.0"
] | 13 | 2020-10-24T08:09:27.000Z | 2022-03-04T02:56:50.000Z | utils/tag_util.py | LindgeW/BiaffineNER | 0ae179e9ff731362f6c8ba6d0b24485ad45e8bbf | [
"Apache-2.0"
] | 4 | 2021-04-10T10:06:37.000Z | 2022-03-08T10:25:53.000Z | utils/tag_util.py | LindgeW/BiaffineNER | 0ae179e9ff731362f6c8ba6d0b24485ad45e8bbf | [
"Apache-2.0"
] | 3 | 2020-10-26T06:38:17.000Z | 2021-08-19T18:29:49.000Z | '''
CWS / POS标签规范:
1、BI
2、BIS
3、BIES
NER标签规范:
1、BIO
2、BISO
3、BIESO
'''
def bi2bies(bi_tags):
    """Convert a BI tag sequence to BIES in place and return it.

    A 'B' not followed by 'I' becomes 'S'; an 'I' that ends its run becomes 'E'.
    """
    last = len(bi_tags) - 1
    for idx, tag in enumerate(bi_tags):
        run_ends = idx == last or bi_tags[idx + 1] != 'I'
        if tag == 'B' and run_ends:
            bi_tags[idx] = 'S'
        elif tag == 'I' and run_ends:
            bi_tags[idx] = 'E'
    return bi_tags
# BIO -> BIOES
def bio2bioes(bio_tags):
    """Convert a BIO tag sequence to BIOES in place and return it.

    A 'B-X' not followed by an inside tag becomes 'S-X'; an 'I-X' that ends
    its run becomes 'E-X'.
    """
    last = len(bio_tags) - 1
    for idx, tag in enumerate(bio_tags):
        followed_by_inside = idx < last and 'I-' in bio_tags[idx + 1]
        if followed_by_inside:
            continue
        if 'B-' in tag:
            bio_tags[idx] = 'S-' + tag.split('-')[1]
        elif 'I-' in tag:
            bio_tags[idx] = 'E-' + tag.split('-')[1]
    return bio_tags
# =============================CWS============================== #
# BI
def extract_cws_bi_span(tag_seq):
    """Extract (start, end) word spans from a BI-style CWS tag sequence.

    Stray 'I' tags with no opening 'B' are ignored.
    """
    spans = []
    seq_len = len(tag_seq)
    begin = 0
    in_word = False
    for pos, tag in enumerate(tag_seq):
        run_ends = pos + 1 == seq_len or tag_seq[pos + 1] != 'I'
        if tag == 'B':
            begin = pos
            if run_ends:
                spans.append((begin, pos))
                in_word = False
            else:
                in_word = True
        elif tag == 'I':
            if in_word and run_ends:
                spans.append((begin, pos))
                in_word = False
        else:
            in_word = False
    return spans
# BIS
def extract_cws_bis_span(tag_seq):
    """Extract (start, end) word spans from a BIS-style CWS tag sequence.

    'S' marks a single-character word; a 'B' must be followed by at least one
    'I' to yield a span (singletons are expected to use 'S').
    """
    spans = []
    seq_len = len(tag_seq)
    begin = 0
    open_word = False
    for pos, tag in enumerate(tag_seq):
        if tag == 'S':
            spans.append((pos, pos))
            open_word = False
        elif tag == 'B':
            begin = pos
            open_word = True
        elif tag == 'I':
            run_ends = pos + 1 == seq_len or tag_seq[pos + 1] != 'I'
            if open_word and run_ends:
                spans.append((begin, pos))
                open_word = False
        else:
            open_word = False
    return spans
# BIES / BMES
def extract_cws_bies_span(tag_seq):
    """Extract (start, end) word spans from a BIES/BMES-style CWS tag sequence.

    'M' is accepted as a synonym for 'I' (BMES convention).
    """
    spans = []
    begin = 0
    open_word = False
    for pos, tag in enumerate(tag_seq):
        if tag == 'S':
            spans.append((pos, pos))
            open_word = False
        elif tag == 'B':
            begin = pos
            open_word = True
        elif tag == 'E':
            if open_word:
                spans.append((begin, pos))
                open_word = False
        elif tag not in ('I', 'M'):
            # any other tag breaks the currently open word
            open_word = False
    return spans
# =============================NER============================== #
def extract_ner_bio_span(tag_seq: list):
    """Extract (start, end, type) entity spans from a BIO tag sequence.

    An entity ends at the last consecutive 'I-' tag of the *same* type.
    Fix over the previous version: a 'B-X' (or open 'X' entity) followed by an
    'I-' tag of a different type is now closed at its last valid position
    instead of being silently dropped.
    """
    span_res = []
    n = len(tag_seq)
    s = 0
    type_b = None
    start = False

    def _continues(idx, ent_type):
        # True when position idx+1 holds an I- tag of the same entity type.
        nxt = idx + 1
        return (nxt < n and tag_seq[nxt].startswith('I-')
                and tag_seq[nxt].split('-')[1] == ent_type)

    for i, tag in enumerate(tag_seq):
        if tag == 'O':
            start = False
        elif tag.startswith('B-'):
            s = i
            type_b = tag.split('-')[1]
            if _continues(i, type_b):
                start = True
            else:
                # single-token entity (including the case where the next tag
                # is an I- of a different type)
                span_res.append((s, i, type_b))
                start = False
        elif tag.startswith('I-'):
            if start and tag.split('-')[1] == type_b:
                if not _continues(i, type_b):
                    span_res.append((s, i, type_b))
                    start = False
    return span_res
def extract_ner_biso_span(tag_seq):
    """Extract (start, end, type) entity spans from a BISO tag sequence.

    NOTE(review): a bare 'B-' with no following 'I-' of the same type yields
    no span here (singletons are expected to use 'S-') — confirm intended.
    """
    spans = []
    seq_len = len(tag_seq)
    begin = 0
    open_type = None
    active = False
    for pos, tag in enumerate(tag_seq):
        if tag == 'O':
            active = False
        elif tag.startswith('S-'):
            spans.append((pos, pos, tag.split('-')[1]))
            active = False
        elif tag.startswith('B-'):
            begin = pos
            open_type = tag.split('-')[1]
            active = True
        elif tag.startswith('I-'):
            if active and tag.split('-')[1] == open_type:
                next_is_inside = pos + 1 < seq_len and tag_seq[pos + 1].startswith('I-')
                if not next_is_inside:
                    spans.append((begin, pos, open_type))
                    active = False
    return spans
def extract_ner_bieso_span(tag_seq):
    """Extract (start, end, type) entity spans from a BIESO tag sequence.

    An entity is closed by an 'E-' tag of the same type as its opening 'B-';
    an 'E-' of a different type invalidates the open entity.
    """
    spans = []
    begin = 0
    active = False
    open_type = None
    for pos, tag in enumerate(tag_seq):
        if tag == 'O':
            active = False
        elif tag.startswith('S-'):
            spans.append((pos, pos, tag.split('-')[1]))
            active = False
        elif tag.startswith('B-'):
            begin = pos
            open_type = tag.split('-')[1]
            active = True
        elif tag.startswith('E-'):
            close_type = tag.split('-')[1]
            if active and close_type == open_type:
                spans.append((begin, pos, open_type))
                active = False
            elif close_type != open_type:
                active = False
    return spans
def seq_match(main_str, sub_strs: list):
    """Find every (possibly overlapping) occurrence of each substring.

    :param main_str: sequence to search in
    :param sub_strs: list of substrings to look for
    :return: set of (start, end) index pairs, end inclusive
    """
    matches = set()
    total = len(main_str)
    for needle in sub_strs:
        width = len(needle)
        matches.update(
            (offset, offset + width - 1)
            for offset in range(total - width + 1)
            if main_str[offset:offset + width] == needle
        )
    return matches
def span2tags(spans, seq_len, default_tag='O'):
    """Render spans as a BIES(-type) tag sequence.

    :param spans: iterable of (start, end) or (start, end, cls) tuples
    :param seq_len: length of the output tag sequence
    :param default_tag: tag used for positions outside every span
    :return: list of tags of length seq_len
    :raises ValueError: if a span tuple has an unexpected arity
    :raises IndexError: if a span's start is greater than its end
    """
    tags = [default_tag] * seq_len
    for span in spans:
        if len(span) == 3:
            begin, end, label = span
            suffix = '-' + label
        elif len(span) == 2:
            begin, end = span
            suffix = ''
        else:
            raise ValueError
        if begin > end:
            raise IndexError
        if begin == end:
            tags[begin] = 'S' + suffix
        else:
            tags[begin] = 'B' + suffix
            tags[end] = 'E' + suffix
            tags[begin + 1:end] = ['I' + suffix] * (end - begin - 1)
    return tags
def test_():
    """Ad-hoc manual smoke test: exercises the span extractors, span2tags and
    seq_match on hand-written tag sequences and prints the results.

    The commented-out lines are alternative inputs/extractors kept for
    interactive experimentation.
    """
    # x = 'I I S B S I B X I S B I I S S B I S S B I I'.split()
    # x = 'I I B B X I B B I I B I B I B B I B I B I I'.split()
    # x = 'B I E S B E S S B I I E B I S B E S E'.split()
    # y = extract_cws_bi_span(x)
    # y = extract_cws_bis_span(x)
    # y = extract_cws_bies_span(x)
    # x = 'O I-per S-org S-org I-per S-org B-loc I-loc E-loc O B-org E-org O B-per E-per B-loc E-per O S-LOC E-LOC'.split()
    x = 'O B-PER I-PER I-LOC B-PER I-PER B-LOC B-ORG I-ORG O B-LOC I-LOC I-PER'.split()
    y = extract_ner_bio_span(x)
    # y = extract_ner_biso_span(x)
    # y = extract_ner_bieso_span(x)
    print(y)
    spans = [(1, 2), (3, 3), (4, 6)]
    tags = span2tags(spans, 10)
    print(tags)
    main_str = '我是一个目前在阿里巴巴实习的研究生,方向是NLP'
    sub_str = ['阿里巴巴', 'NLP']
    res = seq_match(main_str, sub_str)
    tags = span2tags(res, len(main_str))
    print(res)
    print(tags)
| 26.043137 | 123 | 0.455504 |
1853d6c5b5a086a995ab5e8a253edec8d84dccfa | 4,032 | py | Python | mistral/rpc/kombu/kombu_listener.py | saadkhan44/mistral | 4d95c032c7dafbc1a0079af17424db1d9a5851c5 | [
"Apache-2.0"
] | 3 | 2015-08-28T04:57:56.000Z | 2017-03-27T10:59:56.000Z | mistral/rpc/kombu/kombu_listener.py | saadkhan44/mistral | 4d95c032c7dafbc1a0079af17424db1d9a5851c5 | [
"Apache-2.0"
] | 21 | 2015-04-14T22:41:53.000Z | 2019-02-20T09:30:10.000Z | mistral/rpc/kombu/kombu_listener.py | saadkhan44/mistral | 4d95c032c7dafbc1a0079af17424db1d9a5851c5 | [
"Apache-2.0"
] | 12 | 2015-08-14T02:27:37.000Z | 2020-12-31T10:09:21.000Z | # Copyright (c) 2016 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from kombu.mixins import ConsumerMixin
import six
import threading
from oslo_log import log as logging
from mistral.rpc.kombu import base as kombu_base
LOG = logging.getLogger(__name__)
class KombuRPCListener(ConsumerMixin):
    """AMQP consumer that collects RPC responses from a callback queue.

    Callers register a correlation id via ``add_listener`` and block on
    ``get_result``; ``on_message`` routes each incoming response into the
    matching per-correlation-id queue. The consumer loop runs in a daemon
    thread started by ``start``.
    """
    def __init__(self, connections, callback_queue):
        # correlation_id -> queue.Queue holding that call's result
        self._results = {}
        # round-robin over the available broker connections on failure
        self._connections = itertools.cycle(connections)
        self._callback_queue = callback_queue
        self._thread = None
        self.connection = six.next(self._connections)
        # set once the consumer has been declared (see get_consumers)
        self.ready = threading.Event()
    def add_listener(self, correlation_id):
        """Register a queue to receive the result for *correlation_id*."""
        self._results[correlation_id] = six.moves.queue.Queue()
    def remove_listener(self, correlation_id):
        """Drop the result queue for *correlation_id*, if present."""
        if correlation_id in self._results:
            del self._results[correlation_id]
    def get_consumers(self, Consumer, channel):
        """ConsumerMixin hook: declare the callback-queue consumer and signal readiness."""
        consumers = [Consumer(
            self._callback_queue,
            callbacks=[self.on_message],
            accept=['pickle', 'json']
        )]
        self.ready.set()
        return consumers
    def start(self):
        """Start the consumer loop in a daemon thread (idempotent)."""
        if self._thread is None:
            self._thread = threading.Thread(target=self.run)
            self._thread.daemon = True
            self._thread.start()
    def on_message(self, response, message):
        """Callback on response.
        This method is automatically called when a response is incoming and
        decides if it is the message we are waiting for - the message with the
        result.
        :param response: the body of the amqp message already deserialized
            by kombu
        :param message: the plain amqp kombu.message with additional
            information
        """
        LOG.debug("Got response: {0}".format(response))
        try:
            message.ack()
        except Exception as e:
            LOG.exception("Failed to acknowledge AMQP message: %s", e)
        else:
            LOG.debug("AMQP message acknowledged.")
            # Route the response to whoever registered this correlation id.
            correlation_id = message.properties['correlation_id']
            queue = self._results.get(correlation_id)
            if queue:
                result = {
                    kombu_base.TYPE: 'error'
                    if message.properties.get('type') == 'error'
                    else None,
                    kombu_base.RESULT: response
                }
                queue.put(result)
            else:
                LOG.debug(
                    "Got a response, but seems like no process is waiting for"
                    "it [correlation_id={0}]".format(correlation_id)
                )
    def get_result(self, correlation_id, timeout):
        """Block until the result for *correlation_id* arrives (or timeout expires)."""
        return self._results[correlation_id].get(block=True, timeout=timeout)
    def on_connection_error(self, exc, interval):
        """ConsumerMixin hook: rotate to the next broker connection on failure."""
        self.ready.clear()
        self.connection = six.next(self._connections)
        LOG.debug("Broker connection failed: %s", exc)
        LOG.debug(
            "Sleeping for %s seconds, then retrying connection",
            interval
        )
    def wait_ready(self, timeout=10.0):
        """Waits for the listener to successfully declare the consumer
        :param timeout: timeout for waiting in seconds
        :return: the active connection on success, False on timeout
        :rtype: kombu connection or bool
        """
        if self.ready.wait(timeout=timeout):
            return self.connection
        else:
            return False
| 31.5 | 78 | 0.62004 |
06712c8de3e40d29268f0df73a91cbf96d883c9a | 2,207 | py | Python | numpy_serial.py | berquist/gemm | 4eca3f6c4f20199b8d64d8cf78f926d79131f69f | [
"MIT"
] | null | null | null | numpy_serial.py | berquist/gemm | 4eca3f6c4f20199b8d64d8cf78f926d79131f69f | [
"MIT"
] | null | null | null | numpy_serial.py | berquist/gemm | 4eca3f6c4f20199b8d64d8cf78f926d79131f69f | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
import time
import numpy as np
def numpy_vec(scale, X, Y, result):
    """Compute result = scale*X + Y with one vectorized numpy expression.

    Writes into the *result* buffer in place via slice assignment. The
    previous version rebound the local name (`result = ...`), which left the
    caller's array untouched, so the benchmark measured work whose output
    was discarded.
    """
    result[:] = (scale * X) + Y
    return
def numpy_loop(N, scale, X, Y, result):
    """Fill result[i] = scale*X[i] + Y[i] for i in [0, N) with an explicit loop.

    Works on any indexable sequences (numpy arrays or plain lists).
    """
    for idx in range(N):
        result[idx] = scale * X[idx] + Y[idx]
    return
def main():
    """Benchmark SAXPY (result = scale*X + Y) three ways and print the ratios:

    1. vectorized numpy expression,
    2. explicit Python loop over numpy arrays,
    3. explicit Python loop over plain lists.

    For each variant the minimum wall-clock time over num_run_times repeats
    is reported.
    """
    N = 2 * 1000 * 1000
    num_run_times = 5
    # penalty for using a double (np.float64)?
    scale = 2.0
    arrayX = np.empty(shape=(N), dtype=np.float32)
    arrayY = np.empty(shape=(N), dtype=np.float32)
    result = np.empty(shape=(N), dtype=np.float32)
    print('Number of repeats: {}'.format(num_run_times))
    # initialize array values
    # ...do it the naive way for now
    for i in range(N):
        arrayX[i] = i
        arrayY[i] = i
        result[i] = 0.0
    list_X = [i for i in range(N)]
    list_Y = [i for i in range(N)]
    list_r = [0.0 for i in range(N)]
    # Variant 1: vectorized numpy
    min_numpy_vec = 1.0e30
    for i in range(num_run_times):
        start_time = time.time()
        numpy_vec(scale, arrayX, arrayY, result)
        end_time = time.time()
        min_numpy_vec = min(min_numpy_vec, end_time - start_time)
    print('[numpy vec]:\t\t{}'.format(min_numpy_vec))
    # clear out the buffer
    result[:] = 0.0
    # Variant 2: Python loop over numpy arrays
    min_numpy_loop = 1.0e30
    for i in range(num_run_times):
        start_time = time.time()
        numpy_loop(N, scale, arrayX, arrayY, result)
        end_time = time.time()
        min_numpy_loop = min(min_numpy_loop, end_time - start_time)
    print('[numpy loop]:\t\t{}'.format(min_numpy_loop))
    # clear out the buffer
    result[:] = 0.0
    # Variant 3: Python loop over plain lists
    min_list_loop = 1.0e30
    for i in range(num_run_times):
        start_time = time.time()
        numpy_loop(N, scale, list_X, list_Y, list_r)
        end_time = time.time()
        min_list_loop = min(min_list_loop, end_time - start_time)
    print('[list loop]:\t\t{}'.format(min_list_loop))
    print('[numpy loop]/[numpy vec]:\t\t{}'.format(min_numpy_loop / min_numpy_vec))
    print('[numpy loop]/[list loop]:\t\t{}'.format(min_numpy_loop / min_list_loop))
    print('[list loop]/[numpy vec]:\t\t{}'.format(min_list_loop / min_numpy_vec))
    return
if __name__ == '__main__':
    main()
| 25.079545 | 83 | 0.615768 |
725c0b570f414f846ae3ba4da2e911dfdd4cd0e3 | 9,706 | py | Python | setup.py | roschler/mythril | 586b321a9468716946bf67592ac2be6c8502ed4c | [
"MIT"
] | null | null | null | setup.py | roschler/mythril | 586b321a9468716946bf67592ac2be6c8502ed4c | [
"MIT"
] | null | null | null | setup.py | roschler/mythril | 586b321a9468716946bf67592ac2be6c8502ed4c | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from setuptools.command.install import install
import os
# Package version (vX.Y.Z). It must match git tag being used for CircleCI
# deployment; otherwise the build will failed.
VERSION = "v0.17.5"
class VerifyVersionCommand(install):
    """Custom command to verify that the git tag matches our version"""
    description = 'verify that the git tag matches our version'
    def run(self):
        """Abort with an error message when the CIRCLE_TAG env var differs from VERSION."""
        # Imported locally: the module never imports sys at top level, so the
        # original code raised NameError instead of exiting with the message.
        import sys
        tag = os.getenv('CIRCLE_TAG')
        if tag != VERSION:
            info = "Git tag: {0} does not match the version of this app: {1}".format(tag, VERSION)
            sys.exit(info)
long_description = '''
Mythril is a security analysis tool for Ethereum smart contracts. It
uses concolic analysis to detect various types of issues. Use it to
analyze source code or as a nmap-style black-box blockchain scanner (an
"ethermap" if you will).
Installation and setup
----------------------
Install from Pypi:
.. code:: bash
$ pip install mythril
Or, clone the GitHub repo to install the newest master branch:
.. code:: bash
$ git clone https://github.com/b-mueller/mythril/
$ cd mythril
$ python setup.py install
Note that Mythril requires Python 3.5 to work.
Function signatures
~~~~~~~~~~~~~~~~~~~
Whenever you disassemble or analyze binary code, Mythril will try to
resolve function names using its local signature database. The database
must be provided at ``~/.mythril/signatures.json``. You can start out
with the `default file <signatures.json>`__ as follows:
::
$ cd ~/.mythril
$ wget https://raw.githubusercontent.com/b-mueller/mythril/master/signatures.json
When you analyze Solidity code, new function signatures are added to the
database automatically.
Security analysis
-----------------
Run ``myth -x`` with one of the input options described below to run the
analysis. This will run the Python modules in the
`/analysis/modules <https://github.com/b-mueller/mythril/tree/master/mythril/analysis/modules>`__
directory.
Mythril detects a range of `security issues <security_checks.md>`__,
including integer underflows, owner-overwrite-to-Ether-withdrawal, and
others. However, the analysis will not detect business logic issues and
is not equivalent to formal verification.
Analyzing Solidity code
~~~~~~~~~~~~~~~~~~~~~~~
In order to work with Solidity source code files, the `solc command line
compiler <http://solidity.readthedocs.io/en/develop/using-the-compiler.html>`__
needs to be installed and in path. You can then provide the source
file(s) as positional arguments, e.g.:
.. code:: bash
$ myth -x myContract.sol
Alternatively, compile the code on `Remix <http://remix.ethereum.org>`__
and pass the runtime binary code to Mythril:
.. code:: bash
$ myth -x -c "0x5060(...)"
If you have multiple interdependent contracts, pass them to Mythril as
separate input files. Mythril will map the first contract to address
"0x0000(..)", the second one to "0x1111(...)", and so forth (make sure
that contract addresses are set accordingly in the source). The contract
passed in the first argument will be executed as the "main" contract.
.. code:: bash
$ myth -x myContract.sol myLibrary.sol
Working with on-chain contracts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To analyze contracts on the blockchain you need an Ethereum node. By
default, Mythril will query a local node via RPC. Alternatively, you can
use `INFURA <https://infura.io>`__:
::
$ myth --infura-mainnet -x -a 0x5c436ff914c458983414019195e0f4ecbef9e6dd
If you are planning to do batch operations or use the contract search
features, running a
`go-ethereum <https://github.com/ethereum/go-ethereum>`__ node is
recommended. Start your local node as follows:
.. code:: bash
$ geth --rpc --rpcapi eth,debug --syncmode fast
Specify the target contract with the ``-a`` option:
.. code:: bash
$ myth -x -a 0x5c436ff914c458983414019195e0f4ecbef9e6dd -v1
Adding the ``-l`` flag will cause Mythril to automatically retrieve
dependencies, such as library contracts:
.. code:: bash
$ myth -x -a 0xEbFD99838cb0c132016B9E117563CB41f2B02264 -l -v1
Control flow graph
------------------
The ``-g FILENAME`` option generates an `interactive jsViz
graph <http://htmlpreview.github.io/?https://github.com/b-mueller/mythril/blob/master/static/mythril.html>`__:
.. code:: bash
$ myth -g ./graph.html -a 0xEbFD99838cb0c132016B9E117563CB41f2B02264 -l
.. figure:: https://raw.githubusercontent.com/b-mueller/mythril/master/static/callgraph7.png
:alt: Call graph
callgraph
[STRIKEOUT:The "bounce" effect, while awesome (and thus enabled by
default), sometimes messes up the graph layout.] Try adding the
``--enable-physics`` flag for a very entertaining "bounce" effect that
unfortunately completely destroys usability.
Blockchain exploration
----------------------
Mythril builds its own contract database to enable fast search
operations. This enables operations like those described in the
`legendary "Mitch Brenner" blog
post <https://medium.com/@rtaylor30/how-i-snatched-your-153-037-eth-after-a-bad-tinder-date-d1d84422a50b>`__
in [STRIKEOUT:seconds] minutes instead of days. Unfortunately, the
initial sync process is slow. You don't need to sync the whole
blockchain right away though: If you abort the syncing process with
``ctrl+c``, it will be auto-resumed the next time you run the
``--init-db`` command.
.. code:: bash
$ myth --init-db
Starting synchronization from latest block: 4323706
Processing block 4323000, 3 individual contracts in database
(...)
The default behavior is to only sync contracts with a non-zero balance.
You can disable this behavior with the ``--sync-all`` flag, but be aware
that this will result in a huge (as in: dozens of GB) database.
Searching from the command line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The search feature allows you to find contract instances that contain
specific function calls and opcode sequences. It supports simple boolean
expressions, such as:
.. code:: bash
$ myth --search "func#changeMultisig(address)#"
$ myth --search "code#PUSH1 0x50,POP#"
$ myth --search "func#changeMultisig(address)# and code#PUSH1 0x50#"
Reading contract storage
~~~~~~~~~~~~~~~~~~~~~~~~
You can read the contents of storage slots from a deployed contract as
follows.
.. code:: bash
./myth --storage 0 -a "0x76799f77587738bfeef09452df215b63d2cfb08a"
0x0000000000000000000000000000000000000000000000000000000000000003
Utilities
---------
Disassembler
~~~~~~~~~~~~
Use the ``-d`` flag to disassemble code. The disassembler accepts a
bytecode string or a contract address as its input.
.. code:: bash
$ myth -d -c "0x6060"
0 PUSH1 0x60
Specifying an address via ``-a ADDRESS`` will download the contract code
from your node.
.. code:: bash
$ myth -d -a "0x2a0c0dbecc7e4d658f48e01e3fa353f44050c208"
0 PUSH1 0x60
2 PUSH1 0x40
4 MSTORE
(...)
1135 - FUNCTION safeAdd(uint256,uint256) -
1136 CALLVALUE
1137 ISZERO
Finding cross-references
~~~~~~~~~~~~~~~~~~~~~~~~
It is often useful to find other contracts referenced by a particular
contract. E.g.:
.. code:: bash
$ myth --search "code#DELEGATECALL#"
Matched contract with code hash 07459966443977122e639cbf7804c446
Address: 0x76799f77587738bfeef09452df215b63d2cfb08a, balance: 1000000000000000
$ myth --xrefs -a 0x76799f77587738bfeef09452df215b63d2cfb08a
5b9e8728e316bbeb692d22daaab74f6cbf2c4691
Calculating function hashes
~~~~~~~~~~~~~~~~~~~~~~~~~~~
To print the Keccak hash for a given function signature:
.. code:: bash
$ myth --hash "setOwner(address)"
0x13af4035
Credit
------
- JSON RPC library is adapted from
`ethjsonrpc <https://github.com/ConsenSys/ethjsonrpc>`__ (it doesn't
seem to be maintained anymore, and I needed to make some changes to
it).
- The signature data in ``signatures.json`` was initially obtained from
the `Ethereum Function Signature
Database <https://www.4byte.directory>`__.
'''
# Package metadata and distribution configuration for Mythril.
# `VERSION`, `long_description`, and `VerifyVersionCommand` are defined
# earlier in this file.
setup(
    name='mythril',
    # NOTE(review): VERSION presumably looks like "vX.Y.Z", so [1:] strips
    # the leading "v" -- confirm against the VERSION definition above.
    version=VERSION[1:],
    description='Security analysis tool for Ethereum smart contracts',
    # Rendered as the PyPI project page (reST text built above).
    long_description=long_description,
    url='https://github.com/b-mueller/mythril',
    author='Bernhard Mueller',
    author_email='bernhard.mueller11@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Software Development :: Disassemblers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='hacking disassembler security ethereum',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[
        'ethereum>=2.3.0',
        'ZODB>=5.3.0',
        'z3-solver>=4.5',
        'laser-ethereum>=0.17.5',
        'requests',
        'BTrees',
        'py-solc',
        'plyvel',
        'pytest',
        'eth_abi>=1.0.0',
        'eth-utils>=1.0.1',
        'eth-account>=0.1.0a2',
        'eth-hash>=0.1.0',
        'eth-keyfile>=0.5.1',
        'eth-keys>=0.2.0b3',
        'eth-rlp>=0.1.0',
        'eth-tester>=0.1.0b21',
        'coverage',
        'jinja2',
        'attrs',
        'rlp<1.0.0'
    ],
    python_requires='>=3.5',
    extras_require={
    },
    include_package_data=True,
    # The `myth` command-line entry point is shipped as a plain script.
    scripts=['myth'],
    # `python setup.py verify` checks the tag against VERSION (defined above).
    cmdclass = {
        'verify': VerifyVersionCommand,
    }
)
| 28.297376 | 110 | 0.69081 |
aa0d7e637ae90024f2569283fe896cec9a02645c | 145 | py | Python | src/common/rucio/lfn.py | ESCAPE-WP2/rucio-analysis | 81e6a7c9df790e5ec0833b21f30843f6ae906855 | [
"MIT"
] | 4 | 2021-04-14T06:29:27.000Z | 2022-03-23T17:38:16.000Z | src/common/rucio/lfn.py | ESCAPE-WP2/rucio-analysis | 81e6a7c9df790e5ec0833b21f30843f6ae906855 | [
"MIT"
] | 4 | 2020-10-06T17:43:49.000Z | 2021-04-07T16:45:18.000Z | src/common/rucio/lfn.py | ESCAPE-WP2/rucio-analysis | 81e6a7c9df790e5ec0833b21f30843f6ae906855 | [
"MIT"
class LFN():
    """Immutable wrapper around a logical file name's absolute path."""

    def __init__(self, abspath):
        """Remember the absolute path of this logical file name."""
        self._path = abspath

    @property
    def abspath(self):
        """Absolute path this LFN refers to (read-only)."""
        return self._path
| 18.125 | 32 | 0.613793 |
30d970d302bb42c9f0b6fee3fefedf099b849f4b | 1,064 | py | Python | pylearn2/utils/bit_strings.py | jacobpeplinskiV2/pylearn2 | 478903d84bfa2b317e78be4e0d78588c2f58496e | [
"BSD-3-Clause"
] | null | null | null | pylearn2/utils/bit_strings.py | jacobpeplinskiV2/pylearn2 | 478903d84bfa2b317e78be4e0d78588c2f58496e | [
"BSD-3-Clause"
] | null | null | null | pylearn2/utils/bit_strings.py | jacobpeplinskiV2/pylearn2 | 478903d84bfa2b317e78be4e0d78588c2f58496e | [
"BSD-3-Clause"
] | null | null | null | """Utilities for manipulating binary strings/masks."""
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__email__ = "wardefar@iro"
__maintainer__ = "David Warde-Farley"
import numpy as np
from six.moves import xrange
def all_bit_strings(bits, dtype='uint8'):
    """Create a matrix of all binary strings of a given width as the rows.

    Parameters
    ----------
    bits : int
        The number of bits to count through.
    dtype : str or dtype object
        The dtype of the returned array.

    Returns
    -------
    bit_strings : ndarray, shape (2 ** bits, bits)
        The numbers from 0 to 2 ** bits - 1 as binary numbers, most
        significant bit first.

    Notes
    -----
    Memory use is exponential in `bits`, so use with caution.
    """
    rows = []
    for value in range(2 ** bits):
        # binary_repr zero-pads to `bits` digits, MSB first.
        digits = np.binary_repr(value, width=bits)
        rows.append([int(digit) for digit in digits])
    return np.array(rows, dtype=dtype)
1cf3113af9ae1fb20330e2b6dd79f573019fab4e | 2,482 | py | Python | products_and_services_client/models/currency.py | pitzer42/opbk-br-quickstart | b3f86b2e5f82a6090aaefb563614e174a452383c | [
"MIT"
] | 2 | 2021-02-07T23:58:36.000Z | 2021-02-08T01:03:25.000Z | products_and_services_client/models/currency.py | pitzer42/opbk-br-quickstart | b3f86b2e5f82a6090aaefb563614e174a452383c | [
"MIT"
] | null | null | null | products_and_services_client/models/currency.py | pitzer42/opbk-br-quickstart | b3f86b2e5f82a6090aaefb563614e174a452383c | [
"MIT"
] | null | null | null | # coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Currency(object):
    """Swagger-generated model for a currency.

    This model declares no attributes of its own, so `swagger_types` and
    `attribute_map` are empty; the serialization helpers below are the
    standard generated boilerplate.
    """

    # Attribute name -> type, used by to_dict(); empty for this model.
    swagger_types = {
    }

    # Attribute name -> JSON key; empty for this model.
    attribute_map = {
    }

    def __init__(self):  # noqa: E501
        """Currency - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[name] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[name] = value

        # Generated safeguard for models that subclass dict.
        if issubclass(Currency, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Currency):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.2 | 117 | 0.555197 |
bfc6268b5ed6b41ceff83e9e92afb39a3b80d509 | 285 | wsgi | Python | application.wsgi | dangoldin/bots | f0f80edb97ad48ed25862d79385b019497df135d | [
"MIT"
] | 2 | 2016-06-29T04:02:49.000Z | 2018-11-26T02:48:49.000Z | application.wsgi | dangoldin/bots | f0f80edb97ad48ed25862d79385b019497df135d | [
"MIT"
] | null | null | null | application.wsgi | dangoldin/bots | f0f80edb97ad48ed25862d79385b019497df135d | [
"MIT"
# WSGI entry point for the `bots` Flask app (Python 2: uses execfile).
import os, sys, logging

# Route basic logging to stderr so the WSGI server's log captures it.
logging.basicConfig(stream=sys.stderr)

PROJECT_DIR = '/var/www/bots'

# Activate the virtualenv inside the project directory so the app's
# dependencies resolve before importing it.
activate_this = os.path.join(PROJECT_DIR, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))

# Make the project importable, then expose the WSGI callable under the
# name mod_wsgi looks for: ``application``.
sys.path.append(PROJECT_DIR)

from bots import app as application
| 23.75 | 68 | 0.785965 |
a9786de68f0fb8651492a8f2146adf40a7baabe6 | 2,441 | py | Python | Intergovernmental/Internet/scripts/create-filtered-lists.py | public-law/datasets | 664c3bc3b7b4d4bb1e7001127fba7d321940ce77 | [
"CC-BY-4.0"
] | 1 | 2022-03-27T06:42:31.000Z | 2022-03-27T06:42:31.000Z | Intergovernmental/Internet/scripts/create-filtered-lists.py | public-law/datasets | 664c3bc3b7b4d4bb1e7001127fba7d321940ce77 | [
"CC-BY-4.0"
] | 1 | 2022-03-26T19:20:02.000Z | 2022-03-26T19:20:02.000Z | Intergovernmental/Internet/scripts/create-filtered-lists.py | public-law/datasets | 664c3bc3b7b4d4bb1e7001127fba7d321940ce77 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
import json
import re
from typing import Pattern
# Input: one JSON object per line describing an ASN and its organization.
INPUT_FILE = "../all-asns-org-names.json_lines"

# Keyword patterns classifying organization names. All patterns are
# case-insensitive and compiled with re.VERBOSE, so literal whitespace in
# the triple-quoted strings is ignored -- multi-word matches must use an
# explicit separator such as \W (the idiom already used by e.g. city\Wof).
#
# Fix: under re.VERBOSE the former alternatives `credit\bunion` and
# `united\bnation` could never match (a \b between two word characters
# always fails) and `united nations` could only match the concatenated
# string "unitednations" (spaces are ignored). They now use \W, matching
# the convention used elsewhere in these patterns.

# Educational institutions.
EDU = re.compile(
    r""" academy
       | college
       | \bedu
       | \-esd
       | institut
       | library
       | school
       | schule
       | suny
       | universidad
       | university """,
    re.IGNORECASE | re.VERBOSE,
)

# Government bodies and agencies.
GOV = re.compile(
    r""" administration
       | agency
       | authority
       | board
       | bureau
       | city\Wof
       | commission
       | county
       | court
       | department
       | \bDNIC\b
       | gov
       | justica
       | justice
       | ministerio
       | ministry
       | municipal
       | police
       | policia
       | public\Wutilities\Wcommission
       | revenue
       | state\Wof
       | territory
       | tribunal
       | united\Wnations """,
    re.IGNORECASE | re.VERBOSE,
)

# Media and publishing organizations.
MEDIA = re.compile(
    r""" broadcasting
       | journal
       | \bmedia\b
       | news
       | \bpress
       | publish
       | reuters
       | televis
       | times
       | verlag
       | washpost
       | zdf """,
    re.IGNORECASE | re.VERBOSE,
)

# Non-governmental / non-profit organizations.
NGO = re.compile(
    r""" association
       | \bassoc\b
       | church
       | committee
       | credit\Wunion
       | foundation
       | \bfund\b
       | nonprofit
       | non-profit
       | society
       | \bUN\b
       | united\Wnation """,
    re.IGNORECASE | re.VERBOSE,
)

# One output list per category; file_slug names the generated TSV file.
DATA_FILTERS = [
    {"regex": EDU, "file_slug": "edu"},
    {"regex": GOV, "file_slug": "gov"},
    {"regex": MEDIA, "file_slug": "media"},
    {"regex": NGO, "file_slug": "ngo"},
]
def json_line_to_tsv(line: str) -> str:
    """Convert one JSON record line into the standard TSV row.

    Columns: Country, ASN, then "ASName OrgName" joined by a space;
    surrounding whitespace is stripped from the assembled row.
    """
    record = json.loads(line)
    row = "{Country}\t{ASNumber}\t{ASName} {OrgName}".format(**record)
    return row.strip()
# Read the input once: it is invariant across filters, so re-reading it
# inside the loop (as before) wasted one full file scan per filter.
with open(INPUT_FILE, "r", encoding="utf8") as f:
    records = f.readlines()

# For each category filter, write the matching records as a TSV file.
for data_filter in DATA_FILTERS:
    regex: Pattern[str] = data_filter["regex"]
    slug: str = data_filter["file_slug"]
    output_file = f"../asn-{slug}-list.tsv"

    matching_records = [r for r in records if regex.search(r)]
    tsv_records = [json_line_to_tsv(j) for j in matching_records]

    with open(output_file, "w", encoding="utf8") as f:
        f.writelines([record + "\n" for record in tsv_records])
| 21.60177 | 95 | 0.523146 |
e62afce86729b04baeb4333a9d1f4846e57bb592 | 2,319 | py | Python | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/functions/aggregate/util.py | mpol/iis | fbf7129bbd131fbf824a0d3fc8a0afde367794e2 | [
"Apache-2.0"
] | 20 | 2015-09-19T21:17:23.000Z | 2022-03-01T10:37:59.000Z | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/functions/aggregate/util.py | mpol/iis | fbf7129bbd131fbf824a0d3fc8a0afde367794e2 | [
"Apache-2.0"
] | 1,054 | 2015-09-11T06:51:27.000Z | 2022-03-30T09:46:54.000Z | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/functions/aggregate/util.py | mpol/iis | fbf7129bbd131fbf824a0d3fc8a0afde367794e2 | [
"Apache-2.0"
] | 80 | 2015-12-09T12:41:52.000Z | 2022-02-16T11:46:42.000Z | __docformat__ = 'reStructuredText en'
class expandgroups:
    """
    .. function:: expandgroups(args) -> args

    Expands the contents of groups. Useful for debugging group related queries.

    Examples:

    Directed graph:

    >>> table1('''
    ... 1 2
    ... 2 3
    ... 1 4
    ... 2 5
    ... 3 3
    ... ''')
    >>> sql("select expandgroups(a,b) from table1")
    C1 | C2
    -------
    1 | 2
    2 | 3
    1 | 4
    2 | 5
    3 | 3
    >>> sql("select expandgroups(a,b) as gr from table1")
    gr1 | gr2
    ---------
    1 | 2
    2 | 3
    1 | 4
    2 | 5
    3 | 3
    >>> sql("select a,expandgroups(b) as gr from table1 group by a")
    a | gr
    ------
    1 | 2
    1 | 4
    2 | 3
    2 | 5
    3 | 3
    """
    # Presumably marks this class for registration as an SQL aggregate by
    # the madis `functions` loader -- confirm in that package.
    registered = True

    def __init__(self):
        # Buffer of every row passed to step().
        self.rows = []

    def step(self, *args):
        # Aggregate step: store one input row (tuple of column values).
        self.rows.append(args)

    def final(self):
        # Emit a header row ('C1'..'Cn') sized from the first buffered row,
        # then replay every buffered row unchanged.
        # NOTE(review): raises IndexError if step() was never called
        # (self.rows[0] on an empty buffer). `xrange` is Python-2-only.
        yield tuple(('C'+str(x) for x in xrange(1,len(self.rows[0])+1)))
        for r in self.rows:
            yield r
class showgroups:
    """
    .. function:: showgroups(args) -> string

    Shows the contents of groups. Useful for debugging group related queries.

    Examples:

    Directed graph:

    >>> table1('''
    ... 1 2
    ... 2 3
    ... 1 4
    ... 2 5
    ... 3 3
    ... ''')
    >>> sql("select showgroups(a,b) from table1") # doctest: +NORMALIZE_WHITESPACE
    showgroups(a,b)
    --------------------
    <BLANKLINE>
    1 2
    2 3
    1 4
    2 5
    3 3
    >>> sql("select showgroups(b) as gr from table1 group by a")
    gr
    ----
    <BLANKLINE>
    2
    4
    <BLANKLINE>
    3
    5
    <BLANKLINE>
    3
    """
    # Presumably marks this class for registration as an SQL aggregate by
    # the madis `functions` loader -- confirm in that package.
    registered = True

    def __init__(self):
        # Buffer of every row passed to step().
        self.rows = []

    def step(self, *args):
        # Aggregate step: store one input row (tuple of column values).
        self.rows.append(args)

    def final(self):
        # Render all buffered rows, tab-separated within a row and
        # newline-separated between rows, prefixed by a leading newline.
        # `unicode` is the Python-2 builtin.
        return '\n'+'\n'.join(['\t'.join([unicode(x) for x in r]) for r in self.rows])
# Standard madis boilerplate: when this module is loaded stand-alone (its
# __name__ has no package dot), set up the function framework and run the
# doctests embedded in the class docstrings above.
if not ('.' in __name__):
    """
    This is needed to be able to test the function, put it at the end of every
    new function you create
    """
    import sys
    import setpath
    from functions import *
    # Presumably registers/tests the functions -- defined in the madis
    # `functions` package.
    testfunction()
    if __name__ == "__main__":
        # Python-2-only: restore sys.setdefaultencoding (removed by site.py)
        # so the doctests can emit UTF-8.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        import doctest
        doctest.testmod()
| 17.70229 | 86 | 0.490298 |
6a0d7571c975ed90297c5548a8e3a9bc0f83406d | 1,080 | py | Python | tests/zad2_ParameterizedPackage.py | cati97/laboratorium-7-cati97 | 0eaa3ca43a8868a96f32bfe9462e441b56d694fc | [
"MIT"
] | null | null | null | tests/zad2_ParameterizedPackage.py | cati97/laboratorium-7-cati97 | 0eaa3ca43a8868a96f32bfe9462e441b56d694fc | [
"MIT"
] | null | null | null | tests/zad2_ParameterizedPackage.py | cati97/laboratorium-7-cati97 | 0eaa3ca43a8868a96f32bfe9462e441b56d694fc | [
"MIT"
] | 1 | 2020-11-17T16:37:32.000Z | 2020-11-17T16:37:32.000Z | import unittest
from src.sample.zad2 import *
from nose.tools import assert_equal
from parameterized import parameterized, parameterized_class
# Table-driven check of roman() on small inputs using the function-style
# ``parameterized`` decorator (one generated test case per tuple).
@parameterized([
    (1, 'I'),
    (2, 'II'),
    (3, 'III'),
    (4, 'IV'),
    (5, 'V'),
    (6, 'VI'),
    (9, 'IX'),
    (10, 'X'),
])
def test_first_method(input, expected):
    # NOTE(review): ``input`` shadows the builtin of the same name;
    # harmless here, but worth renaming if this file is ever touched.
    assert_equal(roman(input), expected)
class RomanParameterizedPackage(unittest.TestCase):
    """Checks roman() via ``parameterized.expand`` inside a TestCase."""

    @parameterized.expand([
        ("small", 27, 'XXVII'),
        ("medium", 163, 'CLXIII'),
        ("large", 911, 'CMXI'),
    ])
    def test_expand(self, name, input, expected):
        # ``name`` only labels the generated test; the assertion uses the pair.
        self.assertEqual(roman(input), expected)
# ``parameterized_class`` generates one TestCase subclass per
# (number, expected) pair, exposing the pair as class attributes that the
# test method below reads via ``self``.
@parameterized_class(('number', 'expected'), [
    (48, "XLVIII"),
    (49, "XLIX"),
    (59, "LIX"),
    (93, "XCIII"),
    (141, "CXLI"),
    (402, "CDII"),
    (575, "DLXXV"),
    (1024, "MXXIV"),
    (3000, "MMM"),
])
class RomanParameterizedPackage2(unittest.TestCase):
    """Checks roman() with per-class parameterization."""

    def test_other_parameterized(self):
        self.assertEqual(roman(self.number), self.expected)


if __name__ == '__main__':
    unittest.main()
| 21.176471 | 60 | 0.593519 |
41d18c2267ff48dbc29b9e8be1174d8b90b29038 | 4,307 | py | Python | trajopt/envs/quanser/qube/ctrl.py | JoeMWatson/trajopt | 8b98718721e0c373cd7dc01a35f42447c1134713 | [
"MIT"
] | 1 | 2019-10-17T08:42:17.000Z | 2019-10-17T08:42:17.000Z | trajopt/envs/quanser/qube/ctrl.py | JoeMWatson/trajopt | 8b98718721e0c373cd7dc01a35f42447c1134713 | [
"MIT"
] | null | null | null | trajopt/envs/quanser/qube/ctrl.py | JoeMWatson/trajopt | 8b98718721e0c373cd7dc01a35f42447c1134713 | [
"MIT"
] | null | null | null | import autograd.numpy as np
from trajopt.envs.quanser.qube.base import QubeDynamics
class PDCtrl:
    """PD controller driving the Qube toward `x_des = (th_des, 0, 0, 0)`.

    The `done` flag is set once `|x_des - x| < tol`. As a tweak, the P-gain
    on `th` is increased whenever the system is at rest but a nonzero goal
    has not been reached yet (counteracts resistance from the power cord).
    """

    def __init__(self, K=None, th_des=0.0, tol=5e-2):
        """K: gains for [th, al, th_d, al_d]; th_des: arm goal; tol: done threshold."""
        self.done = False
        self.K = [5.0, 0.0, 0.5, 0.0] if K is None else K
        self.th_des = th_des
        self.tol = tol

    def __call__(self, x):
        """Return the control action [u] for the state x = (th, al, th_d, al_d)."""
        th, al, thd, ald = x
        rest_sq = al ** 2 + thd ** 2 + ald ** 2
        distance = np.sqrt((self.th_des - th) ** 2 + rest_sq)

        if not self.done and distance < self.tol:
            self.done = True
        elif self.th_des and np.sqrt(rest_sq) < self.tol / 5.0:
            # At rest but the (nonzero) goal was not reached: bump the
            # P-gain on `th` to push through static resistance.
            self.K[0] += 0.01 * self.K[0]

        u = (self.K[0] * (self.th_des - th)
             - self.K[1] * al
             - self.K[2] * thd
             - self.K[3] * ald)
        return np.array([u])
class GoToLimCtrl:
    """Drive the arm into a joint limit by applying a constant `u_max`.

    The most recent limit angle is tracked in `th_lim`; `done` becomes True
    once the measured angle has stayed exactly constant for `cnt_done`
    consecutive calls (i.e. the arm is stalled at the limit).
    """

    def __init__(self, positive=True):
        """positive: push toward the positive (True) or negative (False) limit."""
        self.done = False
        self.th_lim = 10.0  # sentinel until a real angle has been observed
        self.sign = 1 if positive else -1
        self.u_max = 1.0
        self.cnt = 0
        self.cnt_done = 200

    def __call__(self, x):
        """Return the constant push action; update the stall counter."""
        th, _, _, _ = x
        if np.abs(th - self.th_lim) > 0:
            # Still moving: restart the stall count at the new angle.
            self.cnt = 0
            self.th_lim = th
        else:
            self.cnt += 1
        self.done = self.cnt == self.cnt_done
        return np.array([self.sign * self.u_max])
class CalibrCtrl:
    """Calibration: visit both joint limits, then move to their midpoint."""

    def __init__(self):
        """Create the three phase controllers; `done` is set after centering."""
        self.done = False
        self.go_right = GoToLimCtrl(positive=True)
        self.go_left = GoToLimCtrl(positive=False)
        self.go_center = PDCtrl()

    def __call__(self, x):
        """Run the active calibration phase and return its action [u]."""
        if not self.go_right.done:
            return self.go_right(x)
        if not self.go_left.done:
            return self.go_left(x)
        if not self.go_center.done:
            if self.go_center.th_des == 0.0:
                # Target the midpoint between the two measured limits.
                midpoint = (self.go_left.th_lim + self.go_right.th_lim) / 2
                self.go_center.th_des = midpoint
            return self.go_center(x)
        if not self.done:
            self.done = True
        return np.array([0.0])
class EnergyCtrl:
    """Energy-based swing-up controller (P-control on the pendulum energy)."""

    def __init__(self, Er, mu, a_max):
        """Er: reference energy (J); mu: P-gain on energy (m/s/J);
        a_max: max acceleration of the pendulum pivot (m/s^2)."""
        self.Er = Er
        self.mu = mu
        self.a_max = a_max
        self._dyn = QubeDynamics()  # dynamics parameters of the robot

    def __call__(self, x):
        """Return the motor voltage [V] pumping the energy toward `Er`."""
        _, al, _, ald = x
        dyn = self._dyn
        # Pendulum inertia term (Mp * Lp^2 / 12).
        inertia = dyn.Mp * dyn.Lp ** 2 / 12
        kinetic = 0.5 * inertia * ald ** 2
        potential = 0.5 * dyn.Mp * dyn.g * dyn.Lp * (1. - np.cos(al + np.pi))
        energy_error = kinetic + potential - self.Er
        # Accelerate the pivot in the direction of the swing, proportional
        # to the energy error, saturated at +/- a_max.
        direction = np.sign(ald * np.cos(al + np.pi))
        acc = np.clip(self.mu * energy_error * direction,
                      -self.a_max, self.a_max)
        # Convert pivot acceleration -> arm torque -> motor voltage.
        torque = dyn.Mr * dyn.Lr * acc
        return np.array([dyn.Rm / dyn.km * torque])
class SwingUpCtrl:
    """Hybrid controller (EnergyCtrl, PDCtrl) switching based on alpha.

    Energy pumping swings the pendulum up; once cos(al) exceeds the cosine
    of `alpha_max_pd_enable` (i.e. al is within that many degrees of
    0 mod 2*pi), the PD controller takes over for stabilization.
    """

    def __init__(self, ref_energy, energy_gain, acc_max,
                 alpha_max_pd_enable=20.0, pd_gain=None):
        # Set up the energy pumping controller.
        self.en_ctrl = EnergyCtrl(ref_energy, energy_gain, acc_max)
        # Set up the PD controller. Note cos(2*pi - d) == cos(d), so the
        # switch fires when the pendulum is within d of al = 0 (mod 2*pi).
        cos_al_delta = np.cos(2. * np.pi - np.deg2rad(alpha_max_pd_enable))
        self.pd_enabled = lambda cos_al: cos_al > cos_al_delta
        pd_gain = pd_gain if pd_gain is not None else [-1.5, 25.0, -1.5, 2.5]
        self.pd_ctrl = PDCtrl(K=pd_gain)

    def __call__(self, obs):
        # NOTE: mutates obs[1] in place when the PD branch is taken.
        th, al, th_d, al_d = obs
        if self.pd_enabled(np.cos(al)):
            # NOTE(review): `2. * np.pi * al` looks dimensionally wrong for
            # re-centering an angle near the upright; `al - 2. * np.pi`
            # (mapping al in (pi, 2*pi] to (-pi, 0]) seems intended --
            # confirm against the upstream Quanser Qube controller.
            obs[1] = (2. * np.pi * al) if al > np.pi else al
            return self.pd_ctrl(obs)
        else:
            return self.en_ctrl(obs)
| 33.648438 | 88 | 0.565823 |
7d3d8d37b2a9b590f7db08e40c9c28ababb1383e | 2,734 | py | Python | sub_rgbd_and_cloud.py | felixchenfy/ros_pub_and_sub_rgbd_and_cloud | 28e481689e81a08c491f859a78c0058c86d4ebee | [
"MIT"
] | 8 | 2019-12-23T09:42:38.000Z | 2021-09-06T20:49:30.000Z | sub_rgbd_and_cloud.py | felixchenfy/ros_pub_and_sub_rgbd_and_cloud | 28e481689e81a08c491f859a78c0058c86d4ebee | [
"MIT"
] | null | null | null | sub_rgbd_and_cloud.py | felixchenfy/ros_pub_and_sub_rgbd_and_cloud | 28e481689e81a08c491f859a78c0058c86d4ebee | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script gives exampe code for subscribing:
(1) Color image.
(2) Depth image.
(3) Camera info.
(4) Point cloud (subscribed as open3d format).
'''
from utils.lib_ros_rgbd_pub_and_sub import ColorImageSubscriber, DepthImageSubscriber, CameraInfoSubscriber
from utils.lib_ros_point_cloud_pub_and_sub import PointCloudSubscriber
from utils.lib_rgbd import MyCameraInfo
import numpy as np
import rospy
# -- Set ROS topic names for subscribing.
NS = "test_data/"  # ROS topic namespace shared by the four topics below.
COLOR_TOPIC_NAME = NS + "color"  # Color (RGB) image topic.
DEPTH_TOPIC_NAME = NS + "depth"  # Depth image topic.
CAMERA_INFO_TOPIC_NAME = NS + "camera_info"  # Camera intrinsics topic.
CLOUD_TOPIC_NAME = NS + "point_cloud"  # Point-cloud topic.
# -- Subscribe data and print.
def main():
    """Poll the four subscribers and log every message that arrives.

    Runs until rospy signals shutdown; each topic keeps its own counter so
    the log shows how many messages of each kind have been received.
    """
    # -- Set subscribers.
    sub_color = ColorImageSubscriber(COLOR_TOPIC_NAME)
    sub_depth = DepthImageSubscriber(DEPTH_TOPIC_NAME)
    sub_camera_info = CameraInfoSubscriber(CAMERA_INFO_TOPIC_NAME)
    sub_cloud = PointCloudSubscriber(CLOUD_TOPIC_NAME)

    # Per-topic receive counters: color, depth, camera_info, cloud.
    n_color = n_depth = n_info = n_cloud = 0

    # -- Loop and subscribe.
    while not rospy.is_shutdown():
        # Color.
        if sub_color.has_image():
            color = sub_color.get_image()
            n_color += 1
            rospy.loginfo("Subscribe {}: color image, "
                          "shape={}".format(n_color, color.shape))

        # Depth.
        if sub_depth.has_image():
            depth = sub_depth.get_image()
            n_depth += 1
            rospy.loginfo("Subscribe {}: depth image, "
                          "shape={}".format(n_depth, depth.shape))

        # Camera_info.
        if sub_camera_info.has_camera_info():
            ros_camera_info = sub_camera_info.get_camera_info()
            n_info += 1
            rospy.loginfo("Subscribe {}: camera_info, "
                          "fx={}, fy={}.".format(n_info,
                                                 ros_camera_info.K[0],
                                                 ros_camera_info.K[4]))
            # Wrap into the project's camera-info type (kept for parity with
            # the original example; the wrapper itself is not used further).
            my_camera_info = MyCameraInfo(ros_camera_info=ros_camera_info)

        # Point_cloud.
        if sub_cloud.has_cloud():
            open3d_cloud = sub_cloud.get_cloud()
            n_cloud += 1
            num_points = np.asarray(open3d_cloud.points).shape[0]
            rospy.loginfo("Subscribe {}: point cloud, "
                          "{} points.".format(n_cloud, num_points))

        rospy.sleep(0.1)
# Script entry point: register the ROS node, run the subscribe loop, and
# log when the node exits.
if __name__ == '__main__':
    node_name = "sub_rgbd_and_cloud"
    rospy.init_node(node_name)
    main()
    rospy.logwarn("Node `{}` stops.".format(node_name))
| 32.164706 | 107 | 0.581931 |
d33333631f3719c0dc35826c097597615bc54dcc | 31,634 | py | Python | tensor2tensor/utils/decoding.py | YunseokJANG/tensor2tensor | 2451614b930c73b2b8dd891b4fc5838d99a151a6 | [
"Apache-2.0"
] | 1 | 2021-07-13T17:37:11.000Z | 2021-07-13T17:37:11.000Z | tensor2tensor/utils/decoding.py | YunseokJANG/tensor2tensor | 2451614b930c73b2b8dd891b4fc5838d99a151a6 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/utils/decoding.py | YunseokJANG/tensor2tensor | 2451614b930c73b2b8dd891b4fc5838d99a151a6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoding utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import operator
import os
import re
import time
import numpy as np
import six
from six.moves import input # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import problem as problem_lib
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import mlperf_log
from tensor2tensor.utils import registry
import tensorflow as tf
# Global TensorFlow command-line flags (parsed by the binary importing us).
FLAGS = tf.flags.FLAGS

# Number of samples to draw for an image input (in such cases as captioning)
IMAGE_DECODE_LENGTH = 100
def decode_hparams(overrides=""):
  """Build the default set of decoding hyperparameters.

  Args:
    overrides: comma-separated "name=value" string parsed on top of the
      defaults (the format accepted by HParams.parse).

  Returns:
    A tf.contrib.training.HParams instance with all decoding settings.
  """
  defaults = dict(
      save_images=False,
      log_results=True,
      extra_length=100,
      batch_size=0,
      beam_size=4,
      alpha=0.6,
      eos_penalty=0.0,
      block_size=0,
      guess_and_check_top_k=0,
      guess_and_check_epsilon=-1,
      return_beams=False,
      write_beam_scores=False,
      max_input_size=-1,
      identity_output=False,
      num_samples=-1,
      delimiter="\n",
      decode_to_file=None,
      decode_in_memory=False,
      # Directory to write hook summaries.
      summaries_log_dir="decode",
      # How many shards of data to decode (treating 1 as None).
      shards=1,
      # Which shard are we decoding if more than 1 above.
      shard_id=0,
      # Number of the first shard to decode.
      shards_start_offset=0,
      num_decodes=1,
      force_decode_length=False,
      display_decoded_images=False,
      # Multi-problem decoding task id.
      multiproblem_task_id=-1,
      # Used for video decoding.
      frames_per_second=10,
      skip_eos_postprocess=False,
      # Creates a blue/red border covering border_percent of the frame.
      border_percent=2,
      # Maximum number of videos displayed:
      # total number of videos is max_display_outputs * num_decodes.
      max_display_outputs=10,
      # Set this to the path of a trained VGG ckpt to enable the VGG
      # feature based video metrics.
      vgg_ckpt_path=None,
      # Used for MLPerf compliance logging.
      mlperf_decode_step=0.0,
      mlperf_threshold=25.0,
      mlperf_success=False,
  )
  decode_hp = tf.contrib.training.HParams(**defaults)
  decode_hp.parse(overrides)
  return decode_hp
def log_decode_results(inputs,
                       outputs,
                       problem_name,
                       prediction_idx,
                       inputs_vocab,
                       targets_vocab,
                       targets=None,
                       save_images=False,
                       output_dir=None,
                       identity_output=False,
                       log_results=True):
  """Log inference results and decode them into human-readable form.

  The modality is inferred from `problem_name`: names containing "video" or
  "gym" are treated as video problems, names containing "image" as image
  problems; everything else is decoded to text via the vocabularies.

  Args:
    inputs: raw model inputs for one example (numpy array); may be None.
    outputs: raw model outputs for one example (numpy array).
    problem_name: registered problem name, used to infer the modality.
    prediction_idx: index of this prediction; used in saved file names.
    inputs_vocab: encoder used to decode `inputs`, or None.
    targets_vocab: encoder used to decode `outputs` and `targets`.
    targets: optional ground-truth array, decoded/logged when present.
    save_images: if True and this is an image problem, save the input image
      under `output_dir` instead of decoding it to text.
    output_dir: directory for saved images/videos.
    identity_output: if True, skip vocabulary decoding and emit the raw
      integer ids joined by spaces.
    log_results: if False, suppress tf.logging of the decoded strings.

  Returns:
    Tuple (decoded_inputs, decoded_outputs, decoded_targets); an entry is
    None when not applicable (e.g. image inputs or missing targets).
  """
  # TODO(lukaszkaiser) refactor this into feature_encoder
  is_video = "video" in problem_name or "gym" in problem_name
  if is_video:

    def fix_and_save_video(vid, prefix):
      # Write one PNG per frame under output_dir.
      save_path_template = os.path.join(
          output_dir,
          "%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx))
      # this is only required for predictions
      if vid.shape[-1] == 1:
        vid = np.squeeze(vid, axis=-1)
      save_video(vid, save_path_template)

    tf.logging.info("Saving video: {}".format(prediction_idx))
    fix_and_save_video(inputs, "inputs")
    fix_and_save_video(outputs, "outputs")
    fix_and_save_video(targets, "targets")

  is_image = "image" in problem_name
  is_text2class = isinstance(registry.problem(problem_name),
                             text_problems.Text2ClassProblem)
  # Image and classification outputs carry no EOS token to truncate at.
  skip_eos_postprocess = is_image or is_text2class

  decoded_inputs = None
  if is_image and save_images:
    save_path = os.path.join(
        output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx))
    # Pixel values presumably lie in [0, 255] here; rescaled for saving --
    # TODO confirm against the image problems' preprocessing.
    show_and_save_image(inputs / 255., save_path)
  elif inputs is not None and inputs_vocab:
    if identity_output:
      decoded_inputs = " ".join(map(str, inputs.flatten()))
    else:
      decoded_inputs = inputs_vocab.decode(_save_until_eos(
          inputs, skip_eos_postprocess))

    if log_results and not is_video:
      tf.logging.info("Inference results INPUT: %s" % decoded_inputs)

  decoded_targets = None
  decoded_outputs = None
  if identity_output:
    decoded_outputs = " ".join(map(str, outputs.flatten()))
    if targets is not None:
      decoded_targets = " ".join(map(str, targets.flatten()))
  else:
    decoded_outputs = targets_vocab.decode(_save_until_eos(
        outputs, skip_eos_postprocess))

    # Targets are only decoded when they will actually be logged.
    if targets is not None and log_results:
      decoded_targets = targets_vocab.decode(_save_until_eos(
          targets, skip_eos_postprocess))

  if log_results and not is_video:
    tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
  if targets is not None and log_results and not is_video:
    tf.logging.info("Inference results TARGET: %s" % decoded_targets)

  return decoded_inputs, decoded_outputs, decoded_targets
def decode_from_dataset(estimator,
                        problem_name,
                        hparams,
                        decode_hp,
                        decode_to_file=None,
                        dataset_split=None,
                        checkpoint_path=None):
  """Perform decoding from dataset.

  Runs `decode_hp.num_decodes` decode passes over the problem's dataset and
  fires the post-decode hooks once at the end.

  Args:
    estimator: tf.estimator.Estimator holding the trained model.
    problem_name: name of the registered problem being decoded.
    hparams: model hyperparameters; `hparams.problem` supplies the dataset.
      NOTE: mutated in place when `decode_hp.batch_size` is set.
    decode_hp: decoding hyperparameters (see `decode_hparams`).
      NOTE: `decode_hp.decode_to_file` is rewritten in place below so the
      post-decode hooks observe the transformed file name.
    decode_to_file: optional output file name forwarded to `decode_once`.
    dataset_split: which dataset split to decode, or None for the default.
    checkpoint_path: optional explicit checkpoint to restore from.

  Returns:
    List of per-decode results. Entries are only accumulated when
    `decode_hp.decode_in_memory` is True (see the loop below).
  """
  tf.logging.info("Performing local inference from dataset for %s.",
                  str(problem_name))

  # We assume that worker_id corresponds to shard number.
  shard = decode_hp.shard_id if decode_hp.shards > 1 else None

  # Setup decode output directory for any artifacts that may be written out.
  output_dir = os.path.join(estimator.model_dir, "decode")
  tf.gfile.MakeDirs(output_dir)

  # If decode_hp.batch_size is specified, use a fixed batch size.
  if decode_hp.batch_size:
    hparams.batch_size = decode_hp.batch_size
    hparams.use_fixed_batch_size = True

  dataset_kwargs = {
      "shard": shard,
      "dataset_split": dataset_split,
      "max_records": decode_hp.num_samples
  }

  # Build the inference input function.
  problem = hparams.problem
  infer_input_fn = problem.make_estimator_input_fn(
      tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)

  predictions, output_dirs = [], []
  for decode_id in range(decode_hp.num_decodes):
    tf.logging.info("Decoding {}".format(decode_id))

    # Create decode directory if not in-memory decoding.
    if not decode_hp.decode_in_memory:
      output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id)
      tf.gfile.MakeDirs(output_dir)
      output_dirs.append(output_dir)

    result = decode_once(estimator,
                         problem_name,
                         hparams,
                         infer_input_fn,
                         decode_hp,
                         decode_to_file,
                         output_dir,
                         log_results=not decode_hp.decode_in_memory,
                         checkpoint_path=checkpoint_path)

    # In-memory mode: keep only the last output_dir and accumulate the raw
    # prediction lists returned by decode_once.
    if decode_hp.decode_in_memory:
      output_dirs = [output_dir]
      predictions.append(result)

  # Rewrite decode_to_file so the hooks below see the actual file name
  # (presumably with problem/shard info applied -- see _decode_filename).
  if decode_hp.decode_to_file:
    decode_hp.decode_to_file = _decode_filename(
        decode_hp.decode_to_file, problem_name, decode_hp)

  run_postdecode_hooks(DecodeHookArgs(
      estimator=estimator,
      problem=problem,
      output_dirs=output_dirs,
      hparams=hparams,
      decode_hparams=decode_hp,
      predictions=predictions
  ), dataset_split)

  return predictions
def decode_once(estimator,
                problem_name,
                hparams,
                infer_input_fn,
                decode_hp,
                decode_to_file,
                output_dir,
                log_results=True,
                checkpoint_path=None):
  """Decodes once.

  Runs one pass of estimator.predict over infer_input_fn, logging each
  decoded example and optionally writing outputs/targets/inputs to three
  parallel files derived from ``decode_to_file``.

  Args:
    estimator: tf.estimator.Estimator used for prediction.
    problem_name: str, problem being decoded.
    hparams: model HParams (supplies problem vocabularies).
    infer_input_fn: input function yielding prediction features.
    decode_hp: decoding HParams (beam settings, delimiters, limits).
    decode_to_file: optional output path; falls back to
      decode_hp.decode_to_file when falsy.
    output_dir: directory for any saved artifacts (e.g. images).
    log_results: when False, skip all logging/writing and just return the
      materialized predictions.
    checkpoint_path: optional explicit checkpoint to restore.

  Returns:
    list of predictions when log_results is False, otherwise None.
  """
  # Get the predictions as an iterable
  predictions = estimator.predict(infer_input_fn,
                                  checkpoint_path=checkpoint_path)
  if not log_results:
    return list(predictions)
  # Prepare output file writers if decode_to_file passed
  decode_to_file = decode_to_file or decode_hp.decode_to_file
  if decode_to_file:
    output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
    # Derive sibling files by swapping the final extension segment.
    parts = output_filepath.split(".")
    parts[-1] = "targets"
    target_filepath = ".".join(parts)
    parts[-1] = "inputs"
    input_filepath = ".".join(parts)
    output_file = tf.gfile.Open(output_filepath, "w")
    target_file = tf.gfile.Open(target_filepath, "w")
    input_file = tf.gfile.Open(input_filepath, "w")
  problem_hparams = hparams.problem_hparams
  # Inputs vocabulary is set to targets if there are no inputs in the problem,
  # e.g., for language models where the inputs are just a prefix of targets.
  has_input = "inputs" in problem_hparams.vocabulary
  inputs_vocab_key = "inputs" if has_input else "targets"
  inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
  targets_vocab = problem_hparams.vocabulary["targets"]
  num_eval_samples = 0
  for num_predictions, prediction in enumerate(predictions):
    num_eval_samples += 1
    # Shift to a 1-based count for logging and the num_samples cutoff below.
    num_predictions += 1
    inputs = prediction.get("inputs")
    targets = prediction.get("targets")
    outputs = prediction.get("outputs")
    # Log predictions
    decoded_outputs = []
    decoded_scores = []
    if decode_hp.return_beams:
      # Outputs are stacked beams along axis 0; split back into per-beam rows.
      output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
      scores = None
      if "scores" in prediction:
        scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
      for i, beam in enumerate(output_beams):
        tf.logging.info("BEAM %d:" % i)
        # ``scores and scores[i]`` yields None when no scores were returned.
        score = scores and scores[i]
        decoded = log_decode_results(
            inputs,
            beam,
            problem_name,
            num_predictions,
            inputs_vocab,
            targets_vocab,
            save_images=decode_hp.save_images,
            output_dir=output_dir,
            identity_output=decode_hp.identity_output,
            targets=targets,
            log_results=decode_hp.log_results)
        decoded_outputs.append(decoded)
        if decode_hp.write_beam_scores:
          decoded_scores.append(score)
    else:
      decoded = log_decode_results(
          inputs,
          outputs,
          problem_name,
          num_predictions,
          inputs_vocab,
          targets_vocab,
          save_images=decode_hp.save_images,
          output_dir=output_dir,
          identity_output=decode_hp.identity_output,
          targets=targets,
          log_results=decode_hp.log_results)
      decoded_outputs.append(decoded)
    # Write out predictions if decode_to_file passed
    if decode_to_file:
      for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
        # Skip if all padding
        if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input):
          continue
        beam_score_str = ""
        if decode_hp.write_beam_scores:
          beam_score_str = "\t%.2f" % decoded_scores[i]
        output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
        target_file.write(str(d_target) + decode_hp.delimiter)
        input_file.write(str(d_input) + decode_hp.delimiter)
    # Stop early once the requested sample budget has been consumed.
    if (decode_hp.num_samples >= 0 and
        num_predictions >= decode_hp.num_samples):
      break
  mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE,
                               value=num_eval_samples,
                               hparams=hparams)
  if decode_to_file:
    output_file.close()
    target_file.close()
    input_file.close()
def decode_from_file(estimator,
                     filename,
                     hparams,
                     decode_hp,
                     decode_to_file=None,
                     checkpoint_path=None):
  """Compute predictions on entries in filename and write them out.

  Reads one input per delimiter-separated record from ``filename``, sorts
  them by length for efficient batching, runs prediction (with a TPU- or
  CPU-specific input pipeline), then writes decodes back in the original
  input order and fires post-decode hooks.

  Args:
    estimator: tf.estimator.Estimator used for prediction.
    filename: str, path to the file of inputs (sharded suffix is appended
      here when decoding with multiple shards).
    hparams: model HParams (supplies problem vocabularies).
    decode_hp: decoding HParams (batch size, beams, delimiter, ...).
    decode_to_file: optional explicit output path; otherwise derived from
      the input filename plus model/hparams/problem/beam/alpha.
    checkpoint_path: optional explicit checkpoint to restore.
  """
  if not decode_hp.batch_size:
    decode_hp.batch_size = 32
    tf.logging.info(
        "decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
  # Inputs vocabulary is set to targets if there are no inputs in the problem,
  # e.g., for language models where the inputs are just a prefix of targets.
  p_hp = hparams.problem_hparams
  has_input = "inputs" in p_hp.vocabulary
  inputs_vocab_key = "inputs" if has_input else "targets"
  inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
  targets_vocab = p_hp.vocabulary["targets"]
  problem_name = FLAGS.problem
  filename = _add_shard_to_filename(filename, decode_hp)
  tf.logging.info("Performing decoding from file (%s)." % filename)
  sorted_inputs, sorted_keys = _get_sorted_inputs(filename, decode_hp.delimiter)
  # Ceiling division: one extra batch for any remainder.
  num_decode_batches = (len(sorted_inputs) - 1) // decode_hp.batch_size + 1
  if estimator.config.use_tpu:
    # TPU path: pad/truncate every input to a fixed length up front so the
    # dataset has static shapes.
    length = getattr(hparams, "length", hparams.max_length)
    batch_ids = []
    for line in sorted_inputs:
      # Append id 1 — presumably EOS; TODO confirm against text_encoder.
      ids = inputs_vocab.encode(line.strip()) + [1]
      if len(ids) < length:
        ids.extend([0] * (length - len(ids)))
      else:
        ids = ids[:length]
      batch_ids.append(ids)
    np_ids = np.array(batch_ids, dtype=np.int32)
    def input_fn(params):
      batch_size = params["batch_size"]
      dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids})
      dataset = dataset.map(
          lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))})
      dataset = dataset.batch(batch_size)
      return dataset
  else:
    # CPU/GPU path: feed variable-length batches through a generator.
    def input_fn():
      input_gen = _decode_batch_input_fn(
          num_decode_batches, sorted_inputs,
          inputs_vocab, decode_hp.batch_size,
          decode_hp.max_input_size, task_id=decode_hp.multiproblem_task_id)
      gen_fn = make_input_fn_from_generator(input_gen)
      example = gen_fn()
      return _decode_input_tensor_to_features_dict(example, hparams)
  decodes = []
  result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
  start_time = time.time()
  total_time_per_step = 0
  total_cnt = 0
  def timer(gen):
    # Wrap an iterator so each yielded item comes with the time spent
    # producing it (i.e. per-example prediction latency).
    while True:
      try:
        start_time = time.time()
        item = next(gen)
        elapsed_time = time.time() - start_time
        yield elapsed_time, item
      except StopIteration:
        break
  for elapsed_time, result in timer(result_iter):
    if decode_hp.return_beams:
      beam_decodes = []
      beam_scores = []
      # Beams are stacked along axis 0; split back into per-beam rows.
      output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
      scores = None
      if "scores" in result:
        if np.isscalar(result["scores"]):
          result["scores"] = result["scores"].reshape(1)
        scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
      for k, beam in enumerate(output_beams):
        tf.logging.info("BEAM %d:" % k)
        # ``scores and scores[k]`` yields None when no scores were returned.
        score = scores and scores[k]
        _, decoded_outputs, _ = log_decode_results(
            result["inputs"],
            beam,
            problem_name,
            None,
            inputs_vocab,
            targets_vocab,
            log_results=decode_hp.log_results)
        beam_decodes.append(decoded_outputs)
        if decode_hp.write_beam_scores:
          beam_scores.append(score)
      if decode_hp.write_beam_scores:
        decodes.append("\t".join([
            "\t".join([d, "%.2f" % s])
            for d, s in zip(beam_decodes, beam_scores)
        ]))
      else:
        decodes.append("\t".join(beam_decodes))
    else:
      _, decoded_outputs, _ = log_decode_results(
          result["inputs"],
          result["outputs"],
          problem_name,
          None,
          inputs_vocab,
          targets_vocab,
          log_results=decode_hp.log_results)
      decodes.append(decoded_outputs)
    total_time_per_step += elapsed_time
    total_cnt += result["outputs"].shape[-1]
  tf.logging.info("Elapsed Time: %5.5f" % (time.time() - start_time))
  tf.logging.info("Averaged Single Token Generation Time: %5.7f "
                  "(time %5.7f count %d)" %
                  (total_time_per_step / total_cnt,
                   total_time_per_step, total_cnt))
  # If decode_to_file was provided use it as the output filename without change
  # (except for adding shard_id if using more shards for decoding).
  # Otherwise, use the input filename plus model, hp, problem, beam, alpha.
  decode_filename = decode_to_file if decode_to_file else filename
  if not decode_to_file:
    decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
  else:
    decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
  tf.logging.info("Writing decodes into %s" % decode_filename)
  outfile = tf.gfile.Open(decode_filename, "w")
  # sorted_keys maps original input index -> position in the sorted order,
  # so this loop restores the original file order.
  for index in range(len(sorted_inputs)):
    outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
  outfile.flush()
  outfile.close()
  output_dir = os.path.join(estimator.model_dir, "decode")
  tf.gfile.MakeDirs(output_dir)
  run_postdecode_hooks(DecodeHookArgs(
      estimator=estimator,
      problem=hparams.problem,
      output_dirs=[output_dir],
      hparams=hparams,
      decode_hparams=decode_hp,
      # result_iter was already consumed above, so this is an empty list.
      predictions=list(result_iter)
  ), None)
def _add_shard_to_filename(filename, decode_hp):
if decode_hp.shards > 1:
shard_id = decode_hp.shard_id + decode_hp.shards_start_offset
filename = filename + ("%.3d" % shard_id)
return filename
def _decode_filename(base_filename, problem_name, decode_hp):
"""Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename.
"""
if decode_hp.shards > 1:
base_filename = _add_shard_to_filename(base_filename, decode_hp)
if ("beam{beam}.alpha{alpha}.decodes".format(
beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
in base_filename):
return base_filename
else:
return (
"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha)))
def make_input_fn_from_generator(gen):
  """Use py_func to yield elements from the given generator.

  The first element is pulled eagerly to discover the dtypes and the
  (rank-only) shapes of the generator's nested structure; it is replayed
  as the first py_func result so no data is lost.

  Args:
    gen: generator yielding nested structures of numpy arrays.

  Returns:
    An input_fn returning tensors with the same nested structure.
  """
  first_ex = six.next(gen)
  flattened = tf.contrib.framework.nest.flatten(first_ex)
  types = [t.dtype for t in flattened]
  # Only the rank is kept; every dimension is left unknown (None).
  shapes = [[None] * len(t.shape) for t in flattened]
  first_ex_list = [first_ex]
  def py_func():
    # Replay the peeked first example once, then drain the generator.
    if first_ex_list:
      example = first_ex_list.pop()
    else:
      example = six.next(gen)
    return tf.contrib.framework.nest.flatten(example)
  def input_fn():
    flat_example = tf.py_func(py_func, [], types)
    # py_func outputs have no static shape; restore the known ranks.
    _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
    example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
    return example
  return input_fn
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None):
  """Interactive decoding.

  Reads inputs from the terminal (via _interactive_input_fn), runs
  prediction, and logs the decoded outputs — per beam when
  decode_hp.return_beams is set.

  Args:
    estimator: tf.estimator.Estimator used for prediction.
    hparams: model HParams (supplies the problem and target vocabulary).
    decode_hp: decoding HParams.
    checkpoint_path: optional explicit checkpoint to restore.
  """
  is_image = "image" in hparams.problem.name
  is_text2class = isinstance(hparams.problem,
                             text_problems.Text2ClassProblem)
  # Image and classification outputs have no EOS token to strip.
  skip_eos_postprocess = (
      is_image or is_text2class or decode_hp.skip_eos_postprocess)
  def input_fn():
    gen_fn = make_input_fn_from_generator(
        _interactive_input_fn(hparams, decode_hp))
    example = gen_fn()
    example = _interactive_input_tensor_to_features_dict(example, hparams)
    return example
  result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
  for result in result_iter:
    targets_vocab = hparams.problem_hparams.vocabulary["targets"]
    if decode_hp.return_beams:
      # Beams are stacked along axis 0; split back into per-beam rows.
      beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
      scores = None
      if "scores" in result:
        if np.isscalar(result["scores"]):
          result["scores"] = result["scores"].reshape(1)
        scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
      for k, beam in enumerate(beams):
        tf.logging.info("BEAM %d:" % k)
        beam_string = targets_vocab.decode(_save_until_eos(
            beam, skip_eos_postprocess))
        if scores is not None:
          tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k]))
        else:
          tf.logging.info("\"%s\"" % beam_string)
    else:
      if decode_hp.identity_output:
        # Log raw token ids instead of decoded text.
        tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
      else:
        tf.logging.info(
            targets_vocab.decode(_save_until_eos(
                result["outputs"], skip_eos_postprocess)))
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary,
                           batch_size, max_input_size, task_id=-1):
  """Generator to produce batches of inputs.

  Each yielded dict holds an int32 array of shape
  [current_batch_size, batch_max_length], where rows are the encoded,
  EOS-terminated inputs right-padded with zeros to the longest row in
  that batch.

  Args:
    num_decode_batches: total number of batches to produce.
    sorted_inputs: list of input strings (already length-sorted).
    vocabulary: encoder with an ``encode(str) -> list[int]`` method.
    batch_size: examples per batch.
    max_input_size: if > 0, truncate inputs to this many ids (incl. EOS).
    task_id: if >= 0, use this id as the terminator instead of EOS
      (multiproblem decoding).

  Yields:
    dict with key "inputs" mapping to a padded int32 numpy array.
  """
  # NOTE: message text says "batch" but logs the total batch count.
  tf.logging.info(" batch %d" % num_decode_batches)
  for b in range(num_decode_batches):
    tf.logging.info("Decoding batch %d" % b)
    batch_length = 0
    batch_inputs = []
    for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:
      input_ids = vocabulary.encode(inputs)
      if max_input_size > 0:
        # Subtract 1 for the EOS_ID.
        input_ids = input_ids[:max_input_size - 1]
      final_id = text_encoder.EOS_ID if task_id < 0 else task_id
      input_ids.append(final_id)
      batch_inputs.append(input_ids)
      if len(input_ids) > batch_length:
        batch_length = len(input_ids)
    final_batch_inputs = []
    for input_ids in batch_inputs:
      assert len(input_ids) <= batch_length
      # Right-pad with zeros to the longest sequence in this batch.
      x = input_ids + [0] * (batch_length - len(input_ids))
      final_batch_inputs.append(x)
    yield {
        "inputs": np.array(final_batch_inputs).astype(np.int32),
    }
def _interactive_input_fn(hparams, decode_hp):
  """Generator that reads from the terminal and yields "interactive inputs".

  Due to temporary limitations in tf.learn, if we don't want to reload the
  whole graph, then we are stuck encoding all of the input as one fixed-size
  numpy array.

  We yield int32 arrays with shape [const_array_size].  The format is:
  [num_samples, decode_length, len(input ids), <input ids>, <padding>]

  Args:
    hparams: model hparams
    decode_hp: decode hparams
  Yields:
    numpy arrays

  Raises:
    Exception: when `input_type` is invalid.
  """
  num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
  decode_length = decode_hp.extra_length
  input_type = "text"
  p_hparams = hparams.problem_hparams
  has_input = "inputs" in p_hparams.modality
  vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
  # This should be longer than the longest input.
  const_array_size = 10000
  # Import readline if available for command line editing and recall.
  try:
    import readline  # pylint: disable=g-import-not-at-top,unused-variable
  except ImportError:
    pass
  while True:
    prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
              " it=<input_type> ('text' or 'image' or 'label', default: "
              "text)\n"
              " ns=<num_samples> (changes number of samples, default: 1)\n"
              " dl=<decode_length> (changes decode length, default: 100)\n"
              " <%s> (decode)\n"
              " q (quit)\n"
              ">" % (num_samples, decode_length,
                     "source_string" if has_input else "target_prefix"))
    input_string = input(prompt)
    # Simple command language: "q" quits; "ns="/"dl="/"it=" update settings;
    # anything else is treated as input to decode.
    if input_string == "q":
      return
    elif input_string[:3] == "ns=":
      num_samples = int(input_string[3:])
    elif input_string[:3] == "dl=":
      decode_length = int(input_string[3:])
    elif input_string[:3] == "it=":
      input_type = input_string[3:]
    else:
      if input_type == "text":
        input_ids = vocabulary.encode(input_string)
        if has_input:
          input_ids.append(text_encoder.EOS_ID)
        # Header triple followed by ids, zero-padded to const_array_size.
        x = [num_samples, decode_length, len(input_ids)] + input_ids
        assert len(x) < const_array_size
        x += [0] * (const_array_size - len(x))
        features = {
            "inputs": np.array(x).astype(np.int32),
        }
      elif input_type == "image":
        input_path = input_string
        # For image problems the vocabulary's encode loads pixel data.
        img = vocabulary.encode(input_path)
        features = {
            "inputs": img.astype(np.int32),
        }
      elif input_type == "label":
        input_ids = [int(input_string)]
        x = [num_samples, decode_length, len(input_ids)] + input_ids
        features = {
            "inputs": np.array(x).astype(np.int32),
        }
      else:
        raise Exception("Unsupported input type.")
      # Attach problem-level feature hparams (e.g. space ids) as int32.
      for k, v in six.iteritems(
          problem_lib.problem_hparams_to_features(p_hparams)):
        features[k] = np.array(v).astype(np.int32)
      yield features
def save_video(video, save_path_template):
  """Write every frame of ``video`` to its own image file.

  Args:
    video: iterable of frames convertible to uint8 arrays.
    save_path_template: format string; ``.format(i)`` produces the path
      for frame i.

  Raises:
    NotImplementedError: if PIL is not installed.
  """
  try:
    from PIL import Image  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    tf.logging.warning(
        "Showing and saving an image requires PIL library to be "
        "installed: %s", e)
    raise NotImplementedError("Image display and save not implemented.")
  for frame_index, frame in enumerate(video):
    frame_path = save_path_template.format(frame_index)
    pil_image = Image.fromarray(np.uint8(frame))
    with tf.gfile.Open(frame_path, "wb") as frame_file:
      pil_image.save(frame_file)
def show_and_save_image(img, save_path):
  """Shows an image using matplotlib and saves it.

  Args:
    img: array-like image accepted by ``plt.imshow``.
    save_path: destination path; written through tf.gfile.

  Raises:
    NotImplementedError: if matplotlib is not installed.
  """
  try:
    import matplotlib.pyplot as plt  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    tf.logging.warning(
        "Showing and saving an image requires matplotlib to be "
        "installed: %s", e)
    raise NotImplementedError("Image display and save not implemented.")
  plt.imshow(img)
  # Save the current figure (which now contains the image) via gfile so
  # remote filesystems are supported.
  with tf.gfile.Open(save_path, "wb") as sp:
    plt.savefig(sp)
def _get_sorted_inputs(filename, delimiter="\n"):
  """Returning inputs sorted according to decreasing length.

  This causes inputs of similar lengths to be processed in the same batch,
  facilitating early stopping for short sequences.

  Longer sequences are sorted first so that if you're going to get OOMs,
  you'll see it in the first batch.

  Args:
    filename: path to file with inputs, 1 per line.
    delimiter: str, delimits records in the file.

  Returns:
    a sorted list of inputs
  """
  tf.logging.info("Getting sorted inputs")
  with tf.gfile.Open(filename) as f:
    inputs = [record.strip() for record in f.read().split(delimiter)]
    # Drop the trailing empty record produced by a final delimiter.
    if not inputs[-1]:
      inputs.pop()
  # Sort by descending whitespace-token count; Python's sort is stable, so
  # equal-length inputs keep their original relative order.
  length_order = sorted(
      enumerate(inputs), key=lambda pair: -len(pair[1].split()))
  sorted_inputs = [line for _, line in length_order]
  # Map original index -> position in the sorted order, so callers can
  # restore the original ordering later.
  sorted_keys = {}
  for position, (original_index, _) in enumerate(length_order):
    sorted_keys[original_index] = position
  return sorted_inputs, sorted_keys
def _save_until_eos(ids, skip=False):
  """Strips everything after the first <EOS> token, which is normally 1.

  Args:
    ids: numpy array of token ids (any shape; flattened here).
    skip: when True, return the flattened ids unmodified.

  Returns:
    1-D numpy array truncated just before the first EOS (exclusive), or
    the full flattened array when no EOS is present.
  """
  flat_ids = ids.flatten()
  if skip:
    return flat_ids
  try:
    eos_index = list(flat_ids).index(text_encoder.EOS_ID)
  except ValueError:
    # No EOS_ID: return the array as-is.
    return flat_ids
  return flat_ids[0:eos_index]
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
  """Convert the interactive input format (see above) to a dictionary.

  Args:
    feature_map: dict with inputs.
    hparams: model hyperparameters

  Returns:
    a features dictionary, as expected by the decoder.
  """
  inputs = tf.convert_to_tensor(feature_map["inputs"])
  # Rank >= 3 is treated as image data; rank < 3 as the packed
  # [num_samples, decode_length, len, ids..., padding] text format.
  input_is_image = False if len(inputs.get_shape()) < 3 else True
  x = inputs
  if input_is_image:
    x = tf.image.resize_images(x, [299, 299])
    x = tf.reshape(x, [1, 299, 299, -1])
    x = tf.to_int32(x)
  else:
    # Remove the batch dimension.
    num_samples = x[0]
    length = x[2]
    # Slice out just the token ids (they start at offset 3 of the header).
    x = tf.slice(x, [3], tf.to_int32([length]))
    x = tf.reshape(x, [1, -1, 1, 1])
    # Transform into a batch of size num_samples to get that many random
    # decodes.
    x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))
  p_hparams = hparams.problem_hparams
  input_space_id = tf.constant(p_hparams.input_space_id)
  target_space_id = tf.constant(p_hparams.target_space_id)
  features = {}
  features["input_space_id"] = input_space_id
  features["target_space_id"] = target_space_id
  # For text input, position 1 of the packed array is the decode length.
  features["decode_length"] = (
      IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
  features["inputs"] = x
  return features
def _decode_input_tensor_to_features_dict(feature_map, hparams):
  """Convert the interactive input format (see above) to a dictionary.

  Args:
    feature_map: dict with inputs.
    hparams: model hyperparameters

  Returns:
    a features dictionary, as expected by the decoder.
  """
  inputs = tf.convert_to_tensor(feature_map["inputs"])
  # Batched file decoding never produces image input on this path.
  input_is_image = False
  x = inputs
  p_hparams = hparams.problem_hparams
  # Add a third empty dimension
  x = tf.expand_dims(x, axis=[2])
  x = tf.to_int32(x)
  input_space_id = tf.constant(p_hparams.input_space_id)
  target_space_id = tf.constant(p_hparams.target_space_id)
  features = {}
  features["input_space_id"] = input_space_id
  features["target_space_id"] = target_space_id
  # Allow the decoder to run 50 steps beyond the input length.
  features["decode_length"] = (
      IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)
  features["inputs"] = x
  return features
def get_step_from_ckpt_path(path):
  """Return the global step encoded in a checkpoint path.

  Checkpoint basenames look like ``model.ckpt-1234``; the step is the
  segment after the first dash.
  """
  ckpt_basename = os.path.basename(path)
  step_segment = ckpt_basename.split("-")[1]
  return int(step_segment)
def latest_checkpoint_step(ckpt_dir):
  """Return the global step of the newest checkpoint in ``ckpt_dir``.

  Returns None when the directory has no checkpoint state yet.
  """
  ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)
  if not ckpt_state:
    return None
  return get_step_from_ckpt_path(ckpt_state.model_checkpoint_path)
class DecodeHookArgs(collections.namedtuple(
    "DecodeHookArgs",
    ["estimator", "problem", "output_dirs", "hparams",
     "decode_hparams", "predictions"])):
  """Immutable bundle of arguments passed to post-decode hooks.

  Fields: the estimator that produced the decodes, the problem instance,
  the list of decode output directories, model hparams, decode hparams,
  and the accumulated predictions (may be empty when decoding to files).
  """
  pass
def run_postdecode_hooks(decode_hook_args, dataset_split):
  """Run hooks after decodes have run.

  Each hook may return TF summary values, which are written to a summary
  directory next to the decode output dir at the latest checkpoint's
  global step. Hooks are skipped entirely until a checkpoint exists.

  Args:
    decode_hook_args: DecodeHookArgs with estimator/problem/dirs/hparams.
    dataset_split: optional split name appended to the summary dir.
  """
  hooks = decode_hook_args.problem.decode_hooks
  if not hooks:
    return
  global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
  if global_step is None:
    tf.logging.info(
        "Skipping decode hooks because no checkpoint yet available.")
    return
  tf.logging.info("Running decode hooks.")
  # Summaries live in a sibling of the decode output directory.
  parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
  child_dir = decode_hook_args.decode_hparams.summaries_log_dir
  if dataset_split is not None:
    child_dir += "_{}".format(dataset_split)
  final_dir = os.path.join(parent_dir, child_dir)
  summary_writer = tf.summary.FileWriter(final_dir)
  for hook in hooks:
    # Isolate each hook in case it creates TF ops
    with tf.Graph().as_default():
      summaries = hook(decode_hook_args)
    if summaries:
      summary = tf.Summary(value=list(summaries))
      summary_writer.add_summary(summary, global_step)
  summary_writer.close()
  tf.logging.info("Decode hooks done.")
| 34.877619 | 80 | 0.669786 |
51e31291379372af5e260295e3d4c305fad978a1 | 1,179 | py | Python | tests/test_galaxy_importer.py | alikins/galaxy-importer-1 | f9c7aebd826250dd47b022388d45ff5d33b91068 | [
"Apache-2.0"
] | null | null | null | tests/test_galaxy_importer.py | alikins/galaxy-importer-1 | f9c7aebd826250dd47b022388d45ff5d33b91068 | [
"Apache-2.0"
] | null | null | null | tests/test_galaxy_importer.py | alikins/galaxy-importer-1 | f9c7aebd826250dd47b022388d45ff5d33b91068 | [
"Apache-2.0"
] | null | null | null | # (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import pytest
from galaxy_importer import main
def test_parser():
    """Exercise argument parsing: file positional and --print-result flag."""
    args = main.parse_args(['path/to/my_file.tar.gz'])
    assert args.file == 'path/to/my_file.tar.gz'
    assert not args.print_result

    args = main.parse_args(['my_file.tar.gz', '--print-result'])
    assert args.file == 'my_file.tar.gz'
    assert args.print_result

    # Omitting the required positional file argument makes argparse exit.
    with pytest.raises(SystemExit):
        main.parse_args(['--print-result'])
| 33.685714 | 72 | 0.732824 |
ab526aea26860fa082d9cd66287c90e3508a39bf | 19,527 | py | Python | test/world_test.py | Beliaar/bGrease | 24a0e3f2ed9ba57c8e3e83e16d16409ca2551c44 | [
"MIT"
] | 1 | 2018-07-17T03:53:43.000Z | 2018-07-17T03:53:43.000Z | test/world_test.py | Beliaar/bGrease | 24a0e3f2ed9ba57c8e3e83e16d16409ca2551c44 | [
"MIT"
] | null | null | null | test/world_test.py | Beliaar/bGrease | 24a0e3f2ed9ba57c8e3e83e16d16409ca2551c44 | [
"MIT"
] | null | null | null | import unittest
class TestComponent(dict):
world = None
runtime = 0
def __init__(self):
self.entities = set()
def set_world(self, world):
self.world = world
def add(self, entity, data=None):
self[entity] = data
self.entities.add(entity)
def step(self, dt):
self.runtime += dt
class TestSystem(object):
    """System double that records run time and a global step ordering.

    The class-level ``order`` counter increments on every step() of any
    instance, so tests can check the relative order systems ran in.
    """

    runtime = 0
    world = None
    order = 0

    def set_world(self, world):
        self.world = world

    def step(self, dt):
        self.runtime = self.runtime + dt
        next_order = TestSystem.order + 1
        TestSystem.order = next_order
        self.order = next_order
class TestSystemInjector(TestSystem):
    """System double that installs another system into the world on its
    first step, to test mid-run system injection."""

    def __init__(self, name, system):
        self.injected = False
        # Attribute name to bind on world.systems.
        self.name = name
        # The system object to inject.
        self.system = system

    def step(self, dt):
        TestSystem.step(self, dt)
        if not self.injected:
            # Inject exactly once, on the first step after being added.
            setattr(self.world.systems, self.name, self.system)
            self.injected = True
class TestRenderer(object):
    """Renderer double recording whether and in what order draw() ran.

    The class-level ``order`` counter increments on every draw() of any
    instance, so tests can check relative draw ordering.
    """

    world = None
    drawn = False
    order = 0

    def draw(self):
        self.drawn = True
        next_order = TestRenderer.order + 1
        TestRenderer.order = next_order
        self.order = next_order

    def set_world(self, world):
        self.world = world
class TestGL(object):
    """Stand-in for the GL module: records clear/identity calls."""

    matrix_reset = False
    cleared = False
    GL_DEPTH_BUFFER_BIT = 1
    GL_COLOR_BUFFER_BIT = 2

    def glClear(self, bits):
        # Remember the bitmask so tests can assert what was cleared.
        self.cleared = bits

    def glLoadIdentity(self):
        self.matrix_reset = True
class TestClock(object):
    """Clock double recording scheduled callbacks and tick counts.

    Scheduled entries are (callback, interval) pairs; one-shot schedule()
    entries use None as the interval.
    """

    def __init__(self, time_function=None):
        self.scheduled = []
        self.time_func = time_function
        self.ticks = 0

    def schedule_interval(self, what, interval):
        self.scheduled.append((what, interval))

    def schedule(self, what):
        self.scheduled.append((what, None))

    def unschedule(self, what):
        # Remove only the first matching entry, mirroring a real clock.
        for index, (callback, _) in enumerate(self.scheduled):
            if callback == what:
                del self.scheduled[index]
                return

    def tick(self, poll=True):
        self.ticks = self.ticks + 1
class TestModeManager(object):
    """Mode-manager double: tracks pushed handlers, dispatches to itself."""

    def __init__(self):
        self.handlers = []
        # The manager acts as its own event dispatcher in tests.
        self.event_dispatcher = self

    def push_handlers(self, handler):
        self.handlers.append(handler)

    def remove_handlers(self, handler):
        self.handlers.remove(handler)
class WorldTestCase(unittest.TestCase):
def test_defaults(self):
from bGrease import World
world = World(clock_factory=TestClock)
self.assertEqual(world.step_rate, 60)
self.assertFalse(world.active)
self.assertTrue(world.running)
self.assertEqual(world.time, 0)
self.assertTrue((world.step, 1.0/60) in world.clock.scheduled)
def test_overrides(self):
from bGrease import World
world = World(step_rate=30, clock_factory=TestClock)
self.assertEqual(world.step_rate, 30)
self.assertTrue((world.step, 1.0/30) in world.clock.scheduled)
def test_create_entities_in_world(self):
from bGrease import World, Entity
world = World()
self.assertFalse(world.entities)
e1 = Entity(world)
e2 = Entity(world)
self.assertEqual(len(world.entities), 2)
self.assertTrue(e1 in world.entities)
self.assertTrue(e1.world is world)
self.assertTrue(e2 in world.entities)
self.assertTrue(e2.world is world)
self.assertNotEqual(e1, e2)
def test_worlds_disjoint(self):
from bGrease import World, Entity
world1 = World()
world2 = World()
self.assertTrue(world1 is not world2)
e1 = Entity(world1)
e2 = Entity(world2)
self.assertEqual(len(world1.entities), 1)
self.assertEqual(len(world2.entities), 1)
self.assertTrue(e1.world is world1)
self.assertTrue(e2.world is world2)
self.assertTrue(e1 in world1.entities)
self.assertFalse(e2 in world1.entities)
self.assertFalse(e1 in world2.entities)
self.assertTrue(e2 in world2.entities)
self.assertNotEqual(e1, e2)
def test_remove_entity(self):
from bGrease import World, Entity
world = World()
comp1 = world.components.one = TestComponent()
comp2 = world.components.two = TestComponent()
comp3 = world.components.three = TestComponent()
entity = Entity(world)
comp1.add(entity)
comp2.add(entity)
self.assertTrue(entity in world.entities)
self.assertTrue(entity in comp1)
self.assertTrue(entity in comp2)
self.assertFalse(entity in comp3)
world.entities.remove(entity)
self.assertFalse(entity in world.entities)
self.assertFalse(entity in comp1)
self.assertFalse(entity in comp2)
self.assertFalse(entity in comp3)
self.assertRaises(KeyError, world.entities.remove, entity)
def test_discard_entity(self):
from bGrease import World, Entity
world = World()
comp1 = world.components.one = TestComponent()
comp2 = world.components.two = TestComponent()
comp3 = world.components.three = TestComponent()
entity = Entity(world)
comp1.add(entity)
comp2.add(entity)
self.assertTrue(entity in world.entities)
self.assertTrue(entity in comp1)
self.assertTrue(entity in comp2)
self.assertFalse(entity in comp3)
world.entities.discard(entity)
self.assertFalse(entity in world.entities)
self.assertFalse(entity in comp1)
self.assertFalse(entity in comp2)
self.assertFalse(entity in comp3)
world.entities.discard(entity)
self.assertFalse(entity in world.entities)
self.assertFalse(entity in comp1)
self.assertFalse(entity in comp2)
self.assertFalse(entity in comp3)
def test_entity_extent_component_access(self):
from bGrease import World, Entity
from bGrease.entity import ComponentEntitySet
world = World()
comp = world.components.test = TestComponent()
e1 = Entity(world)
e2 = Entity(world)
comp.add(e1)
comp.add(e2)
extent = world[Entity]
comp_set = extent.test
self.assertEqual(comp_set, set([e1, e2]))
self.assertRaises(AttributeError, getattr, extent, "hummina")
def test_entity_extent_membership_simple(self):
from bGrease import World, Entity
class MyEntity(Entity):
pass
class Another(Entity):
pass
world = World()
self.assertFalse(world.entities)
extent = world[MyEntity]
self.assertFalse(extent.entities)
entity1 = MyEntity(world)
self.assertTrue(entity1 in extent.entities)
entity2 = MyEntity(world)
self.assertTrue(entity2 in extent.entities)
world.entities.remove(entity2)
self.assertTrue(entity1 in extent.entities)
self.assertFalse(entity2 in extent.entities)
entity3 = Another(world)
self.assertFalse(entity3 in extent.entities)
self.assertTrue(entity3 in world[Another].entities)
def test_entity_superclass_extents(self):
from bGrease import World, Entity
class Superentity(Entity):
pass
class Subentity(Superentity):
pass
class SubSubentity(Subentity):
pass
class Another(Entity):
pass
world = World()
super_extent = world[Superentity]
super = Superentity(world)
sub = Subentity(world)
subsub = SubSubentity(world)
another = Another(world)
self.assertTrue(super in super_extent.entities)
self.assertTrue(sub in super_extent.entities)
self.assertTrue(subsub in super_extent.entities)
self.assertFalse(another in super_extent.entities)
sub_extent = world[Subentity]
self.assertFalse(super in sub_extent.entities)
self.assertTrue(sub in sub_extent.entities)
self.assertTrue(subsub in sub_extent.entities)
self.assertFalse(another in sub_extent.entities)
subsub_extent = world[SubSubentity]
self.assertFalse(super in subsub_extent.entities)
self.assertFalse(sub in subsub_extent.entities)
self.assertTrue(subsub in subsub_extent.entities)
self.assertFalse(another in subsub_extent.entities)
another_extent = world[Another]
self.assertFalse(super in another_extent.entities)
self.assertFalse(sub in another_extent.entities)
self.assertFalse(subsub in another_extent.entities)
self.assertTrue(another in another_extent.entities)
world.entities.remove(subsub)
self.assertFalse(subsub in super_extent.entities)
self.assertFalse(subsub in sub_extent.entities)
self.assertFalse(subsub in subsub_extent.entities)
self.assertFalse(subsub in another_extent.entities)
def test_union_extent(self):
from bGrease import World, Entity
class Entity1(Entity):
pass
class Entity2(Entity1):
pass
class Entity3(Entity):
pass
world = World()
entities = [Entity1(world), Entity2(world), Entity2(world), Entity3(world)]
union_extent_1_2 = world[Entity1, Entity2]
self.assertEqual(union_extent_1_2.entities, set(entities[:-1]))
union_extent_2_3 = world[Entity2, Entity3]
self.assertEqual(union_extent_2_3.entities, set(entities[1:]))
union_extent_1_3 = world[Entity1, Entity3]
self.assertEqual(union_extent_1_3.entities, set(entities))
def test_full_extent(self):
from bGrease import World, Entity
class Entity1(Entity):
pass
class Entity2(Entity1):
pass
class Entity3(Entity):
pass
world = World()
full_extent = world[...]
self.assertEqual(world.entities, full_extent.entities)
entities = set([Entity1(world), Entity2(world), Entity3(world), Entity1(world)])
self.assertEqual(world.entities, entities)
self.assertEqual(full_extent.entities, entities)
self.assertEqual(world[...].entities, entities)
def test_configure_components(self):
from bGrease import World
comp1 = TestComponent()
comp2 = TestComponent()
comp3 = TestComponent()
world = World()
self.assertEqual(len(world.components), 0)
world.components.one = comp1
world.components.two = comp2
world.components.three = comp3
self.assertEqual(list(world.components), [comp1, comp2, comp3])
self.assertTrue(comp1.world is world)
self.assertTrue(comp2.world is world)
self.assertTrue(comp3.world is world)
def test_set_components(self):
from bGrease import World
comp1 = TestComponent()
comp2 = TestComponent()
comp3 = TestComponent()
world = World()
self.assertFalse(world.systems)
self.assertFalse(world.components)
self.assertRaises(AttributeError, getattr, world, 'foobar')
world.components.foobar = comp1
self.assertTrue(world.components.foobar is comp1)
self.assertTrue(comp1.world is world)
self.assertEqual(len(world.components), 1)
self.assertRaises(AttributeError, getattr, world, 'spam')
world.components.spam = comp2
self.assertTrue(world.components.spam is comp2)
self.assertTrue(comp2.world is world)
self.assertEqual(len(world.components), 2)
self.assertRaises(AttributeError, getattr, world, 'foobar')
world.components.foobar = comp3
self.assertTrue(world.components.foobar is comp3)
self.assertTrue(comp3.world is world)
self.assertEqual(len(world.components), 2)
self.assertEqual(list(world.components), [comp3, comp2])
def test_del_component(self):
    """Deleting component attributes removes them in place; deleting an
    unknown name from the world raises AttributeError."""
    from bGrease import World
    world = World()
    first = world.components.one = TestComponent()
    second = world.components.two = TestComponent()
    third = world.components.three = TestComponent()
    self.assertEqual(list(world.components), [first, second, third])
    del world.components.two
    self.assertEqual(list(world.components), [first, third])
    del world.components.one
    self.assertEqual(list(world.components), [third])
    self.assertRaises(AttributeError, delattr, world, 'one')
def test_step_components(self):
    """world.step() forwards the timestep to every component's step() hook,
    accumulating runtime on each."""
    from bGrease import World, Entity
    world = World()
    comp1 = world.components.one = TestComponent()
    comp2 = world.components.two = TestComponent()
    entity = Entity(world)
    self.assertTrue(comp1.runtime == comp2.runtime == 0, comp1.runtime)
    world.step(0.05)
    self.assertEqual(comp1.runtime, 0.05)
    self.assertEqual(comp2.runtime, 0.05)
    world.step(0.06)
    # Runtimes accumulate across steps.
    self.assertEqual(comp1.runtime, 0.11)
    self.assertEqual(comp2.runtime, 0.11)
def test_join_components(self):
    """components.join() yields value tuples only for entities present in
    every named component, ordered by the argument order."""
    from bGrease import World, Entity
    world = World()
    comp1 = world.components.foo = TestComponent()
    comp2 = world.components.bar = TestComponent()
    comp3 = world.components.baz = TestComponent()
    entity = Entity(world)
    for i in range(20):
        # Rebinds `entity` to a plain object each pass: components key on
        # identity, so each iteration registers a distinct entity.
        entity = object()
        comp1.add(entity, i)
        if i < 5:
            comp2.add(entity, i * 10)
        if i < 3:
            comp3.add(entity, i * 100)
    # Only the 3 entities common to all three components appear.
    self.assertEqual(sorted(world.components.join('baz', 'bar', 'foo')), [
        (0, 0, 0), (100, 10, 1), (200, 20, 2)])
    self.assertEqual(sorted(world.components.join('foo', 'bar')), [
        (0, 0), (1, 10), (2, 20), (3, 30), (4, 40)])
    self.assertEqual(sorted(world.components.join('baz')), [
        (0,), (100,), (200,)])
def test_illegal_part_name(self):
    """Reserved names ('entities', underscore-prefixed) raise ComponentError,
    and shadowing an existing method ('insert') raises AttributeError, for
    components, systems and renderers alike."""
    from bGrease import World
    from bGrease.component import ComponentError
    world = World()
    containers = ((world.components, TestComponent),
                  (world.systems, TestSystem),
                  (world.renderers, TestRenderer))
    for container, make_part in containers:
        self.assertRaises(ComponentError, setattr, container, 'entities', make_part())
    for container, make_part in containers:
        self.assertRaises(ComponentError, setattr, container, '_reserved', make_part())
    for container, make_part in containers:
        self.assertRaises(AttributeError, setattr, container, 'insert', make_part())
def test_add_systems(self):
    """Systems assigned by attribute keep insertion order, are retrievable
    by name, and have their world bound."""
    from bGrease import World
    world = World()
    self.assertFalse(world.systems)
    first = world.systems.one = TestSystem()
    second = world.systems.two = TestSystem()
    third = world.systems.three = TestSystem()
    self.assertEqual(list(world.systems), [first, second, third])
    for attr_name, system in (('one', first), ('two', second), ('three', third)):
        self.assertTrue(getattr(world.systems, attr_name) is system)
    for system in (first, second, third):
        self.assertTrue(system.world is world)
def test_del_systems(self):
    """Deleting system attributes removes them in place; deleting an unknown
    name from the world raises AttributeError."""
    from bGrease import World
    world = World()
    sys1 = world.systems.one = TestSystem()
    sys2 = world.systems.two = TestSystem()
    sys3 = world.systems.three = TestSystem()
    self.assertEqual(list(world.systems), [sys1, sys2, sys3])
    del world.systems.two
    self.assertEqual(list(world.systems), [sys1, sys3])
    del world.systems.one
    self.assertEqual(list(world.systems), [sys3])
    self.assertRaises(AttributeError, delattr, world, 'one')
def test_insert_system(self):
    """systems.insert() places a system by name before another system
    (given by name or by reference) or at a numeric index."""
    from bGrease import World
    world = World()
    sys1 = world.systems.sys1 = TestSystem()
    sys2 = world.systems.sys2 = TestSystem()
    sys3 = world.systems.sys3 = TestSystem()
    inserted = TestSystem()
    # Insert before a system identified by name.
    world.systems.insert('inserted', inserted, before='sys2')
    self.assertEqual(list(world.systems), [sys1, inserted, sys2, sys3])
    self.assertTrue(world.systems.inserted is inserted)
    another = TestSystem()
    # Insert before a system identified by object reference.
    world.systems.insert('another', another, before=world.systems.sys3)
    self.assertTrue(world.systems.another is another)
    self.assertEqual(list(world.systems), [sys1, inserted, sys2, another, sys3])
    onemore = TestSystem()
    # Insert at an explicit position.
    world.systems.insert('onemore', onemore, index=1)
    self.assertEqual(list(world.systems), [sys1, onemore, inserted, sys2, another, sys3])
    self.assertTrue(world.systems.onemore is onemore)
def test_system_step_order(self):
    """world.step() runs systems in container order, including systems added
    via insert(); TestSystem records a global step counter in `order`."""
    from bGrease import World
    world = World()
    sys1 = world.systems.one = TestSystem()
    sys3 = world.systems.three = TestSystem()
    sys2 = TestSystem()
    world.systems.insert('two', sys2, index=1)
    self.assertEqual(len(world.systems), 3)
    self.assertTrue(sys1.runtime == sys2.runtime == sys3.runtime == 0)
    self.assertTrue(sys1.order == sys2.order == sys3.order == TestSystem.order)
    world.step(0.13)
    self.assertTrue(sys1.runtime == sys2.runtime == sys3.runtime == 0.13)
    # Consecutive order stamps prove one→two→three execution.
    start = sys1.order
    self.assertEqual(sys2.order, start + 1)
    self.assertEqual(sys3.order, start + 2)
def test_add_system_during_run(self):
    """A system added while step() is running joins the world but is only
    stepped from the following step onward."""
    from bGrease import World
    world = World()
    sys1 = world.systems.sys1 = TestSystem()
    to_inject = TestSystem()
    # TestSystemInjector adds `to_inject` to the world during its own step().
    injector = world.systems.injector = TestSystemInjector('injected', to_inject)
    self.assertEqual(len(world.systems), 2)
    self.assertTrue(sys1.runtime == to_inject.runtime == injector.runtime == 0)
    self.assertFalse(injector.injected)
    world.step(0.1)
    self.assertEqual(len(world.systems), 3)
    self.assertEqual(sys1.runtime, 0.1)
    self.assertEqual(injector.runtime, 0.1)
    # Injected mid-step, so it must not have been stepped this pass.
    self.assertEqual(to_inject.runtime, 0)
    self.assertTrue(injector.injected)
    world.step(0.1)
    self.assertEqual(len(world.systems), 3)
    self.assertEqual(sys1.runtime, 0.2)
    self.assertEqual(injector.runtime, 0.2)
    self.assertEqual(to_inject.runtime, 0.1)
def test_activate(self):
    """Activating a world schedules its tick on the master clock and pushes
    its systems as mode-manager handlers.

    Returns (world, manager) so test_deactivate can reuse the fixture.
    """
    from bGrease import World
    world = World(master_clock=TestClock())
    sys1 = world.systems.one = TestSystem()
    sys2 = world.systems.two = TestSystem()
    manager = TestModeManager()
    self.assertFalse(world.active)
    world.activate(manager)
    self.assertTrue(world.manager is manager, world.manager)
    self.assertTrue(world.active)
    self.assertTrue((world.tick, None) in world.master_clock.scheduled)
    self.assertTrue(sys1 in manager.handlers)
    self.assertTrue(sys2 in manager.handlers)
    return world, manager
def test_deactivate(self):
    """Deactivating undoes everything activate() did: clock unschedule and
    handler removal."""
    # Reuse the activated fixture built (and asserted) by test_activate.
    world, manager = self.test_activate()
    sys1, sys2 = world.systems
    world.deactivate(manager)
    self.assertFalse(world.active)
    self.assertFalse((world.tick, None) in world.master_clock.scheduled)
    self.assertFalse(sys1 in manager.handlers)
    self.assertFalse(sys2 in manager.handlers)
def test_tick_increments_world_time(self):
    """tick(dt) advances world.time by dt, keeps the internal clock's
    time_func in sync, and ticks the clock once per call."""
    from bGrease import World
    world = World(clock_factory=TestClock)
    self.assertEqual(world.time, 0)
    self.assertEqual(world.clock.ticks, 0)
    self.assertEqual(world.clock.time_func(), world.time)
    dt = 1.0/30.0
    world.tick(dt)
    self.assertAlmostEqual(world.time, dt)
    self.assertEqual(world.clock.time_func(), world.time)
    self.assertEqual(world.clock.ticks, 1)
    world.tick(dt)
    self.assertAlmostEqual(world.time, dt*2)
    self.assertEqual(world.clock.time_func(), world.time)
    self.assertEqual(world.clock.ticks, 2)
def test_running(self):
    """Ticks advance world.time only while the running flag is set."""
    from bGrease import World
    world = World()
    self.assertTrue(world.running)
    self.assertEqual(world.time, 0)
    step = 1.0 / 30.0
    world.tick(step)
    self.assertAlmostEqual(world.time, step)
    # While paused, ticks are no-ops.
    world.running = False
    for _ in range(2):
        world.tick(step)
    self.assertAlmostEqual(world.time, step)
    # Resuming makes ticks count again.
    world.running = True
    world.tick(step)
    self.assertAlmostEqual(world.time, step * 2)
def test_step_max_dt(self):
    """A huge dt passed to step() is clamped, so components and systems see
    at most 10/step_rate seconds of simulated time."""
    from bGrease import World
    world = World()
    sys1 = world.systems.sys = TestSystem()
    comp1 = world.components.foo = TestComponent()
    world.step(10000)
    self.assertEqual(comp1.runtime, 10.0 / world.step_rate)
    self.assertEqual(sys1.runtime, 10.0 / world.step_rate)
def test_set_renderers(self):
    """Renderers keep assignment order; arbitrary objects are accepted, and
    objects with a set_world() hook get their world bound."""
    from bGrease import World
    world = World()
    self.assertEqual(tuple(world.renderers), ())
    renderer1 = world.renderers.one = TestRenderer()
    renderer2 = world.renderers.two = TestRenderer()
    renderer3 = world.renderers.three = object() # arbitrary objects can be renderers
    self.assertEqual(tuple(world.renderers), (renderer1, renderer2, renderer3))
    # objects with a set_world() method should have it called when set
    self.assertTrue(renderer1.world is world)
    self.assertTrue(renderer2.world is world)
def test_on_draw(self):
    """on_draw() clears the GL state, resets the matrix, and draws every
    renderer in container order."""
    from bGrease import World
    world = World()
    renderer1 = world.renderers.one = TestRenderer()
    renderer2 = world.renderers.two = TestRenderer()
    gl = TestGL()
    self.assertFalse(gl.cleared)
    self.assertFalse(gl.matrix_reset)
    self.assertFalse(renderer1.drawn)
    self.assertFalse(renderer2.drawn)
    world.on_draw(gl=gl)
    self.assertTrue(gl.cleared)
    self.assertTrue(gl.matrix_reset)
    self.assertTrue(renderer1.drawn)
    self.assertTrue(renderer2.drawn)
    # Consecutive order stamps prove one was drawn before two.
    start = renderer1.order
    self.assertEqual(renderer2.order, start + 1)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 32.383085 | 87 | 0.741537 |
6cc3ba613294da0f2a44a84dddcf1f39faa367ca | 28,316 | py | Python | conans/client/cmd/uploader.py | rweickelt/conan | 2e5b21e82618d471c16e8ad6b8872d80c03fe609 | [
"MIT"
] | null | null | null | conans/client/cmd/uploader.py | rweickelt/conan | 2e5b21e82618d471c16e8ad6b8872d80c03fe609 | [
"MIT"
] | null | null | null | conans/client/cmd/uploader.py | rweickelt/conan | 2e5b21e82618d471c16e8ad6b8872d80c03fe609 | [
"MIT"
] | null | null | null | import os
import stat
import tarfile
import time
from collections import defaultdict
from tqdm import tqdm
from conans.client.remote_manager import is_package_snapshot_complete
from conans.client.source import complete_recipe_sources
from conans.errors import ConanException, NotFoundException
from conans.model.manifest import gather_files, FileTreeManifest
from conans.model.ref import ConanFileReference, PackageReference, check_valid_ref
from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,
EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)
from conans.search.search import search_packages, search_recipes
from conans.util.files import (load, clean_dirty, is_dirty,
gzopen_without_timestamps, set_dirty)
from conans.util.log import logger
from conans.util.tracer import (log_recipe_upload, log_compressed_files,
log_package_upload)
# Upload overwrite policies accepted by CmdUpload.upload().
UPLOAD_POLICY_FORCE = "force-upload"  # upload everything, skipping date/manifest checks
UPLOAD_POLICY_NO_OVERWRITE = "no-overwrite"  # refuse to overwrite changed recipes or packages
UPLOAD_POLICY_NO_OVERWRITE_RECIPE = "no-overwrite-recipe"  # refuse to overwrite changed recipes only
UPLOAD_POLICY_SKIP = "skip-upload"  # dry-run: compress and check, but transfer nothing
class CmdUpload(object):
    """ This class is responsible for uploading packages to remotes. The flow is:
    - Collect all the data from the local cache:
        - Collect the refs that matches the given pattern _collect_refs_to_upload
        - Collect for every ref all the binaries IDs that has to be uploaded
          "_collect_packages_to_upload". This may discard binaries that do not
          belong to the current RREV
        The collection of this does the interactivity (ask user if yes/no),
        the errors (don't upload packages with policy=build_always, and computing
        the full REVISIONS for every that has to be uploaded.
        No remote API calls are done in this step, everything is local
    - Execute the upload. For every ref:
        - Upload the recipe of the ref: "_upload_recipe"
            - If not FORCE, check the date "_check_recipe_date", i.e. if there are
              changes, do not allow uploading if the remote date is newer than the
              local cache one
            - Retrieve the sources (exports_sources), if they are not cached, and
              uploading to a different remote. "complete_recipe_sources"
            - Gather files and create 2 .tgz (exports, exports_sources) with
              "_compress_recipe_files"
            - Decide which files have to be uploaded and deleted from the server
              based on the different with the remote snapshot "_recipe_files_to_upload"
              This can raise if upload policy is not overwrite
            - Execute the real transfer "remote_manager.upload_recipe()"
        - For every package_id of every ref: "_upload_package"
            - Gather files and create package.tgz. "_compress_package_files"
            - (Optional) Do the integrity check of the package
            - Decide which files to upload and delete from server:
              "_package_files_to_upload". Can raise if policy is NOT overwrite
            - Do the actual upload

    All the REVISIONS are local defined, not retrieved from servers

    This requires calling to the remote API methods:
    - get_recipe_sources() to get the export_sources if they are missing
    - get_recipe_snapshot() to do the diff and know what files to upload
    - get_package_snapshot() to do the diff and know what files to upload
    - get_recipe_manifest() to check the date and raise if policy requires
    - get_package_manifest() to raise if policy!=force and manifests change
    """
    def __init__(self, cache, user_io, remote_manager, loader, hook_manager):
        # Local package cache (ClientCache): layouts, metadata, config.
        self._cache = cache
        # User interaction + output stream (confirmation prompts, messages).
        self._user_io = user_io
        # Transfers files to/from remotes and queries snapshots/manifests.
        self._remote_manager = remote_manager
        # Loads conanfile classes from paths in the cache.
        self._loader = loader
        # Fires pre/post upload hooks.
        self._hook_manager = hook_manager
def upload(self, reference_or_pattern, remotes, upload_recorder, package_id=None,
           all_packages=None, confirm=False, retry=None, retry_wait=None, integrity_check=False,
           policy=None, query=None):
    """Entry point: resolve what to upload, then upload it remote by remote.

    :param reference_or_pattern: exact reference or glob pattern of recipes.
    :param remotes: registry of configured remotes (may have one selected).
    :param upload_recorder: collects what was uploaded, for reporting.
    :param package_id: upload only this binary (requires exact reference).
    :param all_packages: upload every binary of each matched recipe.
    :param confirm: skip the interactive yes/no prompt.
    :param retry/retry_wait: transfer retry settings, forwarded to transport.
    :param integrity_check: verify package manifests before uploading.
    :param policy: one of the UPLOAD_POLICY_* constants.
    :param query: package search filter (alternative to all_packages).
    """
    t1 = time.time()
    refs, confirm = self._collects_refs_to_upload(package_id, reference_or_pattern, confirm)
    refs_by_remote = self._collect_packages_to_upload(refs, confirm, remotes, all_packages,
                                                      query, package_id)
    # Do the job
    for remote, refs in refs_by_remote.items():
        self._user_io.out.info("Uploading to remote '{}':".format(remote.name))
        for (ref, conanfile, prefs) in refs:
            self._upload_ref(conanfile, ref, prefs, retry, retry_wait,
                             integrity_check, policy, remote, upload_recorder, remotes)
    logger.debug("UPLOAD: Time manager upload: %f" % (time.time() - t1))
def _collects_refs_to_upload(self, package_id, reference_or_pattern, confirm):
    """ validate inputs and compute the refs (without revisions) to be uploaded
    """
    # -p only makes sense together with one exact reference, never a pattern.
    if package_id and not check_valid_ref(reference_or_pattern, allow_pattern=False):
        raise ConanException("-p parameter only allowed with a valid recipe reference, "
                             "not with a pattern")
    if package_id or check_valid_ref(reference_or_pattern, allow_pattern=False):
        # Upload package
        # An exact reference implies consent: no confirmation prompt needed.
        ref = ConanFileReference.loads(reference_or_pattern)
        refs = [ref, ]
        confirm = True
    else:
        # Pattern: expand against the local cache.
        refs = search_recipes(self._cache, reference_or_pattern)
        if not refs:
            raise NotFoundException(("No packages found matching pattern '%s'" %
                                     reference_or_pattern))
    return refs, confirm
def _collect_packages_to_upload(self, refs, confirm, remotes, all_packages, query, package_id):
    """ compute the references with revisions and the package_ids to be uploaded
    """
    # Group recipes by remote
    refs_by_remote = defaultdict(list)
    for ref in refs:
        metadata = self._cache.package_layout(ref).load_metadata()
        # Work with the fully-revisioned reference from local metadata.
        ref = ref.copy_with_rev(metadata.recipe.revision)
        remote = remotes.selected
        if remote:
            # An explicitly selected remote (-r) wins over the recipe's own.
            ref_remote = remote
        else:
            ref_remote = metadata.recipe.remote
            ref_remote = remotes.get_remote(ref_remote)
        upload = True
        if not confirm:
            msg = "Are you sure you want to upload '%s' to '%s'?" % (str(ref), ref_remote.name)
            upload = self._user_io.request_boolean(msg)
        if upload:
            try:
                conanfile_path = self._cache.package_layout(ref).conanfile()
                conanfile = self._loader.load_class(conanfile_path)
            except NotFoundException:
                raise NotFoundException(("There is no local conanfile exported as %s" %
                                         str(ref)))
            # TODO: This search of binary packages has to be improved, more robust
            # So only real packages are retrieved
            if all_packages or query:
                if all_packages:
                    query = None
                # better to do a search, that will retrieve real packages with ConanInfo
                # Not only "package_id" folders that could be empty
                package_layout = self._cache.package_layout(ref.copy_clear_rev())
                packages = search_packages(package_layout, query)
                packages_ids = list(packages.keys())
            elif package_id:
                packages_ids = [package_id, ]
            else:
                # Recipe-only upload: no binaries.
                packages_ids = []
            if packages_ids:
                if conanfile.build_policy == "always":
                    raise ConanException("Conanfile '%s' has build_policy='always', "
                                         "no packages can be uploaded" % str(ref))
            prefs = []
            # Gather all the complete PREFS with PREV
            for package_id in packages_ids:
                if package_id not in metadata.packages:
                    raise ConanException("Binary package %s:%s not found"
                                         % (str(ref), package_id))
                # Filter packages that don't match the recipe revision
                if self._cache.config.revisions_enabled and ref.revision:
                    rec_rev = metadata.packages[package_id].recipe_revision
                    if ref.revision != rec_rev:
                        self._user_io.out.warn("Skipping package '%s', it doesn't belong to the"
                                               " current recipe revision" % package_id)
                        continue
                package_revision = metadata.packages[package_id].revision
                assert package_revision is not None, "PREV cannot be None to upload"
                prefs.append(PackageReference(ref, package_id, package_revision))
            refs_by_remote[ref_remote].append((ref, conanfile, prefs))
    return refs_by_remote
def _upload_ref(self, conanfile, ref, prefs, retry, retry_wait, integrity_check, policy,
                recipe_remote, upload_recorder, remotes):
    """ Uploads the recipes and binaries identified by ref
    """
    assert (ref.revision is not None), "Cannot upload a recipe without RREV"
    conanfile_path = self._cache.package_layout(ref).conanfile()
    # FIXME: I think it makes no sense to specify a remote to "pre_upload"
    # FIXME: because the recipe can have one and the package a different one
    self._hook_manager.execute("pre_upload", conanfile_path=conanfile_path,
                               reference=ref, remote=recipe_remote)
    self._user_io.out.info("Uploading %s to remote '%s'" % (str(ref), recipe_remote.name))
    self._upload_recipe(ref, conanfile, retry, retry_wait, policy, recipe_remote, remotes)
    upload_recorder.add_recipe(ref, recipe_remote.name, recipe_remote.url)
    # Now the binaries
    if prefs:
        total = len(prefs)
        for index, pref in enumerate(prefs):
            # Binaries go to the same remote as the recipe.
            p_remote = recipe_remote
            msg = ("Uploading package %d/%d: %s to '%s'" % (index+1, total, str(pref.id),
                                                            p_remote.name))
            self._user_io.out.info(msg)
            self._upload_package(pref, retry, retry_wait,
                                 integrity_check, policy, p_remote)
            upload_recorder.add_package(pref, p_remote.name, p_remote.url)
    # FIXME: I think it makes no sense to specify a remote to "post_upload"
    # FIXME: because the recipe can have one and the package a different one
    self._hook_manager.execute("post_upload", conanfile_path=conanfile_path, reference=ref,
                               remote=recipe_remote)
def _upload_recipe(self, ref, conanfile, retry, retry_wait, policy, remote, remotes):
    """Upload one recipe: fetch missing sources, compress, diff against the
    remote snapshot and transfer only what changed. Returns the ref."""
    current_remote_name = self._cache.package_layout(ref).load_metadata().recipe.remote
    if remote.name != current_remote_name:
        # Target remote differs from the recipe's origin: make sure the
        # exports_sources are locally available before uploading.
        complete_recipe_sources(self._remote_manager, self._cache, conanfile, ref, remotes)
    conanfile_path = self._cache.package_layout(ref).conanfile()
    self._hook_manager.execute("pre_upload_recipe", conanfile_path=conanfile_path,
                               reference=ref, remote=remote)
    t1 = time.time()
    the_files = self._compress_recipe_files(ref)
    local_manifest = FileTreeManifest.loads(load(the_files["conanmanifest.txt"]))
    remote_manifest = None
    if policy != UPLOAD_POLICY_FORCE:
        # Raises if the remote recipe is newer than the local one.
        remote_manifest = self._check_recipe_date(ref, remote, local_manifest)
    if policy == UPLOAD_POLICY_SKIP:
        # Dry run: everything was checked/compressed, but nothing transferred.
        return ref
    files_to_upload, deleted = self._recipe_files_to_upload(ref, policy, the_files,
                                                            remote, remote_manifest,
                                                            local_manifest)
    if files_to_upload or deleted:
        self._remote_manager.upload_recipe(ref, files_to_upload, deleted,
                                           remote, retry, retry_wait)
        self._upload_recipe_end_msg(ref, remote)
    else:
        self._user_io.out.info("Recipe is up to date, upload skipped")
    duration = time.time() - t1
    log_recipe_upload(ref, duration, the_files, remote.name)
    self._hook_manager.execute("post_upload_recipe", conanfile_path=conanfile_path,
                               reference=ref, remote=remote)
    # The recipe wasn't in the registry or it has changed the revision field only
    if not current_remote_name:
        with self._cache.package_layout(ref).update_metadata() as metadata:
            metadata.recipe.remote = remote.name
    return ref
def _upload_package(self, pref, retry=None, retry_wait=None, integrity_check=False,
                    policy=None, p_remote=None):
    """Upload one binary package (pref must carry both RREV and PREV).

    Compresses the package, diffs against the remote snapshot, transfers
    the difference, and records the remote in local metadata if it was
    previously unset. Returns the pref, or None when policy is skip-upload.
    """
    assert (pref.revision is not None), "Cannot upload a package without PREV"
    assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
    conanfile_path = self._cache.package_layout(pref.ref).conanfile()
    self._hook_manager.execute("pre_upload_package", conanfile_path=conanfile_path,
                               reference=pref.ref,
                               package_id=pref.id,
                               remote=p_remote)
    t1 = time.time()
    the_files = self._compress_package_files(pref, integrity_check)
    if policy == UPLOAD_POLICY_SKIP:
        return None
    files_to_upload, deleted = self._package_files_to_upload(pref, policy, the_files, p_remote)
    if files_to_upload or deleted:
        self._remote_manager.upload_package(pref, files_to_upload, deleted, p_remote, retry,
                                            retry_wait)
        logger.debug("UPLOAD: Time upload package: %f" % (time.time() - t1))
    else:
        self._user_io.out.info("Package is up to date, upload skipped")
    duration = time.time() - t1
    log_package_upload(pref, duration, the_files, p_remote)
    self._hook_manager.execute("post_upload_package", conanfile_path=conanfile_path,
                               reference=pref.ref, package_id=pref.id, remote=p_remote)
    logger.debug("UPLOAD: Time uploader upload_package: %f" % (time.time() - t1))
    # Remember where this binary lives if it had no remote assigned yet.
    metadata = self._cache.package_layout(pref.ref).load_metadata()
    cur_package_remote = metadata.packages[pref.id].remote
    if not cur_package_remote and policy != UPLOAD_POLICY_SKIP:
        with self._cache.package_layout(pref.ref).update_metadata() as metadata:
            metadata.packages[pref.id].remote = p_remote.name
    return pref
def _compress_recipe_files(self, ref):
    """Gather the recipe's export files and build the upload file dict
    (conanfile, manifest and the export/export_sources tgz archives)."""
    export_folder = self._cache.package_layout(ref).export()
    # Discard half-written tgz archives from a previously interrupted upload.
    for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):
        tgz_path = os.path.join(export_folder, f)
        if is_dirty(tgz_path):
            self._user_io.out.warn("%s: Removing %s, marked as dirty" % (str(ref), f))
            os.remove(tgz_path)
            clean_dirty(tgz_path)
    files, symlinks = gather_files(export_folder)
    if CONANFILE not in files or CONAN_MANIFEST not in files:
        raise ConanException("Cannot upload corrupted recipe '%s'" % str(ref))
    export_src_folder = self._cache.package_layout(ref).export_sources()
    src_files, src_symlinks = gather_files(export_src_folder)
    # Delegate the actual tgz creation to the module-level helper.
    the_files = _compress_recipe_files(files, symlinks, src_files, src_symlinks, export_folder,
                                       self._user_io.out)
    return the_files
def _compress_package_files(self, pref, integrity_check):
    """Gather a binary package's files, optionally verify its manifest, and
    build the upload file dict (conaninfo, manifest, package tgz)."""
    t1 = time.time()
    # existing package, will use short paths if defined
    package_folder = self._cache.package_layout(pref.ref, short_paths=None).package(pref)
    if is_dirty(package_folder):
        raise ConanException("Package %s is corrupted, aborting upload.\n"
                             "Remove it with 'conan remove %s -p=%s'"
                             % (pref, pref.ref, pref.id))
    # Discard a half-written tgz from a previously interrupted upload.
    tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME)
    if is_dirty(tgz_path):
        self._user_io.out.warn("%s: Removing %s, marked as dirty"
                               % (str(pref), PACKAGE_TGZ_NAME))
        os.remove(tgz_path)
        clean_dirty(tgz_path)
    # Get all the files in that directory
    files, symlinks = gather_files(package_folder)
    if CONANINFO not in files or CONAN_MANIFEST not in files:
        logger.error("Missing info or manifest in uploading files: %s" % (str(files)))
        raise ConanException("Cannot upload corrupted package '%s'" % str(pref))
    logger.debug("UPLOAD: Time remote_manager build_files_set : %f" % (time.time() - t1))
    if integrity_check:
        self._package_integrity_check(pref, files, package_folder)
        logger.debug("UPLOAD: Time remote_manager check package integrity : %f"
                     % (time.time() - t1))
    the_files = _compress_package_files(files, symlinks, package_folder, self._user_io.out)
    return the_files
def _recipe_files_to_upload(self, ref, policy, the_files, remote, remote_manifest,
                            local_manifest):
    """Diff local recipe files against the remote snapshot.

    Returns (files_to_upload, deleted); (None, None) means the remote copy
    is identical and nothing must be transferred. Raises on a changed recipe
    when the policy forbids overwriting.
    """
    self._remote_manager.check_credentials(remote)
    remote_snapshot = self._remote_manager.get_recipe_snapshot(ref, remote)
    # Normalize path separators for the server.
    files_to_upload = {filename.replace("\\", "/"): path
                       for filename, path in the_files.items()}
    if not remote_snapshot:
        return files_to_upload, set()
    deleted = set(remote_snapshot).difference(the_files)
    if policy != UPLOAD_POLICY_FORCE:
        if remote_manifest is None:
            # This is the weird scenario, we have a snapshot but don't have a manifest.
            # Can be due to concurrency issues, so we can try retrieve it now
            try:
                remote_manifest, _ = self._remote_manager.get_recipe_manifest(ref, remote)
            except NotFoundException:
                # This is weird, the manifest still not there, better upload everything
                self._user_io.out.warn("The remote recipe doesn't have the 'conanmanifest.txt' "
                                       "file and will be uploaded: '{}'".format(ref))
                return files_to_upload, deleted
        if remote_manifest == local_manifest:
            return None, None
        if policy in (UPLOAD_POLICY_NO_OVERWRITE, UPLOAD_POLICY_NO_OVERWRITE_RECIPE):
            raise ConanException("Local recipe is different from the remote recipe. "
                                 "Forbidden overwrite.")
    return files_to_upload, deleted
def _package_files_to_upload(self, pref, policy, the_files, remote):
    """Diff local package files against the remote snapshot.

    Returns (files_to_upload, deleted); (None, None) when the remote copy is
    identical. Raises if the package changed and policy is no-overwrite.
    """
    self._remote_manager.check_credentials(remote)
    remote_snapshot = self._remote_manager.get_package_snapshot(pref, remote)
    if remote_snapshot and policy != UPLOAD_POLICY_FORCE:
        if not is_package_snapshot_complete(remote_snapshot):
            # Incomplete remote upload: re-upload everything, delete nothing.
            return the_files, set([])
        remote_manifest, _ = self._remote_manager.get_package_manifest(pref, remote)
        local_manifest = FileTreeManifest.loads(load(the_files["conanmanifest.txt"]))
        if remote_manifest == local_manifest:
            return None, None
        if policy == UPLOAD_POLICY_NO_OVERWRITE:
            raise ConanException("Local package is different from the remote package. Forbidden"
                                 " overwrite.")
    deleted = set(remote_snapshot).difference(the_files)
    return the_files, deleted
def _upload_recipe_end_msg(self, ref, remote):
msg = "Uploaded conan recipe '%s' to '%s'" % (str(ref), remote.name)
url = remote.url.replace("https://api.bintray.com/conan", "https://bintray.com")
msg += ": %s" % url
self._user_io.out.writeln("")
self._user_io.out.info(msg)
def _package_integrity_check(self, pref, files, package_folder):
    """Compare the package's stored manifest against one recomputed from the
    files on disk; raise ConanException (and drop any stale tgz) on mismatch."""
    # If package has been modified remove tgz to regenerate it
    self._user_io.out.rewrite_line("Checking package integrity...")
    # short_paths = None is enough if there exist short_paths
    layout = self._cache.package_layout(pref.ref, short_paths=None)
    read_manifest, expected_manifest = layout.package_manifests(pref)
    if read_manifest != expected_manifest:
        self._user_io.out.writeln("")
        diff = read_manifest.difference(expected_manifest)
        for fname, (h1, h2) in diff.items():
            self._user_io.out.warn("Mismatched checksum '%s' (manifest: %s, file: %s)"
                                   % (fname, h1, h2))
        if PACKAGE_TGZ_NAME in files:
            # The tgz was built from corrupted files; drop it so a later
            # upload regenerates it. Best effort: ignore removal errors.
            try:
                tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME)
                os.unlink(tgz_path)
            except Exception:
                pass
        error_msg = os.linesep.join("Mismatched checksum '%s' (manifest: %s, file: %s)"
                                    % (fname, h1, h2) for fname, (h1, h2) in diff.items())
        logger.error("Manifests doesn't match!\n%s" % error_msg)
        raise ConanException("Cannot upload corrupted package '%s'" % str(pref))
    else:
        self._user_io.out.rewrite_line("Package integrity OK!")
    self._user_io.out.writeln("")
def _check_recipe_date(self, ref, remote, local_manifest):
    """Fetch the remote recipe manifest and refuse the upload (raise) when the
    remote copy differs AND is newer than the local one.

    Returns the remote manifest, or None if the recipe is not on the remote.
    """
    try:
        remote_recipe_manifest, ref = self._remote_manager.get_recipe_manifest(ref, remote)
    except NotFoundException:
        return  # First time uploading this package
    if (remote_recipe_manifest != local_manifest and
            remote_recipe_manifest.time > local_manifest.time):
        self._print_manifest_information(remote_recipe_manifest, local_manifest, ref, remote)
        raise ConanException("Remote recipe is newer than local recipe: "
                             "\n Remote date: %s\n Local date: %s" %
                             (remote_recipe_manifest.time, local_manifest.time))
    return remote_recipe_manifest
def _print_manifest_information(self, remote_recipe_manifest, local_manifest, ref, remote):
    """Print a diagnostic dump of both manifests; when conanfile.py differs,
    also report each side's line-ending style (a common cause of false diffs).
    Never raises: any error while printing is itself reported and swallowed."""
    try:
        self._user_io.out.info("\n%s" % ("-"*40))
        self._user_io.out.info("Remote manifest:")
        self._user_io.out.info(remote_recipe_manifest)
        self._user_io.out.info("Local manifest:")
        self._user_io.out.info(local_manifest)
        difference = remote_recipe_manifest.difference(local_manifest)
        if "conanfile.py" in difference:
            contents = load(self._cache.package_layout(ref).conanfile())
            endlines = "\\r\\n" if "\r\n" in contents else "\\n"
            self._user_io.out.info("Local 'conanfile.py' using '%s' line-ends" % endlines)
            remote_contents = self._remote_manager.get_recipe_path(ref, path="conanfile.py",
                                                                   remote=remote)
            endlines = "\\r\\n" if "\r\n" in remote_contents else "\\n"
            self._user_io.out.info("Remote 'conanfile.py' using '%s' line-ends" % endlines)
        self._user_io.out.info("\n%s" % ("-"*40))
    except Exception as e:
        self._user_io.out.info("Error printing information about the diff: %s" % str(e))
def _compress_recipe_files(files, symlinks, src_files, src_symlinks, dest_folder, output):
    """Build the recipe upload dict: conanfile + manifest, plus the exports
    and exports_sources tgz archives (reusing pre-existing tgz files when
    present, skipping empty archives). NOTE: pops entries from `files`."""
    # This is the minimum recipe
    result = {CONANFILE: files.pop(CONANFILE),
              CONAN_MANIFEST: files.pop(CONAN_MANIFEST)}
    export_tgz_path = files.pop(EXPORT_TGZ_NAME, None)
    sources_tgz_path = files.pop(EXPORT_SOURCES_TGZ_NAME, None)

    def add_tgz(tgz_name, tgz_path, tgz_files, tgz_symlinks, msg):
        # Reuse an already-built tgz; otherwise compress only if there is
        # something to compress (no empty archives).
        if tgz_path:
            result[tgz_name] = tgz_path
        elif tgz_files:
            if output and not output.is_terminal:
                output.writeln(msg)
            tgz_path = compress_files(tgz_files, tgz_symlinks, tgz_name, dest_folder, output)
            result[tgz_name] = tgz_path

    add_tgz(EXPORT_TGZ_NAME, export_tgz_path, files, symlinks, "Compressing recipe...")
    add_tgz(EXPORT_SOURCES_TGZ_NAME, sources_tgz_path, src_files, src_symlinks,
            "Compressing recipe sources...")
    return result
def _compress_package_files(files, symlinks, dest_folder, output):
    """Build the package upload dict: conaninfo + manifest + package tgz,
    reusing a pre-existing tgz when one is already present in `files`."""
    tgz_path = files.get(PACKAGE_TGZ_NAME)
    if tgz_path is None or not tgz_path:
        if output and not output.is_terminal:
            output.writeln("Compressing package...")
        # Everything except the metadata files goes into the archive.
        excluded = (CONANINFO, CONAN_MANIFEST)
        to_compress = {name: path for name, path in files.items() if name not in excluded}
        tgz_path = compress_files(to_compress, symlinks, PACKAGE_TGZ_NAME, dest_folder, output)
    result = {PACKAGE_TGZ_NAME: tgz_path}
    result[CONANINFO] = files[CONANINFO]
    result[CONAN_MANIFEST] = files[CONAN_MANIFEST]
    return result
def compress_files(files, symlinks, name, dest_dir, output=None):
    """Create a gzip'd tar archive `name` in `dest_dir` from the given
    {archive_name: abs_path} files and {archive_name: target} symlinks.

    The archive path is marked dirty while being written and cleaned on
    success, so interrupted runs can be detected. When `output` is given and
    more than one file is archived, progress is shown: a tqdm bar on
    terminals, an ASCII '[===]' bar otherwise. Returns the archive path.
    """
    t1 = time.time()
    # FIXME, better write to disk sequentially and not keep tgz contents in memory
    tgz_path = os.path.join(dest_dir, name)
    set_dirty(tgz_path)
    with open(tgz_path, "wb") as tgz_handle:
        # tgz_contents = BytesIO()
        tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle)
        for filename, dest in sorted(symlinks.items()):
            info = tarfile.TarInfo(name=filename)
            info.type = tarfile.SYMTYPE
            info.linkname = dest
            info.size = 0  # A symlink shouldn't have size
            tgz.addfile(tarinfo=info)
        # Strip group/other write bits from archived modes.
        mask = ~(stat.S_IWOTH | stat.S_IWGRP)
        i_file = 0
        n_files = len(files)
        last_progress = None
        if output and n_files > 1 and not output.is_terminal:
            output.write("[")
        elif output and n_files > 1 and output.is_terminal:
            progress_bar = tqdm(total=len(files), desc="Compressing %s" % name,
                                unit="files", leave=True, dynamic_ncols=False,
                                ascii=True, file=output)
        for filename, abs_path in sorted(files.items()):
            info = tarfile.TarInfo(name=filename)
            info.size = os.stat(abs_path).st_size
            info.mode = os.stat(abs_path).st_mode & mask
            if os.path.islink(abs_path):
                # Store symlinks found among regular files as links, not content.
                info.type = tarfile.SYMTYPE
                info.size = 0  # A symlink shouldn't have size
                info.linkname = os.readlink(abs_path)  # @UndefinedVariable
                tgz.addfile(tarinfo=info)
            else:
                with open(abs_path, 'rb') as file_handler:
                    tgz.addfile(tarinfo=info, fileobj=file_handler)
            if output and n_files > 1:
                i_file = i_file + 1
                # Non-terminal bar has 50 '=' slots total.
                units = min(50, int(50 * i_file / n_files))
                if last_progress != units:  # Avoid screen refresh if nothing has change
                    if not output.is_terminal:
                        output.write('=' * (units - (last_progress or 0)))
                    last_progress = units
                if output.is_terminal:
                    progress_bar.update()
        if output and n_files > 1:
            if output.is_terminal:
                progress_bar.close()
            else:
                output.writeln("]")
        tgz.close()
    clean_dirty(tgz_path)
    duration = time.time() - t1
    log_compressed_files(files, duration, tgz_path)
    return tgz_path
| 50.474153 | 100 | 0.615165 |
870ac5230ae4d3b7880dfbae2a9e3a967ca3ad9d | 2,009 | py | Python | encoders/rnn/plotobjective.py | SvenBollweg/eqnet | 76bc51fdff8bef1b898820329e1ddf212323d0b0 | [
"BSD-3-Clause"
] | 35 | 2016-12-11T12:14:48.000Z | 2022-03-28T14:20:27.000Z | encoders/rnn/plotobjective.py | SvenBollweg/eqnet | 76bc51fdff8bef1b898820329e1ddf212323d0b0 | [
"BSD-3-Clause"
] | null | null | null | encoders/rnn/plotobjective.py | SvenBollweg/eqnet | 76bc51fdff8bef1b898820329e1ddf212323d0b0 | [
"BSD-3-Clause"
] | 12 | 2016-12-16T07:11:42.000Z | 2020-04-11T02:53:03.000Z | import pickle
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from encoders.rnn.siameseencoder import RecursiveNNSiameseEncoder
if __name__ == '__main__':
    # Plot the scanned objective surface of a siamese recursive-NN encoder
    # over the (add_weight, subtract_weight) unit square.
    #
    # Usage: plotobjective.py <dataset>.json.gz
    # The scanned grid is cached in datadump-<name>.pkl so re-plotting does
    # not re-run the (expensive) objective scan.
    import sys
    import os

    if len(sys.argv) != 2:
        print("Usage <FileName>")
        sys.exit(-1)
    hyperparameters = dict(log_learning_rate=-1,
                           rmsprop_rho=.98,
                           momentum=0.8,
                           minibatch_size=10,
                           memory_size=32,
                           grad_clip=10,
                           log_init_scale_embedding=-1,
                           dropout_rate=0,
                           dissimilar_margin=.1)
    dataset = sys.argv[1]
    dset_name = os.path.basename(dataset)
    assert dset_name.endswith('.json.gz')
    dset_name = dset_name[:-len('.json.gz')]
    data_dump_name = 'datadump-' + dset_name + '.pkl'
    if not os.path.exists(data_dump_name):
        # No cache yet: scan the objective for the 'Add' operator and persist
        # the (X, Y, Z) grid. (Removed dead `all_params = dict(hyperparameters)`,
        # which was built but never used.)
        lm = RecursiveNNSiameseEncoder(dataset, hyperparameters)
        X, Y, Z = lm.scan_objective(dataset, 'Add')
        with open(data_dump_name, 'wb') as f:
            pickle.dump((X, Y, Z), f, pickle.HIGHEST_PROTOCOL)
    else:
        with open(data_dump_name, 'rb') as f:
            X, Y, Z = pickle.load(f)
    # Log-scale the (negative) objective; the epsilon guards log(0).
    Z_norm = np.log(-Z + 1e-20)
    plt.imshow(-np.clip(Z, -2, 0), interpolation='gaussian', origin='lower',
               cmap=cm.gray, extent=(0, 1, 0, 1))
    levels = [-2, -1, 0, 1, 2, 3, 3.95, 4, 4.025, 4.04, 5, 6, 7, 8]
    plt.contour(Z_norm, levels,
                origin='lower',
                linewidths=2,
                extent=(0, 1, 0, 1))
    plt.xlabel("add_weight")
    plt.ylabel("subtract_weight")
    plt.title('Objective Values')
    plt.grid()
    plt.show()
18bfd50e9c3ed87efe906eadbbe8db807fe12e29 | 8,294 | py | Python | huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/list_gauss_my_sql_error_log_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/list_gauss_my_sql_error_log_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/list_gauss_my_sql_error_log_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListGaussMySqlErrorLogRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'start_date': 'str',
'end_date': 'str',
'offset': 'int',
'limit': 'int',
'level': 'str',
'node_id': 'str'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'start_date': 'start_date',
'end_date': 'end_date',
'offset': 'offset',
'limit': 'limit',
'level': 'level',
'node_id': 'node_id'
}
def __init__(self, x_language=None, instance_id=None, start_date=None, end_date=None, offset=None, limit=None, level=None, node_id=None):
"""ListGaussMySqlErrorLogRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._instance_id = None
self._start_date = None
self._end_date = None
self._offset = None
self._limit = None
self._level = None
self._node_id = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
self.start_date = start_date
self.end_date = end_date
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if level is not None:
self.level = level
if node_id is not None:
self.node_id = node_id
@property
def x_language(self):
"""Gets the x_language of this ListGaussMySqlErrorLogRequest.
语言
:return: The x_language of this ListGaussMySqlErrorLogRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListGaussMySqlErrorLogRequest.
语言
:param x_language: The x_language of this ListGaussMySqlErrorLogRequest.
:type: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListGaussMySqlErrorLogRequest.
实例ID
:return: The instance_id of this ListGaussMySqlErrorLogRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListGaussMySqlErrorLogRequest.
实例ID
:param instance_id: The instance_id of this ListGaussMySqlErrorLogRequest.
:type: str
"""
self._instance_id = instance_id
@property
def start_date(self):
"""Gets the start_date of this ListGaussMySqlErrorLogRequest.
开始时间,格式为“yyyy-mm-ddThh:mm:ssZ”。 其中,T指某个时间的开始;Z指时区偏移量,例如北京时间偏移显示为+0800。
:return: The start_date of this ListGaussMySqlErrorLogRequest.
:rtype: str
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this ListGaussMySqlErrorLogRequest.
开始时间,格式为“yyyy-mm-ddThh:mm:ssZ”。 其中,T指某个时间的开始;Z指时区偏移量,例如北京时间偏移显示为+0800。
:param start_date: The start_date of this ListGaussMySqlErrorLogRequest.
:type: str
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this ListGaussMySqlErrorLogRequest.
结束时间,格式为“yyyy-mm-ddThh:mm:ssZ”。 其中,T指某个时间的开始;Z指时区偏移量,例如北京时间偏移显示为+0800。
:return: The end_date of this ListGaussMySqlErrorLogRequest.
:rtype: str
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this ListGaussMySqlErrorLogRequest.
结束时间,格式为“yyyy-mm-ddThh:mm:ssZ”。 其中,T指某个时间的开始;Z指时区偏移量,例如北京时间偏移显示为+0800。
:param end_date: The end_date of this ListGaussMySqlErrorLogRequest.
:type: str
"""
self._end_date = end_date
@property
def offset(self):
"""Gets the offset of this ListGaussMySqlErrorLogRequest.
索引位置,偏移量。从第一条数据偏移offset条数据后开始查询,默认为0(偏移0条数据,表示从第一条数据开始查询),必须为数字,不能为负数
:return: The offset of this ListGaussMySqlErrorLogRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListGaussMySqlErrorLogRequest.
索引位置,偏移量。从第一条数据偏移offset条数据后开始查询,默认为0(偏移0条数据,表示从第一条数据开始查询),必须为数字,不能为负数
:param offset: The offset of this ListGaussMySqlErrorLogRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListGaussMySqlErrorLogRequest.
查询记录数。默认为100,不能为负数,最小值为1,最大值为100
:return: The limit of this ListGaussMySqlErrorLogRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListGaussMySqlErrorLogRequest.
查询记录数。默认为100,不能为负数,最小值为1,最大值为100
:param limit: The limit of this ListGaussMySqlErrorLogRequest.
:type: int
"""
self._limit = limit
@property
def level(self):
"""Gets the level of this ListGaussMySqlErrorLogRequest.
日志级别
:return: The level of this ListGaussMySqlErrorLogRequest.
:rtype: str
"""
return self._level
@level.setter
def level(self, level):
"""Sets the level of this ListGaussMySqlErrorLogRequest.
日志级别
:param level: The level of this ListGaussMySqlErrorLogRequest.
:type: str
"""
self._level = level
@property
def node_id(self):
"""Gets the node_id of this ListGaussMySqlErrorLogRequest.
节点ID
:return: The node_id of this ListGaussMySqlErrorLogRequest.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""Sets the node_id of this ListGaussMySqlErrorLogRequest.
节点ID
:param node_id: The node_id of this ListGaussMySqlErrorLogRequest.
:type: str
"""
self._node_id = node_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListGaussMySqlErrorLogRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.554817 | 141 | 0.59742 |
577b07b2fc2988f83160120578688d4975662914 | 44,233 | py | Python | src/transformers/models/squeezebert/modeling_squeezebert.py | hardianlawi/transformers | d45fc7da3d43ff29ca597f5ffa8cf3151d705013 | [
"Apache-2.0"
] | 2 | 2021-08-20T21:22:37.000Z | 2021-11-17T10:53:34.000Z | src/transformers/models/squeezebert/modeling_squeezebert.py | xiankaigit/transformers | 61f64262692ac7dc90e2e0bdeb7e79d9cd607a66 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/squeezebert/modeling_squeezebert.py | xiankaigit/transformers | 61f64262692ac7dc90e2e0bdeb7e79d9cd607a66 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch SqueezeBert model. """
import math
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_squeezebert import SqueezeBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "squeezebert/squeezebert-uncased"
_CONFIG_FOR_DOC = "SqueezeBertConfig"
_TOKENIZER_FOR_DOC = "SqueezeBertTokenizer"
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"squeezebert/squeezebert-uncased",
"squeezebert/squeezebert-mnli",
"squeezebert/squeezebert-mnli-headless",
]
class SqueezeBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """Sum word, position and token-type embeddings, then apply LayerNorm and dropout.

        One of ``input_ids`` / ``inputs_embeds`` must be provided; ``position_ids``
        defaults to [0..seq_len) and ``token_type_ids`` defaults to all zeros.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            # Drop the trailing hidden dimension to recover (batch, seq_len).
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        # Additive combination of the three embedding types, as in BERT.
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class MatMulWrapper(nn.Module):
    """
    Thin nn.Module around torch.matmul(), so that flop-counting tools (which
    typically only account for module calls) can see the matmul flops. A bare
    torch.matmul() call in user code would usually be missed by such counters.
    """

    def __init__(self):
        super().__init__()

    def forward(self, mat1, mat2):
        """Return the matrix product of ``mat1`` and ``mat2``.

        Typical BERT shapes (the leading B is optional):
        mat1 [B, ..., M, K], mat2 [B, ..., K, N] -> output [B, ..., M, N].
        """
        return mat1 @ mat2
class SqueezeBertLayerNorm(nn.LayerNorm):
    """
    nn.LayerNorm subclass that accepts NCW-layout activations and normalizes
    along the C (channel) dimension.

    N = batch, C = channels, W = sequence length
    """

    def __init__(self, hidden_size, eps=1e-12):
        # The parent constructor instantiates self.weight, self.bias and self.eps.
        nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps)

    def forward(self, x):
        # NCW -> NWC, normalize over the (now trailing) channel axis, NWC -> NCW.
        transposed = x.permute(0, 2, 1)
        normalized = nn.LayerNorm.forward(self, transposed)
        return normalized.permute(0, 2, 1)
class ConvDropoutLayerNorm(nn.Module):
    """
    ConvDropoutLayerNorm: Conv, Dropout, LayerNorm

    Pointwise (kernel-size-1) grouped convolution, then dropout, a residual
    add with ``input_tensor``, and channel-wise layer normalization.
    """

    def __init__(self, cin, cout, groups, dropout_prob):
        super().__init__()

        self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
        self.layernorm = SqueezeBertLayerNorm(cout)
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # conv -> dropout -> residual add -> layernorm (all in NCW layout)
        projected = self.dropout(self.conv1d(hidden_states))
        return self.layernorm(projected + input_tensor)
class ConvActivation(nn.Module):
    """
    ConvActivation: Conv, Activation

    Pointwise (kernel-size-1) grouped convolution followed by a nonlinearity
    looked up from ACT2FN by its string name.
    """

    def __init__(self, cin, cout, groups, act):
        super().__init__()

        self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
        self.act = ACT2FN[act]

    def forward(self, x):
        return self.act(self.conv1d(x))
class SqueezeBertSelfAttention(nn.Module):
    def __init__(self, config, cin, q_groups=1, k_groups=1, v_groups=1):
        """
        config = used for some things; ignored for others (work in progress...)

        cin = input channels = output channels

        q_groups / k_groups / v_groups = number of groups for the respective conv1d projection layers
        """
        super().__init__()
        if cin % config.num_attention_heads != 0:
            raise ValueError(
                f"cin ({cin}) is not a multiple of the number of attention heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(cin / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Q/K/V projections are pointwise (kernel-size-1) grouped convolutions over NCW activations.
        self.query = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=q_groups)
        self.key = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=k_groups)
        self.value = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=v_groups)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)

        # Wrapped so that flop counters see the two matmuls as module calls.
        self.matmul_qk = MatMulWrapper()
        self.matmul_qkv = MatMulWrapper()

    def transpose_for_scores(self, x):
        """
        - input: [N, C, W]
        - output: [N, C1, W, C2] where C1 is the head index, and C2 is one head's contents
        """
        new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1])  # [N, C1, C2, W]
        x = x.view(*new_x_shape)
        return x.permute(0, 1, 3, 2)  # [N, C1, C2, W] --> [N, C1, W, C2]

    def transpose_key_for_scores(self, x):
        """
        - input: [N, C, W]
        - output: [N, C1, C2, W] where C1 is the head index, and C2 is one head's contents
        """
        new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1])  # [N, C1, C2, W]
        x = x.view(*new_x_shape)
        # no `permute` needed: the key is consumed in [N, C1, C2, W] layout by the Q*K matmul below
        return x

    def transpose_output(self, x):
        """
        - input: [N, C1, W, C2]
        - output: [N, C, W]
        """
        x = x.permute(0, 1, 3, 2).contiguous()  # [N, C1, C2, W]
        new_x_shape = (x.size()[0], self.all_head_size, x.size()[3])  # [N, C, W]
        x = x.view(*new_x_shape)
        return x

    def forward(self, hidden_states, attention_mask, output_attentions):
        """
        expects hidden_states in [N, C, W] data layout.

        The attention_mask data layout is [N, W], and it does not need to be transposed.
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_key_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_score = self.matmul_qk(query_layer, key_layer)
        attention_score = attention_score / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_score = attention_score + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = self.softmax(attention_score)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = self.matmul_qkv(attention_probs, value_layer)
        context_layer = self.transpose_output(context_layer)

        result = {"context_layer": context_layer}
        if output_attentions:
            # Scaled + masked pre-softmax scores, as re-exposed by the encoder.
            result["attention_score"] = attention_score
        return result
class SqueezeBertModule(nn.Module):
    """One transformer layer: self-attention plus two residual conv blocks."""

    def __init__(self, config):
        """
        - hidden_size = input chans = output chans for Q, K, V (they are all the same ... for now) = output chans for
          the module
        - intermediate_size = output chans for intermediate layer
        - groups = number of groups for all layers in the BertModule. (eventually we could change the interface to
          allow different groups for different layers)
        """
        super().__init__()

        # Channel counts for the attention / post-attention / intermediate / output stages.
        c0 = config.hidden_size
        c1 = config.hidden_size
        c2 = config.intermediate_size
        c3 = config.hidden_size

        self.attention = SqueezeBertSelfAttention(
            config=config, cin=c0, q_groups=config.q_groups, k_groups=config.k_groups, v_groups=config.v_groups
        )
        self.post_attention = ConvDropoutLayerNorm(
            cin=c0, cout=c1, groups=config.post_attention_groups, dropout_prob=config.hidden_dropout_prob
        )
        self.intermediate = ConvActivation(cin=c1, cout=c2, groups=config.intermediate_groups, act=config.hidden_act)
        self.output = ConvDropoutLayerNorm(
            cin=c2, cout=c3, groups=config.output_groups, dropout_prob=config.hidden_dropout_prob
        )

    def forward(self, hidden_states, attention_mask, output_attentions):
        # Self-attention, then residual post-attention block, then the
        # FFN-style intermediate + output block (second residual).
        att = self.attention(hidden_states, attention_mask, output_attentions)
        attention_output = att["context_layer"]

        post_attention_output = self.post_attention(attention_output, hidden_states)
        intermediate_output = self.intermediate(post_attention_output)
        layer_output = self.output(intermediate_output, post_attention_output)

        output_dict = {"feature_map": layer_output}
        if output_attentions:
            output_dict["attention_score"] = att["attention_score"]

        return output_dict
class SqueezeBertEncoder(nn.Module):
    """Stack of SqueezeBertModule layers operating internally in NCW layout."""

    def __init__(self, config):
        super().__init__()

        assert config.embedding_size == config.hidden_size, (
            "If you want embedding_size != intermediate hidden_size, "
            "please insert a Conv1d layer to adjust the number of channels "
            "before the first SqueezeBertModule."
        )

        self.layers = nn.ModuleList(SqueezeBertModule(config) for _ in range(config.num_hidden_layers))

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # head_mask must be None or an all-None list; pruning is unsupported here.
        if head_mask is None:
            head_mask_is_all_none = True
        elif head_mask.count(None) == len(head_mask):
            head_mask_is_all_none = True
        else:
            head_mask_is_all_none = False
        assert head_mask_is_all_none is True, "head_mask is not yet supported in the SqueezeBert implementation."

        # [batch_size, sequence_length, hidden_size] --> [batch_size, hidden_size, sequence_length]
        hidden_states = hidden_states.permute(0, 2, 1)

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        for layer in self.layers:
            if output_hidden_states:
                # Hidden states are recorded in NWC layout, so round-trip the permute.
                hidden_states = hidden_states.permute(0, 2, 1)
                all_hidden_states += (hidden_states,)
                hidden_states = hidden_states.permute(0, 2, 1)

            layer_output = layer.forward(hidden_states, attention_mask, output_attentions)

            hidden_states = layer_output["feature_map"]

            if output_attentions:
                all_attentions += (layer_output["attention_score"],)

        # [batch_size, hidden_size, sequence_length] --> [batch_size, sequence_length, hidden_size]
        hidden_states = hidden_states.permute(0, 2, 1)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
class SqueezeBertPooler(nn.Module):
    """Pool a sequence into a single vector: dense + tanh over the first token."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply taking the hidden state corresponding to
        # the first token of the sequence.
        first_token_state = hidden_states[:, 0]
        return self.activation(self.dense(first_token_state))
class SqueezeBertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied ahead of the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act is either the name of an activation (looked up in
        # ACT2FN) or a callable used directly.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        projected = self.dense(hidden_states)
        activated = self.transform_act_fn(projected)
        return self.LayerNorm(activated)
class SqueezeBertLMPredictionHead(nn.Module):
    """Masked-LM head: transform the hidden states, then decode to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = SqueezeBertPredictionHeadTransform(config)

        # The decoder weight matrix is the same as the input embeddings (tied
        # elsewhere); only a per-token output bias is owned here.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Link the two so the bias is correctly resized with `resize_token_embeddings`.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class SqueezeBertOnlyMLMHead(nn.Module):
    """Wraps the LM prediction head as the model's only output head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = SqueezeBertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class SqueezeBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SqueezeBertConfig
    base_model_prefix = "transformer"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights of a single submodule (invoked by the base class for each module)."""
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding token's embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, SqueezeBertLayerNorm):
            # LayerNorm starts as the identity transform: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
SQUEEZEBERT_START_DOCSTRING = r"""
The SqueezeBERT model was proposed in `SqueezeBERT: What can computer vision teach NLP about efficient neural
networks? <https://arxiv.org/abs/2006.11316>`__ by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W.
Keutzer
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the
`squeezebert/squeezebert-mnli-headless` checkpoint as a starting point.
Parameters:
config (:class:`~transformers.SqueezeBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
Hierarchy::
Internal class hierarchy:
SqueezeBertModel
SqueezeBertEncoder
SqueezeBertModule
SqueezeBertSelfAttention
ConvActivation
ConvDropoutLayerNorm
Data layouts::
Input data is in [batch, sequence_length, hidden_size] format.
Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if :obj:`output_hidden_states
== True`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format.
The final output of the encoder is in [batch, sequence_length, hidden_size] format.
"""
SQUEEZEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.SqueezeBertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top.",
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertModel(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = SqueezeBertEmbeddings(config)
        self.encoder = SqueezeBertEncoder(config)
        self.pooler = SqueezeBertPooler(config)

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # NOTE(review): self.encoder exposes `layers` (and SqueezeBertSelfAttention
        # has no prune_heads) — this path looks untested; confirm before relying on it.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Fall back to the config defaults for any unspecified output option.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Default to attending everywhere and a single (all-zero) token type.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings("""SqueezeBERT Model with a `language modeling` head on top. """, SQUEEZEBERT_START_DOCSTRING)
class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel):
    # The decoder bias is tied/re-created from the decoder weights on load, so a
    # checkpoint missing this key is expected and must not warn.
    _keys_to_ignore_on_load_missing = [r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        # Backbone encoder producing per-token hidden states.
        self.transformer = SqueezeBertModel(config)
        # MLM head projecting hidden states to vocabulary logits.
        self.cls = SqueezeBertOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        # Expose the vocab projection so the base class can tie it to the input embeddings.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        # Fall back to the model-wide default when the caller does not specify.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[0]: per-token hidden states, (batch, seq_len, hidden_size).
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            # Tuple output: skip outputs[1] (pooled output), keep hidden states/attentions.
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Classification head on top of the pooled ([CLS]) representation.
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[1]: pooled output for the whole sequence.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Lazily infer and cache the problem type on the config from the
            # label dtype/num_labels; subsequent calls reuse the cached value.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Tuple output: logits plus any hidden states/attentions (skip pooled output).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per choice; scores are reshaped to (batch, num_choices) in forward.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()
    @add_start_docstrings_to_model_forward(
        SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. (see
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, seq_len) -> (batch * num_choices, seq_len)
        # so each choice runs through the encoder as an independent example.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten the per-choice scores back to (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Per-token classification head.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Replace labels at padding positions with the loss ignore_index
                # so padding tokens do not contribute to the loss.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = SqueezeBertModel(config)
        # Projects each token's hidden state to 2 logits: span start and span end.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the 2 logits per token into separate start/end tensors of shape (batch, seq_len).
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # Clamping to seq_len maps out-of-range targets onto ignore_index below.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 40.138838 | 122 | 0.666267 |
b67168618e4b00ae00b3c5ffc45d1903ae242ab5 | 1,731 | py | Python | mitorch/models/seresnext.py | shonohs/shtorch_models | afc20dbc9fd272a74ca08ec460401721b1476e72 | [
"MIT"
] | 3 | 2020-06-26T11:30:33.000Z | 2021-01-23T07:42:25.000Z | mitorch/models/seresnext.py | shonohs/shtorch_models | afc20dbc9fd272a74ca08ec460401721b1476e72 | [
"MIT"
] | null | null | null | mitorch/models/seresnext.py | shonohs/shtorch_models | afc20dbc9fd272a74ca08ec460401721b1476e72 | [
"MIT"
] | 2 | 2020-04-22T16:19:28.000Z | 2020-06-11T09:06:37.000Z | """SE-ResNext"""
from .resnext import ResNext
from .modules import SEBlock
class SEResNext(ResNext):
    """ResNext variant whose basic blocks add a Squeeze-and-Excitation step.

    Identical to ResNext except that each block's output is rescaled by an
    SEBlock before the residual addition.
    """
    class BasicBlock(ResNext.BasicBlock):
        """ResNext basic block followed by squeeze-and-excitation recalibration."""
        def __init__(self, in_channels, out_channels, cardinality, stride=1, reduction_ratio=16):
            super().__init__(in_channels, out_channels, cardinality, stride)
            # Channel-attention module applied to the block output.
            self.se = SEBlock(out_channels, reduction_ratio)
        def forward(self, input):
            x = self.conv0(input)
            x = self.conv1(x)
            x = self.conv2(x)
            # Recalibrate channels before the residual connection.
            x = self.se(x)
            x = self.add(x, self.conv_shortcut(input) if self.conv_shortcut else input)
            return self.activation(x)
    # NOTE: default changed from a mutable list literal to a tuple; a shared
    # mutable default is a latent bug, and the tuple is call-compatible.
    def __init__(self, num_blocks=(3, 4, 6, 3), cardinality=32, bottleneck_width=4, reduction_ratio=16):
        # Must be set before super().__init__ because the parent constructor
        # calls _make_stage(), which reads self.reduction_ratio.
        self.reduction_ratio = reduction_ratio
        super().__init__(num_blocks, cardinality, bottleneck_width)
    def _make_stage(self, in_channels, out_channels, num_blocks, first_block_stride, cardinality, index):
        """Build one stage as a list of (name, block) pairs; only the first block strides."""
        blocks = [(f'block{index}_0', SEResNext.BasicBlock(in_channels, out_channels, cardinality, first_block_stride, self.reduction_ratio))]
        for i in range(num_blocks - 1):
            blocks.append((f'block{index}_{i+1}', SEResNext.BasicBlock(out_channels, out_channels, cardinality, reduction_ratio=self.reduction_ratio)))
        return blocks
class SEResNext14(SEResNext):
    """14-layer SE-ResNext: one SE block per stage."""
    def __init__(self):
        super().__init__(num_blocks=[1, 1, 1, 1])
class SEResNext26(SEResNext):
    """26-layer SE-ResNext: two SE blocks per stage."""
    def __init__(self):
        super().__init__(num_blocks=[2, 2, 2, 2])
class SEResNext50(SEResNext):
    """50-layer SE-ResNext: the standard (3, 4, 6, 3) stage layout."""
    def __init__(self):
        super().__init__(num_blocks=[3, 4, 6, 3])
class SEResNext101(SEResNext):
    """101-layer SE-ResNext: the standard (3, 4, 23, 3) stage layout."""
    def __init__(self):
        super().__init__(num_blocks=[3, 4, 23, 3])
| 34.62 | 151 | 0.665511 |
f3a32afb076b27faec70a0b5dde715655a3b480f | 7,072 | py | Python | deploy.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 4 | 2019-04-02T03:49:13.000Z | 2022-01-22T14:57:33.000Z | deploy.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 451 | 2019-03-25T07:27:52.000Z | 2021-07-26T01:26:43.000Z | deploy.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 14 | 2018-09-28T18:45:12.000Z | 2022-01-22T14:57:22.000Z | #!/usr/bin/env python
import datetime
import json
import os
import subprocess
import sys
import argparse
from prettytable import PrettyTable
import requests
def add_cli_args():
    """Build and return the argparse parser for the labkeeper CLI."""
    arg_parser = argparse.ArgumentParser()
    # Positional argument: which deployment topology to operate on.
    deployment_types = ["allinone", "allinone-ha", "openlab", "openlab-ha"]
    arg_parser.add_argument(
        'type',
        choices=deployment_types,
        help='OpenLab deployment type',
    )
    # Supported maintenance actions; the long help text enumerates each one.
    actions = ["deploy", "new-slave", "new-zookeeper", "switch-role",
               "show-graph", "show-ip", "list-change", "upgrade",
               "upgrade-complete"]
    action_help = ("The action that labkeeper supports. Default "
                   "value is 'deploy'.\n"
                   "'deploy': create a new OpenLab CI environment.\n"
                   "'new-slave': create new slave nodes base on "
                   "current master nodes.\n"
                   "'switch-role': switch the master/slave role of "
                   "current deployment. This will re-config the "
                   "existing OpenLab environment.\n"
                   "'show-graph': show the hosts graph of ansible "
                   "inventory. It can be used with --with-vars to "
                   "show more detail.\n"
                   "'show-ip': show hosts ip address.\n"
                   "'list-change': show zuul and nodepool code "
                   "change during last month.\n"
                   "'upgrade': upgrade zuul and nodepool to the "
                   "newest master branch.\n"
                   "'upgrade-complete: after checking upgraded "
                   "environment, if everything works well, run this "
                   "action to complete upgrade. Otherwise all nodes"
                   "will keep being in maintaining status\n '")
    arg_parser.add_argument('--action', choices=actions, default="deploy",
                            help=action_help)
    arg_parser.add_argument(
        '-u', '--user',
        help='the Ansible remote user performing deployment, '
             'default is "ubuntu" configured in ansible.cfg',
    )
    # Repeatable key=value pairs forwarded to the Ansible command line.
    arg_parser.add_argument(
        '-e', '--extra-vars',
        metavar="<key=value>",
        action='append',
        default=[],
        help='extra vars passed to Ansible command',
    )
    arg_parser.add_argument(
        '--host-ip',
        metavar="<key=value>",
        action='append',
        help='override new ip address for current inventory, '
             'this argument need to use with key-value pairs '
             'combined with "=", the key must be the host name'
             ' defined in the inventory yaml files, e.g. '
             'zuul01, nodepool02, allinone01.',
    )
    arg_parser.add_argument(
        '--with-vars',
        action='store_true',
        help='show the hosts graph of ansible inventory with '
             'vars. It must be used together with --action '
             'show-graph.'
    )
    return arg_parser
def list_changes(project, days):
    """Print a table of changes merged to *project* on Gerrit in the last *days* days."""
    from_day = (datetime.date.today() - datetime.timedelta(days=days)).strftime('%Y-%m-%d')
    response = requests.get(
        'https://review.opendev.org/changes/?q=project:' +
        project + '+status:merged+after:' + from_day)
    # Gerrit prepends the magic XSSI-protection prefix ")]}'" (plus a newline)
    # to all JSON responses; the first 5 characters are stripped before parsing.
    changes = json.loads(
        response.content.decode("utf-8")[5:].replace('\n', ''))
    pt = PrettyTable([project, 'change_id'], caching=False)
    pt.align = 'l'
    for change in changes:
        pt.add_row([change['subject'], change['change_id']])
    # Sort rows by the subject column (named after the project) for readability.
    print(pt.get_string(sortby=project))
def main():
    """Entry point: translate CLI args into env vars and an Ansible command.

    Selects the playbook/command for the requested action, exports the
    OL_*-prefixed environment variables consumed by inventory/inventory.py,
    and runs the command via subprocess.
    """
    parsed_args = add_cli_args().parse_args()
    os.environ['OL_TYPE'] = parsed_args.type
    cmd = []
    if parsed_args.action == 'deploy':
        cmd = ['ansible-playbook', '-i', 'inventory/inventory.py',
               'playbooks/site.yaml']
    elif parsed_args.action == 'new-slave':
        # Limit the play to hosts whose inventory name ends in "-slave".
        cmd = ['ansible-playbook', '-i', 'inventory/inventory.py',
               'playbooks/site.yaml', '-l', '*-slave']
    elif parsed_args.action == 'new-zookeeper':
        cmd = ['ansible-playbook', '-i', 'inventory/inventory.py',
               'playbooks/conf-new-zookeeper.yaml']
    elif parsed_args.action == 'switch-role':
        # BUG FIX: os.environ values must be strings; assigning the bool True
        # raised "TypeError: str expected, not bool" at runtime.
        os.environ['OL_SWITCH_MASTER_SLAVE'] = 'True'
        cmd = ['ansible-playbook', '-i', 'inventory/inventory.py',
               'playbooks/switch_role.yaml']
    elif parsed_args.action == 'show-graph':
        cmd = ['ansible-inventory', '-i', 'inventory/inventory.py', '--graph']
        if parsed_args.with_vars:
            cmd.append('--vars')
    elif parsed_args.action == 'show-ip':
        cmd = ['python', 'inventory/inventory.py', '--show-ip']
    elif parsed_args.action == 'list-change':
        list_changes('zuul/zuul', 31)
        list_changes('zuul/nodepool', 31)
    elif parsed_args.action == 'upgrade':
        if parsed_args.type != 'openlab-ha':
            print("upgrade action only support openlab-ha deployment.")
            exit(1)
        cmd = ['ansible-playbook', '-i', 'inventory/inventory.py',
               'playbooks/upgrade-ha-deployment.yaml']
    elif parsed_args.action == 'upgrade-complete':
        if parsed_args.type != 'openlab-ha':
            print("upgrade-complete action only support openlab-ha deployment.")
            exit(1)
        cmd = ['ansible-playbook', '-i', 'inventory/inventory.py',
               'playbooks/upgrade-complete-ha-deployment.yaml']
    if parsed_args.host_ip:
        # BUG FIX: the argparse option is --host-ip, so the attribute is
        # host_ip; the original read the non-existent parsed_args.new_ip and
        # crashed with AttributeError whenever --host-ip was supplied.
        specified_ips = dict(d.partition('=')[::2] for d in parsed_args.host_ip)
        for host, ip in specified_ips.items():
            os.environ['OL_%s_IP' % host.upper()] = ip
    if parsed_args.user and cmd[0] == 'ansible-playbook':
        cmd += ['-u', parsed_args.user]
    if parsed_args.extra_vars:
        cmd += ['-e', ' '.join(parsed_args.extra_vars)]
    # Echo the effective OL_* environment and the command before running it.
    ol_env_msg = '\n'.join(['%s=%s' % (k, os.environ[k]) for k in os.environ
                            if k.startswith('OL_')])
    if cmd:
        print("OpenLab deployment ENV:\n%s" % ol_env_msg)
        print('Ansible command:\n%s' % ' '.join(cmd))
        print("*" * 100)
        subprocess.call(cmd)
        if (parsed_args.action == 'new-slave' or
                parsed_args.action == 'new-zookeeper'):
            print("Don't forget to restart zuul and nodepool by hand.")
        if parsed_args.action == 'new-slave':
            subprocess.call(['ansible-playbook', '-i', 'inventory/inventory.py',
                             'playbooks/conf-cluster.yaml'])
if __name__ == '__main__':
sys.exit(main())
| 45.333333 | 91 | 0.516544 |
5f0aac252d83b7bf4266e7dab89093ddcdd55348 | 279 | py | Python | etcampy/api.py | Yuki-Mori/etcampy | 0af30327420b981d32b601b0346e4e4646094d67 | [
"MIT"
] | null | null | null | etcampy/api.py | Yuki-Mori/etcampy | 0af30327420b981d32b601b0346e4e4646094d67 | [
"MIT"
] | null | null | null | etcampy/api.py | Yuki-Mori/etcampy | 0af30327420b981d32b601b0346e4e4646094d67 | [
"MIT"
] | null | null | null | from .exception import CameraCannotOpenError
import cv2
def save_image(path, camera=0):
    """Capture one frame from *camera* and write it to *path*.

    :param path: destination image file path (format inferred by OpenCV).
    :param camera: OpenCV capture device index, default 0.
    :raises CameraCannotOpenError: if the device cannot be opened or no
        frame can be read from it.
    """
    cap = cv2.VideoCapture(camera)
    try:
        # BUG FIX: isOpened is a method. The original tested the bound method
        # object itself ("not cap.isOpened"), which is always truthy, so the
        # failure branch could never fire.
        if not cap.isOpened():
            raise CameraCannotOpenError("Camera cannot open!")
        ok, frame = cap.read()
        # Guard against a device that opens but delivers no frame; writing an
        # empty frame would crash inside cv2.imwrite with an opaque error.
        if not ok:
            raise CameraCannotOpenError("Failed to read a frame from camera!")
        cv2.imwrite(path, frame)
    finally:
        # Always release the device, even when capture fails.
        cap.release()
b8c98745cafcf29a4da0932ed7f63cf1d2c7a56c | 4,763 | py | Python | models.py | sulabhkatiyar/Spatial_Att | 675d0d51b131f277f764cb9604d0f9cd559c5484 | [
"Apache-2.0"
] | null | null | null | models.py | sulabhkatiyar/Spatial_Att | 675d0d51b131f277f764cb9604d0f9cd559c5484 | [
"Apache-2.0"
] | null | null | null | models.py | sulabhkatiyar/Spatial_Att | 675d0d51b131f277f764cb9604d0f9cd559c5484 | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
import torchvision
import pretrainedmodels
import json
from tqdm import tqdm
import numpy as np
# Run on GPU when available; tensors created in the decoder are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Backbone selector; 1 == VGG-16, the only choice implemented in Encoder.
encoder_choice = 1
class Encoder(nn.Module):
    """CNN image encoder for captioning.

    With ``encoder_choice == 1`` the backbone is a pretrained VGG-16 whose
    final max-pool layer is kept separate, and forward() returns the feature
    map permuted to channels-last: (batch, H, W, channels).
    """
    def __init__(self, encoded_image_size=8):
        """
        :param encoded_image_size: target spatial size for the adaptive pool
            (kept for API compatibility; unused on the VGG-16 path).
        """
        super(Encoder, self).__init__()
        self.enc_image_size = encoded_image_size
        if encoder_choice == 1:
            # All VGG-16 feature layers except the last max-pool...
            vgg16 = torchvision.models.vgg16(pretrained=True)
            self.features_nopool = nn.Sequential(*list(vgg16.features.children())[:-1])
            # ...and the final max-pool kept as a separate module.
            self.features_pool = list(vgg16.features.children())[-1]
        self.adaptive_pool = nn.AdaptiveAvgPool2d((encoded_image_size, encoded_image_size))
        self.fine_tune()
    def fine_tune(self, fine_tune=True):
        """Enable or disable gradient computation for the backbone.

        BUG FIX: the original called ``self.fine_tune()`` in __init__ without
        ever defining the method, so instantiating Encoder raised
        AttributeError. The default (True) leaves all parameters trainable,
        matching PyTorch's default behaviour.
        """
        for param in self.parameters():
            param.requires_grad = fine_tune
    def forward(self, images):
        """Encode a batch of images.

        :param images: (batch, 3, H, W) input tensor.
        :return: channels-last feature map (batch, H', W', channels).
        """
        if encoder_choice == 1:
            x = self.features_nopool(images)
            x_pool = self.features_pool(x)
            return x_pool.permute(0, 2, 3, 1)
        # BUG FIX: the original fallback referenced an undefined name `out`
        # and would always crash with NameError; fail loudly instead.
        raise ValueError("Unsupported encoder_choice: %r" % (encoder_choice,))
class DecoderWithAttention_choice(nn.Module):
    """LSTM caption decoder with additive spatial attention.

    Each step feeds the LSTMCell the word embedding concatenated with a
    global (mean-pooled, projected) image feature, then uses the new hidden
    state to attend over the spatial features; the next-word logits come
    from [hidden state; attended context].
    """
    def __init__(self, embed_dim, decoder_dim, vocab_size, encoder_dim=2048, dropout=0.5):
        """
        :param embed_dim: word embedding size
        :param decoder_dim: LSTM hidden-state size
        :param vocab_size: output vocabulary size
        :param encoder_dim: channel dimension of the encoder feature map
        :param dropout: dropout probability before the output projection
        """
        super(DecoderWithAttention_choice, self).__init__()
        self.encoder_dim = encoder_dim
        self.embed_dim = embed_dim
        self.decoder_dim = decoder_dim
        self.vocab_size = vocab_size
        self.dropout = dropout
        # Expected number of spatial positions (e.g. an 8x8 feature map). The
        # attention arithmetic below requires num_feats == num_pixels
        # (asserted in forward()).
        self.num_feats = 64
        self.embedding = nn.Embedding(vocab_size, self.embed_dim)
        # Rebinds self.dropout from the float p to the module (as original).
        self.dropout = nn.Dropout(p=self.dropout)
        self.decode_step = nn.LSTMCell(embed_dim + decoder_dim, decoder_dim, bias=True)
        # NOTE(review): init_h/init_c appear unused by forward() (h and c are
        # zero-initialized); kept for checkpoint/state_dict compatibility.
        self.init_h = nn.Linear(encoder_dim, decoder_dim)
        self.init_c = nn.Linear(encoder_dim, decoder_dim)
        self.get_global_features = nn.Linear(encoder_dim, decoder_dim)
        self.image_feat_small = nn.Linear(encoder_dim, decoder_dim, bias=False)
        self.w_v = nn.Linear(decoder_dim, self.num_feats, bias=False)
        self.w_g = nn.Linear(decoder_dim, self.num_feats, bias=False)
        self.w_h_t = nn.Linear(self.num_feats, 1, bias=False)
        self.tanh = torch.nn.Tanh()
        self.softmax = nn.Softmax(dim=1)
        self.fc = nn.Linear(decoder_dim + decoder_dim, vocab_size)
        self.init_weights()
    def init_weights(self):
        """Uniformly initialize the embedding and output layer (per the paper's recipe)."""
        self.embedding.weight.data.uniform_(-0.1, 0.1)
        self.fc.bias.data.fill_(0)
        self.fc.weight.data.uniform_(-0.1, 0.1)
    def get_global_image(self, encoder_out):
        """Mean-pool the spatial features and project them to decoder_dim."""
        mean_encoder_out = encoder_out.mean(dim=1)
        img = self.get_global_features(mean_encoder_out)
        return img
    def forward(self, encoder_out, encoded_captions, caption_lengths):
        """Teacher-forced decoding over a batch of captions.

        :param encoder_out: encoder features, any shape ending in encoder_dim
            (flattened here to (batch, num_pixels, encoder_dim)).
        :param encoded_captions: (batch, max_len) word indices.
        :param caption_lengths: (batch, 1) caption lengths (including <end>).
        :return: (predictions, sorted captions, decode lengths, sort indices)
        """
        batch_size = encoder_out.size(0)
        encoder_dim = encoder_out.size(-1)
        vocab_size = self.vocab_size
        # Derive the device from the input instead of relying on a
        # module-level `device` global, so the module is self-contained.
        device = encoder_out.device
        encoder_out = encoder_out.view(batch_size, -1, encoder_dim)
        num_pixels = encoder_out.size(1)
        # The broadcasted add inside the attention only works when these match.
        assert(self.num_feats == num_pixels)
        # Sort by caption length (desc) so the active batch can shrink as
        # shorter captions finish decoding.
        caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)
        encoder_out = encoder_out[sort_ind]
        encoded_captions = encoded_captions[sort_ind]
        embeddings = self.embedding(encoded_captions)
        global_img = self.get_global_image(encoder_out)
        h, c = torch.zeros_like(global_img), torch.zeros_like(global_img)
        encoder_out_small = self.image_feat_small(encoder_out)
        # We decode for length - 1 steps (no prediction after <end>).
        decode_lengths = (caption_lengths - 1).tolist()
        predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size, device=device)
        # NOTE: the original also allocated an unused `predictions_reverse`
        # tensor of the same size; removed to save memory.
        for t in range(max(decode_lengths)):
            # Number of captions still being decoded at step t.
            batch_size_t = sum([l > t for l in decode_lengths])
            h, c = self.decode_step(torch.cat([embeddings[:batch_size_t, t, :], global_img[:batch_size_t]], dim=1), (h[:batch_size_t], c[:batch_size_t]))
            # Additive attention: score each pixel from the projected features
            # plus the hidden state broadcast across pixels, then softmax.
            h_new = self.w_g(h).unsqueeze(-1)
            alpha = self.softmax(self.w_h_t(self.tanh(self.w_v(encoder_out_small[:batch_size_t]) + torch.matmul(h_new, torch.ones(batch_size_t, 1, num_pixels, device=device)))).squeeze(2))
            context_vector = (encoder_out_small[:batch_size_t] * alpha.unsqueeze(2)).sum(dim=1)
            preds = self.fc(self.dropout(torch.cat([h, context_vector], dim=1)))  # (batch_size_t, vocab_size)
            predictions[:batch_size_t, t, :] = preds
        return predictions, encoded_captions, decode_lengths, sort_ind
| 41.780702 | 184 | 0.643502 |
8edd1ace5b2de8fcd6b2a01d0707dfc96192d652 | 200 | py | Python | aws/container/app.py | m-li/lambda_cluster | 1481ecf83cd4be16c1c09a77301689e6e1ce61fb | [
"MIT"
] | null | null | null | aws/container/app.py | m-li/lambda_cluster | 1481ecf83cd4be16c1c09a77301689e6e1ce61fb | [
"MIT"
] | null | null | null | aws/container/app.py | m-li/lambda_cluster | 1481ecf83cd4be16c1c09a77301689e6e1ce61fb | [
"MIT"
] | null | null | null | import cloudpickle
def handler(event, context):
    """AWS Lambda entry point: unpickle a function from the event and run it.

    The event carries a cloudpickle-serialized callable (latin1-encoded
    string under "func") and its positional arguments under "args".
    """
    payload = event["func"].encode('latin1')
    fn = cloudpickle.loads(payload)
    results = fn(*event["args"])
    return {"statusCode": 200, "results": results}
| 25 | 60 | 0.645 |
0cf3e3e954d4f1264adc139d4133a9412a794d03 | 3,592 | py | Python | tests/components/zha/test_fan.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 4 | 2016-12-23T10:36:36.000Z | 2021-04-22T12:38:16.000Z | tests/components/zha/test_fan.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | tests/components/zha/test_fan.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 1 | 2020-03-07T10:43:50.000Z | 2020-03-07T10:43:50.000Z | """Test zha fan."""
from unittest.mock import call
import pytest
import zigpy.zcl.clusters.hvac as hvac
import zigpy.zcl.foundation as zcl_f
from homeassistant.components import fan
from homeassistant.components.fan import ATTR_SPEED, DOMAIN, SERVICE_SET_SPEED
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
make_attribute,
make_zcl_header,
)
@pytest.fixture
def zigpy_device(zigpy_device_mock):
    """Fan zigpy device with a single endpoint exposing the HVAC Fan cluster."""
    endpoints = {
        1: {
            "in_clusters": [hvac.Fan.cluster_id],
            "out_clusters": [],
            "device_type": 0,
        }
    }
    return zigpy_device_mock(endpoints)
async def test_fan(hass, zha_device_joined_restored, zigpy_device):
    """Test zha fan platform."""
    zha_device = await zha_device_joined_restored(zigpy_device)
    cluster = zigpy_device.endpoints.get(1).fan
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    assert entity_id is not None
    # test that the fan was created and that it is unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [zha_device])
    # test that the state has changed from unavailable to off
    assert hass.states.get(entity_id).state == STATE_OFF
    # turn on at fan
    # Simulate a fan_mode attribute report (attr id 0, value 1) from the device.
    attr = make_attribute(0, 1)
    hdr = make_zcl_header(zcl_f.Command.Report_Attributes)
    cluster.handle_message(hdr, [[attr]])
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ON
    # turn off at fan
    attr.value.value = 0
    cluster.handle_message(hdr, [[attr]])
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_OFF
    # turn on from HA
    # fan_mode 2 corresponds to the default "on" speed written by the component.
    cluster.write_attributes.reset_mock()
    await async_turn_on(hass, entity_id)
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call({"fan_mode": 2})
    # turn off from HA
    cluster.write_attributes.reset_mock()
    await async_turn_off(hass, entity_id)
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call({"fan_mode": 0})
    # change speed from HA
    # SPEED_HIGH maps to fan_mode 3 on the ZCL Fan cluster.
    cluster.write_attributes.reset_mock()
    await async_set_speed(hass, entity_id, speed=fan.SPEED_HIGH)
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call({"fan_mode": 3})
    # test adding new fan to the network and HA
    await async_test_rejoin(hass, zigpy_device, [cluster], (1,))
async def async_turn_on(hass, entity_id, speed=None):
    """Turn fan on."""
    # Only include keys whose values were actually supplied.
    data = {}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    if speed is not None:
        data[ATTR_SPEED] = speed
    await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
async def async_turn_off(hass, entity_id):
    """Turn fan off."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
async def async_set_speed(hass, entity_id, speed=None):
    """Set speed for specified fan."""
    # Only include keys whose values were actually supplied.
    data = {}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    if speed is not None:
        data[ATTR_SPEED] = speed
    await hass.services.async_call(DOMAIN, SERVICE_SET_SPEED, data, blocking=True)
| 30.965517 | 87 | 0.714087 |
231c2c22193f731f1d6524f942d2be3a6ab52e63 | 7,870 | py | Python | pokemonlib/GameClasses/PokemonClass.py | Lapinux/pokemonlib | b7e2e056fdd9988ba8d38a159b8e6a0ecf17e090 | [
"MIT"
] | null | null | null | pokemonlib/GameClasses/PokemonClass.py | Lapinux/pokemonlib | b7e2e056fdd9988ba8d38a159b8e6a0ecf17e090 | [
"MIT"
] | null | null | null | pokemonlib/GameClasses/PokemonClass.py | Lapinux/pokemonlib | b7e2e056fdd9988ba8d38a159b8e6a0ecf17e090 | [
"MIT"
] | null | null | null | import os # for finding the relative path to pokemon_data
import json # for parsing pokemon_data json files
import random # for generating IV or other random values
import logging # for printing data
from pokemonlib.misc import Exeptions # for initialisation exceptions
# One-time logging setup. A single basicConfig call replaces the previous
# redundant trio (basicConfig() twice plus root.setLevel): the second
# basicConfig was a guaranteed no-op because handlers already existed.
logging.basicConfig(level=logging.NOTSET)
logging.root.setLevel(logging.NOTSET)  # explicit: basicConfig no-ops when handlers pre-exist
# Module logger for this file; DEBUG so the templist dumps in Box show up.
logPokemon = logging.getLogger("Main.PokemonClass")
logPokemon.setLevel(logging.DEBUG)
class Pokemon(object):
    """Base class of all pokemon objects: loads base stats from the JSON data
    file matching the given id and rolls a random IV."""

    # noinspection PyTypeChecker
    def __init__(self, pokemon_id=1):
        """
        Pokemon is the base class of all pokemon objects. It stores the data and stats of the pokemons.
        It also generates the base stats of a pokemon based on its id.

        Usage: p = Pokemon(pokemon_id)
        Result: logs 'Pokemon detected as ´<name>´ has finished initializing.'

        :param pokemon_id: 1-based id matching a pokemon_<id>.json data file.
        :raises Exeptions.IdNotReferenced: when no data file exists for the id.
        :raises Exeptions.InvalidPokemonData: when the data file lacks required keys.
        """
        # Pokemon ID to recognize what pokemon it is even if self.name is changed (For Example Zoroarks
        # illusions or Dittos transformation)
        self.__id = pokemon_id

        # Platform-independent path to the data directory (os.path.join replaces
        # the previous manual "\\" vs "/" branching on os.name).
        path = os.path.join(os.getcwd(), "pokemonlib", "pokemon_data", "pokemon_properties")
        filename = "pokemon_" + str(self.__id) + ".json"
        filepath = os.path.join(path, filename)

        # Load the data file for this id, if it is present in the directory.
        pokemon = {}
        if filename in os.listdir(path):
            # "with" guarantees the handle is closed even if json.load raises
            # (the old code leaked the handle on a parse error).
            with open(filepath, "r") as f:
                pokemon = json.load(f)

        # When no (or empty) data was found for this id, raise an error.
        if not pokemon:
            raise Exeptions.IdNotReferenced("The pokemon_data with id " + str(self.__id) +
                                            " was not found in the directory ´" + path +
                                            "´. Make sure it is in a file named pokemon_(id of the pokemon_data).json")

        # Copy the base pokemon data onto object attributes. If any needed
        # information is missing, raise an error.
        try:
            stats = pokemon["stats"]
            self.name = pokemon["name"]
            self._hp = stats["hp"]
            self._defph = stats["defense"]
            self._defsp = stats["special-defense"]
            self._attkph = stats["attack"]
            # BUG FIX: special attack was previously read from the
            # "special-defense" key (copy-paste error; _defsp already reads it).
            self._attksp = stats["special-attack"]
            self._speed = stats["speed"]
            self._lv = 1  # every freshly created pokemon starts at level 1
            self._types = pokemon["types"]
            # Placeholder move set {slot: [name, power, usable]} -- TODO: load real moves from data.
            self._attacks = {1: ["Thunderbolt", 20, True], 2: ["Poop", 40, True], 3: ["Earthquake", 40, True],
                             4: ["Scratch", 40, True]}
            self._iv = random.randint(0, 31)  # individual value, rolled once per pokemon
        except KeyError:
            raise Exeptions.InvalidPokemonData("Some needed data is missing in ´" + filepath + "´. Please "
                                               + "ensure that the name, base stats, attacks and the type are included."
                                               + "")
        logPokemon.info("Pokemon detected as ´" + pokemon["name"] + "´ has finished initializing.")

    def getstats(self):
        """
        Gets all the stats of the pokemon and returns them in form of a Dictionary.

        Usage: getstats()
        Returns: {'hp', 'def', 'defsp', 'attk', 'attksp', 'speed', 'attacks'}
        """
        return {"hp": self._hp, "def": self._defph, "defsp": self._defsp, "attk": self._attkph,
                "attksp": self._attksp, "speed": self._speed, "attacks": self._attacks}
class Team:
    """A party holding between one and six Pokemon instances."""

    def __init__(self, pokemon1, pokemon2=None, pokemon3=None, pokemon4=None, pokemon5=None, pokemon6=None):
        """Collect the supplied pokemons, rejecting anything that is not a Pokemon."""
        self.__pokemonlist = []
        candidates = (pokemon1, pokemon2, pokemon3, pokemon4, pokemon5, pokemon6)
        for candidate in candidates:
            if candidate is None:  # slot was not supplied
                continue
            if not isinstance(candidate, Pokemon):  # basic sanity check
                raise Exeptions.NotAPokemon("Argument was passed that wasn't an instance of pokemon class")
            self.__pokemonlist.append(candidate)

    def pop(self):
        """Remove a pokemon from the team. Not implemented yet."""
        pass

    def add(self):
        """Add a pokemon to the team. Not implemented yet."""
        pass

    def transfer(self, BoxInstance):
        """Move a pokemon into the given Box. Not implemented yet."""
        pass
class Box(Team):
    """A storage box with ``MaxPokemon`` slots; empty slots are kept as None.

    NOTE(review): Team.__init__ is never invoked via super(), and this class
    keeps its own name-mangled _Box__pokemonlist -- confirm the inheritance is
    intentional before relying on any Team attribute here.
    """

    def __init__(self, *args, **kwargs):
        # Working list: one entry per slot, None meaning "empty slot".
        templist = []
        self.__pokemonlist = []
        # Box capacity can be overridden via the MaxPokemon keyword; default 50.
        if "MaxPokemon" in kwargs:
            self.MaxPokemon = kwargs["MaxPokemon"]
        else:
            self.MaxPokemon = 50
        for _ in range(self.MaxPokemon):
            templist.append(None)
        # Positional pokemons go into the first free slot: slots are filled
        # left-to-right, so the count of occupied entries is the next index.
        for pkmn in args:
            length = 0
            for p in templist:
                if p is not None:
                    length += 1
            logPokemon.debug(str(length))
            logPokemon.debug(templist)
            logPokemon.debug(len(templist))
            templist[length] = pkmn
        # Keyword pokemons may target a specific slot: the key is scanned for a
        # purely numeric whitespace-separated token, e.g. Box(**{"3": pkmn}).
        # NOTE(review): an ordinary keyword such as slot3=pkmn contains no
        # purely numeric token, so it always falls into the IndexError branch
        # and is placed like a positional arg -- confirm this is intended.
        for pos, pkmn in kwargs.items():
            if pos == "MaxPokemon":
                continue
            try:
                pos = [int(s) for s in pos.split() if s.isdigit()][0]
                templist[pos] = pkmn
            except IndexError:
                logPokemon.info("An Unvalid keyword arg was given. Handling it as a normal arg.")
                # Same "first free slot" placement as for positional args.
                length = 0
                for p in templist:
                    if p is not None:
                        length += 1
                logPokemon.debug(str(length))
                logPokemon.debug(templist)
                logPokemon.debug(len(templist))
                templist[length] = pkmn
        # NOTE(review): templist always holds exactly MaxPokemon entries, so
        # this check can never be true; overfilling instead surfaces as an
        # IndexError on templist[length] above -- verify the intended guard.
        if len(templist)-1 > self.MaxPokemon:
            raise ValueError("You entered contradictory values: More pokemons than the max capacity of the box")
        # Validate and copy every slot, keeping the empty None slots as well.
        for pokemon in templist:
            if pokemon is not None:  # Check if this argument was passed
                if isinstance(pokemon, Pokemon):  # Basic check
                    self.__pokemonlist.append(pokemon)
                else:
                    raise Exeptions.NotAPokemon("Argument was passed that wasn't an instance of pokemon class")
            else:
                self.__pokemonlist.append(pokemon)

    def __len__(self):
        """Return the number of occupied (non-None) slots, not the capacity."""
        length = 0
        for p in self.__pokemonlist:
            if p is not None:
                length +=1
        return length

    def __str__(self):
        """Return the stored pokemon names separated (and trailed) by a space."""
        string = ""
        for pokemon in self.__pokemonlist:
            if pokemon is not None:
                string += pokemon.name + " "
        return string

    def transfer(self, TeamInstance):
        """Move a pokemon into the given Team. Not implemented yet."""
        pass
def testPokemonClass(loggerInstance=logPokemon):
    """
    A function that inits an object of the Pokemon Class to see if it works.

    :param loggerInstance: logger used for progress output (defaults to the module logger).
    :return: True when the Pokemon was constructed and its stats were logged.
    :raises ValueError: when the typed input cannot be parsed as an integer (from int()).
    :raises Exeptions.IdNotReferenced: when the entered id has no data file.
    """
    loggerInstance.info("Testing Pokemon Class:")
    # Get the id to pass to Pokemon.__init__. int() already raises ValueError
    # on non-numeric input, so the old dead `type(num) is int` check is gone.
    num = int(input("Give the id of a Pokemon to pass to the pokemon class"))
    # BUG FIX: the old code appended "pokemonlib/..." to os.getcwd() without a
    # separator between them, yielding a non-existent directory. os.path.join
    # builds the same path Pokemon.__init__ uses, portably.
    path = os.path.join(os.getcwd(), "pokemonlib", "pokemon_data", "pokemon_properties")
    num_poke = len(os.listdir(path))  # one data file per pokemon id
    # BUG FIX: data files are 1-based (pokemon_1.json, ...), so the valid ids
    # are 1..num_poke; the old range(0, num_poke) accepted 0 and rejected the
    # highest id.
    if num not in range(1, num_poke + 1):
        raise Exeptions.IdNotReferenced("The id you entered is incorrect.")
    pkmn = Pokemon(num)
    loggerInstance.info(pkmn.getstats())
    return True
| 37.47619 | 119 | 0.569759 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.