text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import pickle
import gzip
import re
import random
import string
import fractions
import os
import itertools
import bisect
import numpy as np
import pint
import simpleeval
class UnsupportedDimensionsError(Exception):
    '''
    Raised when the comparator doesn't support the entered dimensions.
    '''

    def __init__(self, value):
        # Keep the rendered input interpretation for the error message.
        self.value = value

    def __str__(self):
        return self.value
class NoDimensionsError(Exception):
    '''
    Raised when the query is dimensionless.
    '''

    def __init__(self):
        # The exception type alone conveys the condition; no state needed.
        pass
class match(object):
    '''
    A quantity match returned by the comparator.
    '''

    # Natural-language templates keyed by '<dimension>.<category>'.
    # Hoisted to a class constant so the dict is not rebuilt on every
    # natural_language() call; duplicate keys from the original were removed.
    _PROPERTIES = {
        'area.area': 'the area of %s',
        'area.floorArea': 'the floor area of %s',
        'area.surfaceArea': 'the surface area of %s',
        'area.areaLand': 'the land area of %s',
        'area.watershed': 'the watershed area of %s',
        'area.areaMetro': 'the metro area of %s',
        'area.areaOfCatchment': 'the area of catchment of %s',
        'area.areaTotal': 'the total area of %s',
        'area.areaWater': 'the water area of %s',
        'area.areaUrban': 'the urban area of %s',
        'area.campusSize': 'the campus size of %s',
        'area.areaRural': 'the rural area of %s',
        'density.density': 'the density of %s',
        'frequency.frequency': 'the frequency of %s',
        'length.maximumBoatLength': 'the maximum boat length of the %s',
        'length.waistSize': 'the waist size of %s',
        'length.wheelbase': 'the wheelbase of the %s',
        'length.course': 'the course length of the %s',
        'length.mouthElevation': 'the mouth elevation of the %s',
        'length.hipSize': 'the hip size of %s',
        'length.meanRadius': 'the mean radius of %s',
        'length.originalMaximumBoatBeam':
            'the original maximum boat beam of the %s',
        'length.height': 'the height of %s',
        'length.originalMaximumBoatLength':
            'the original maximum boat length of the %s',
        'length.periapsis': 'the periapsis of %s',
        'length.distanceTraveled': 'the distance traveled by the %s',
        'length.bustSize': 'the bust size of %s',
        'length.shipDraft': 'the ship draft of the %s',
        # Bug fix: these two previously read 'the area of the %s'
        # (copy-paste from the area templates).
        'length.pistonStroke': 'the piston stroke of the %s',
        'length.trackLength': 'the track length of the %s',
        'length.capitalElevation': 'the capital elevation of %s',
        'length.prominence': 'the topographic prominence of %s',
        'length.minimumElevation': 'the minimum elevation of %s',
        'length.shoreLength': 'the shore length of %s',
        'length.elevation': 'the elevation of %s',
        'length.runwayLength': 'the length of a runway at %s',
        'length.sourceConfluenceElevation':
            'the source confluence elevation of the %s',
        'length.maximumElevation': 'the maximum elevation of %s',
        'length.cylinderBore': 'the cylinder bore of %s',
        'length.railGauge': 'the rail gauge of the %s',
        'length.diameter': 'the diameter of %s',
        'length.maximumBoatBeam': 'the maximum boat beam of the %s',
        'length.depth': 'the depth of %s',
        'length.length': 'the length of %s',
        'length.shipBeam': 'the ship beam of the %s',
        'length.wavelength': 'the wavelength of %s',
        'length.sourceElevation': 'the source elevation of the %s',
        'length.lineLength': 'the length of the %s',
        'length.apoapsis': 'the apoapsis of %s',
        'length.width': 'the width of %s',
        'length.distance': 'the distance of the %s',
        'length.heightAboveAverageTerrain':
            'the height above average terrain of %s\'s transmitter',
        'length.mainspan': 'the mainspan of the %s',
        'mass.mass': 'the mass of %s',
        'mass.loadLimit': 'the load limit of the %s',
        'mass.weight': 'the weight of %s',
        'mass.shipDisplacement': 'the displacement of the %s',
        'mass.lowerEarthOrbitPayload':
            'the low earth orbit payload capacity of the %s rocket',
        'power.effectiveRadiatedPower':
            'the effective radiated power of %s\'s transmitter',
        'power.powerOutput': 'the power output of the %s',
        'power.installedCapacity': 'the installed capacity of the %s',
        'voltage.voltageOfElectrification':
            'the voltage of electrification of the %s',
        'speed.topSpeed': 'the top speed of the %s',
        'speed.averageSpeed': 'the average speed of %s',
        'speed.escapeVelocity': 'the escape velocity of %s',
        'temperature.minimumTemperature': 'the minimum temperature of %s',
        'temperature.maximumTemperature': 'the maximum temperature of %s',
        'temperature.temperature': 'the temperature of %s',
        'temperature.meanTemperature': 'the mean temperature of %s',
        'time.missionDuration': 'the mission duration of the %s',
        'time.orbitalPeriod': 'the orbital period of %s',
        'time.rotationPeriod': 'the rotation period of %s',
        'time.timeInSpace': 'the time spent in space by %s',
        'time.runtime': 'the runtime of %s',
        'torque.torqueOutput': 'the torque output of the %s',
        'volume.volume': 'the volume of %s',
        'volume.fuelCapacity': 'the fuel capacity of the %s',
        'volume.displacement': 'the displacement of the %s'
    }

    def __init__(self, label, wiki, ratio, mag, unit, dimension, category):
        '''
        Sets label, Wikipedia URL, ratio, magnitude, unit, dimension,
        and category.
        '''
        self.label = label
        self.wiki = wiki
        self.ratio = ratio
        self.magnitude = mag
        self.unit = unit
        self.dimension = dimension
        self.category = category

    def __str__(self):
        '''
        Returns comma separated string of the match's values.
        '''
        # Bug fix: the original format string had five placeholders for
        # seven values, which raised TypeError on every call.
        return '%s, %s, %f, %s, %s, %s, %s' % (
            self.label, self.wiki, self.ratio, self.magnitude, self.unit,
            self.dimension, self.category)

    def natural_language(self):
        '''
        Returns a natural language (HTML) string describing the match,
        e.g. '≈ 2 × the height of <a ...>Eiffel Tower</a> ...'.
        '''
        # Format the ratio with three significant figures; convert any
        # scientific notation into superscript HTML.
        ratio = '{:.3g}'.format(self.ratio)
        if 'e' in ratio:
            a = ratio[:ratio.index('e') + 2]  # Mantissa plus 'e' and sign
            b = ratio[ratio.index('e') + 2:]
            b = re.sub(r'^(0*)', '', b)  # Strip leading zeros on exponent
            ratio = '<span>' + a + b + '</sup></span>'
        if self.ratio < 1:
            # Also show small ratios as a vulgar fraction when a
            # denominator <= 100 approximates them.
            frac = str(fractions.Fraction(self.ratio).limit_denominator(100))
            if frac != '0':
                f = frac.split('/')
                if len(f) == 2:
                    ratio += ' (<sup>' + f[0] + '</sup>⁄<sub>' \
                        + f[1] + '</sub>)'
        label = '<a href="' + '//en.wikipedia.org/wiki/' + self.wiki + \
            '">' + self.label + '</a>'
        # Typographic apostrophe; computed on a copy so the shared class
        # constant is never mutated.
        template = self._PROPERTIES[
            self.dimension + '.' + self.category].replace("'", '’')
        if 'e' in ratio:
            # Parenthesize scientific-notation ratios for readability.
            ratio = '(' + ratio + ')'
        ratio = re.sub(r'(e\+)', ' × 10<sup>', ratio)
        ratio = re.sub(r'(e\-)', ' × 10<sup>-', ratio)
        prefix = '≈ %s × '
        mag = '{:.3g}'.format(self.magnitude)
        if 'e' in mag:
            a = mag[:mag.index('e') + 2]
            b = mag[mag.index('e') + 2:]
            b = re.sub(r'^(0*)', '', b)
            mag = '<span>' + a + b + '</sup></span>'
        mag = re.sub(r'(e\+)', ' × 10<sup>', mag)
        mag = re.sub(r'(e\-)', ' × 10<sup>-', mag)
        suffix = ' <small><span class="text-muted">(%s %s)</span></small>'
        # Render the units container as HTML spans with exponents.
        units = ''
        for k in self.unit.keys():
            units += '<span>' + k
            if self.unit[k] != 1:
                units += '<sup>' + str(int(self.unit[k])) + '</sup>'
            units += '</span> '
        units = units.strip().replace('_', ' ')
        return (prefix + template + suffix) % (ratio, label, mag, units)
class comparator(object):
    '''
    Compares quantities against a database of known quantities.
    '''

    def __init__(self):
        '''
        Initializes the comparator with pickled data and a pint unit
        registry.
        '''
        # self.data maps dimension name -> tuple whose slots are used below
        # as: [0] labels, [1] Wikipedia URLs, [2] magnitudes (assumed sorted
        # ascending -- searchsorted relies on it), [3] categories,
        # [4] popularity weights.
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'data.pkl.gz')
        with gzip.open(path, 'rb') as pkl_file:  # Closed even on error
            self.data = pickle.load(pkl_file)
        self.ureg = pint.UnitRegistry()

    @staticmethod
    def _sci_html(number):
        '''
        Formats a number with three significant figures, converting any
        scientific notation into superscript HTML
        (e.g. 1e+06 -> '<span>1 × 10<sup>6</sup></span>').
        '''
        formatted = '{:.3g}'.format(number)
        if 'e' in formatted:
            e = formatted.index('e')
            # Keep mantissa, 'e' and sign; strip leading zeros on exponent.
            exponent = re.sub(r'^(0*)', '', formatted[e + 2:])
            formatted = '<span>' + formatted[:e + 2] + exponent \
                + '</sup></span>'
        formatted = re.sub(r'(e\+)', ' × 10<sup>', formatted)
        formatted = re.sub(r'(e\-)', ' × 10<sup>-', formatted)
        return formatted

    @staticmethod
    def _units_html(units_container):
        '''
        Renders a pint units container as HTML spans with superscript
        exponents; underscores in unit names become spaces.
        '''
        parts = ''
        for name in units_container.keys():
            parts += '<span>' + name
            if units_container[name] != 1:
                parts += '<sup>' + str(int(units_container[name])) + '</sup>'
            parts += '</span> '
        return parts.strip().replace('_', ' ')

    def _match_at(self, index, dimension, base_unit, q, qb):
        '''
        Builds a match object for the database entry at ``index`` within
        ``dimension``, relative to the query ``q`` (``qb`` in base units).
        '''
        entry = self.data[dimension]
        return match(entry[0][index], entry[1][index],
                     qb.magnitude / entry[2][index],
                     (entry[2][index] * base_unit).to(q.units).magnitude,
                     q.units, dimension, entry[3][index])

    def _sample_matches(self, count, window, dimension, base_unit, q, qb):
        '''
        Draws ``count`` distinct entries from ``window`` (inclusive index
        bounds): all but the last weighted by popularity, the last uniform.

        Assumes the window holds at least ``count`` entries -- matches the
        original code's expectations; TODO confirm against the data set.
        '''
        popularity = self.data[dimension][4][window[0]:window[1]]
        pop_dist = list(itertools.accumulate(popularity))
        chosen = []
        matches = []
        for i in range(count):
            while True:
                if i < count - 1:
                    # Popularity-weighted draw via the cumulative sums.
                    # Bug fix: the original random-match loop compared
                    # against close_count instead of its own count.
                    r = random.random() * pop_dist[-1]
                    ri = window[0] + bisect.bisect(pop_dist, r)
                else:
                    ri = random.randint(*window)
                if ri not in chosen:
                    break
            # Bug fix: the original random-match loop never recorded the
            # pick, so duplicate random matches were possible.
            chosen.append(ri)
            matches.append(self._match_at(ri, dimension, base_unit, q, qb))
        return matches

    def compare(self, input_string, close_count=5, random_count=5):
        '''
        Parses the quantity described by ``input_string`` and compares it to
        other quantities. The number of close matches (drawn from a window
        around the closest database entry) and the number of random matches
        can be specified.

        Returns a tuple of (input interpretation HTML, closest match,
        list of close matches, list of random matches).

        Raises NoDimensionsError for dimensionless queries and
        UnsupportedDimensionsError for unsupported dimensionalities;
        pint.UndefinedUnitError propagates for unparsable units.
        '''
        # Since Pint uses eval, whitelist certain characters for security
        alphabet = string.ascii_letters + string.digits + re.escape(' */^+-.')
        escaped_string = re.sub('[^' + alphabet + ']', '', input_string)
        # Number words mapped to their numeric values
        nums = {
            'zero': 0,
            'one': 1,
            'two': 2,
            'three': 3,
            'four': 4,
            'five': 5,
            'six': 6,
            'seven': 7,
            'eight': 8,
            'nine': 9,
            'ten': 10,
            'eleven': 11,
            'twelve': 12,
            'thirteen': 13,
            'fourteen': 14,
            'fifteen': 15,
            'sixteen': 16,
            'seventeen': 17,
            'eighteen': 18,
            'nineteen': 19,
            'twenty': 20,
            'thirty': 30,
            'forty': 40,
            'fifty': 50,
            'sixty': 60,
            'seventy': 70,
            'eighty': 80,
            'ninety': 90,
            'hundred': 1e2,
            'thousand': 1e3,
            'million': 1e6,
            'billion': 1e9,
            'trillion': 1e12,
            'quadrillion': 1e15,
            'quintillion': 1e18,
            'sextillion': 1e21,
            'septillion': 1e24,
            'octillion': 1e27,
            'nonillion': 1e30,
            'decillion': 1e33,
            'dozen': 12,
            'gross': 144
        }
        # Replace number words with '*<numeral>' so they compose
        # multiplicatively (e.g. 'three hundred' -> '*3... *100...').
        s = escaped_string.replace('-', ' ')
        s = s.replace('stone', 'rock')  # Escape stone unit ('one' substring)
        for n in nums:
            r = re.compile(re.escape(n), re.IGNORECASE)
            s = r.sub('*' + '{:f}'.format(nums[n]), s)
        s = s.replace('rock', 'stone')  # Unescape stone unit
        # Robustness fix: startswith() is also safe for empty input, where
        # the original s[0] raised IndexError.
        if s.startswith('*'):
            s = '1' + s
        # Allow caret power notation to work
        s = s.replace('^', '**')
        # Split at first remaining letter
        i = re.search('[^a-zA-Z]*', s).end()
        # Safely evaluate the leading numerical expression
        if i != 0:
            s = s.replace(s[:i], str(simpleeval.simple_eval(s[:i])) + ' ')
        # Parse escaped input expression
        try:
            q = self.ureg.parse_expression(s)
        except pint.UndefinedUnitError:
            # Multi-word unit names use underscores in pint; retry every
            # combination of '_'/' ' between the whitespace-separated tokens.
            parsed = False
            split = s.split(' ')
            gaps = len(split) - 1
            for joiners in itertools.product('_ ', repeat=gaps):
                joined = split[0]
                for j in range(gaps):
                    joined += joiners[j] + split[j + 1]
                try:
                    q = self.ureg.parse_expression(joined)
                    parsed = True
                    break
                except pint.UndefinedUnitError:
                    pass
            if not parsed:
                # Nothing worked; parse the original string again so the
                # UndefinedUnitError propagates to the caller.
                self.ureg.parse_expression(s)
        if not hasattr(q, 'magnitude'):
            raise NoDimensionsError()
        # Input interpretation (HTML)
        input_interp = '%s %s' % (self._sci_html(q.magnitude),
                                  self._units_html(q.units))
        # Reference unit for each supported dimensionality
        base_units = {
            'area': (self.ureg.meter ** 2),
            'density': (self.ureg.kilogram / self.ureg.meter ** 3),
            'frequency': (self.ureg.hertz),
            'length': (self.ureg.meter),
            'mass': (self.ureg.gram),
            'power': (self.ureg.watt),
            'voltage': (self.ureg.volt),
            'speed': (self.ureg.kph),
            'temperature': (self.ureg.kelvin),
            'time': (self.ureg.second),
            'torque': (self.ureg.newton * self.ureg.meter),
            'volume': (self.ureg.meter ** 3)
        }
        dimension = None
        base_unit = None
        for name in base_units:
            if hasattr(q, 'dimensionality') \
                    and base_units[name].dimensionality == q.dimensionality:
                base_unit = base_units[name]
                qb = q.to(base_unit.units)  # Query in base units
                dimension = name
                break
        if not dimension:
            raise UnsupportedDimensionsError(input_interp)
        magnitudes = self.data[dimension][2]
        length = len(magnitudes)
        # Either closest or 2nd closest value (good enough and very fast)
        index = np.searchsorted(magnitudes, qb.magnitude)
        index = max(min(index, length - 1), 0)
        closest_match = self._match_at(index, dimension, base_unit, q, qb)
        # Close comparisons, drawn from a window around the closest entry.
        dist = min(250, int(0.05 * length))
        # Bug fix: clamp the window to valid indices. The original lower
        # bound 'index + max(-dist, 0)' never extended below the closest
        # match, and the upper bound 'index + min(dist, length)' could run
        # past the end of the data and raise IndexError.
        window = (max(index - dist, 0), min(index + dist, length - 1))
        close_matches = self._sample_matches(
            close_count, window, dimension, base_unit, q, qb)
        # Random comparisons over the whole database.
        random_matches = self._sample_matches(
            random_count, (0, length - 1), dimension, base_unit, q, qb)
        return input_interp, closest_match, close_matches, random_matches

    def statistics(self):
        '''
        Returns the total number of quantities in the database.
        '''
        return sum(len(entry[2]) for entry in self.data.values())
|
{
"content_hash": "67077c5127dfffbc98cd24976a0e8431",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 79,
"avg_line_length": 40.35,
"alnum_prop": 0.5055762081784386,
"repo_name": "mpetroff/nugacious",
"id": "d5497c4fb296fe505b0a01fa3e576a477b83bfa2",
"size": "17076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/nugacious/scripts/quantity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5333"
},
{
"name": "HTML",
"bytes": "7987"
},
{
"name": "Python",
"bytes": "29761"
}
],
"symlink_target": ""
}
|
"""Handler for cron update requests made from persistent-cal."""
__author__ = 'daniel.j.hermes@gmail.com (Daniel Hermes)'
# General libraries
import datetime
# App engine specific libraries
import webapp2
# App specific libraries
from google_api_utils import InitCredentials
from handler_utils import ExtendedHandler
from library import MonthlyCleanup
from library import UpdateUserSubscriptions
from models import UserCal
from time_utils import ConvertToInterval
class MainHandler(ExtendedHandler):
    """Handles cron requests to /cron.

    This handler carries out updates for any user scheduled to get an update
    during that update interval.
    """

    def get(self):  # pylint:disable-msg=C0103
        """Updates every three hours."""
        # Only honor requests carrying the X-AppEngine-Cron: true header.
        # ('http://code.google.com/appengine/docs/python/tools/webapp/'
        #  'requestclass.html#Request_headers')
        # http://docs.webob.org/en/latest/reference.html#headers
        # "Keys are case-insensitive."
        if self.request.headers.get('X-AppEngine-Cron', '') != 'true':
            return

        interval = ConvertToInterval(datetime.datetime.utcnow())

        # Credentials are initialized lazily, only once at least one user
        # actually needs an update.
        credentials = None
        for user_cal in UserCal.query(UserCal.update_intervals == interval):
            if not user_cal.calendars:
                continue
            if credentials is None:
                credentials = InitCredentials()
            # pylint:disable-msg=E1123
            UpdateUserSubscriptions(user_cal, credentials=credentials,
                                    defer_now=True)
class CleanupHandler(ExtendedHandler):
    """Handles cron requests to /cron-monthly.

    Cleans up any events older than three months by using MonthlyCleanup.
    """

    def get(self):  # pylint:disable-msg=C0103
        """Updates once a month."""
        # Only honor requests coming from the App Engine cron service.
        if self.request.headers.get('X-AppEngine-Cron', '') != 'true':
            return
        today = datetime.datetime.utcnow().date()
        MonthlyCleanup(today, defer_now=True)  # pylint:disable-msg=E1123
# WSGI application routing the cron endpoints to their handlers.
APPLICATION = webapp2.WSGIApplication([
    ('/cron', MainHandler),
    ('/cron-monthly', CleanupHandler),
], debug=True)
|
{
"content_hash": "08043267a9708b75cd860faea40db1b8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 75,
"avg_line_length": 29.63888888888889,
"alnum_prop": 0.6982193064667291,
"repo_name": "dhermes/persistent-cal",
"id": "ab084359a8b8f08a7f756868bc553814cd85a6e2",
"size": "2740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1321"
},
{
"name": "HTML",
"bytes": "14513"
},
{
"name": "Makefile",
"bytes": "346"
},
{
"name": "Python",
"bytes": "162807"
}
],
"symlink_target": ""
}
|
from fabric.api import *
import os, os.path
import fabric.contrib.project as project
# Production host (user@host:port) targeted by the @hosts-decorated tasks.
PROD = 'eugen@eugenkiss.com:223'
# Remote directory the site is deployed to.
DEST_PATH = '/var/www/loopgotowhile/'
# Local project root and its deploy/test staging directories.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
DEPLOY_PATH = os.path.join(ROOT_PATH, 'deploy')
TEST_PATH = os.path.join(ROOT_PATH, 'test')
# Remote nginx site configuration file updated by update_nginx_conf().
NGINX_CONF_PATH = '/etc/nginx/sites-available/loopgotowhile'
@hosts(PROD)
def update_nginx_conf():
    """Upload the local nginx.conf to the server and reload nginx."""
    put('nginx.conf', NGINX_CONF_PATH, use_sudo=True)
    sudo('/etc/init.d/nginx reload')
def backup():
    """Force-push the repository to the Dropbox backup remote."""
    local('git push -f ~/Dropbox/backup/loopgotowhile-site.git')
def compile(profiling):
    """Clean and rebuild the server via cabal.

    When ``profiling`` is truthy, the build is configured with executable
    and library profiling enabled; otherwise profiling is explicitly
    disabled.
    """
    local('rm -rf ' + os.path.join(ROOT_PATH, 'dist/'))
    if profiling:
        flags = ('--enable-executable-profiling --enable-library-profiling '
                 '--ghc-option=-auto-all')
    else:
        flags = '--disable-executable-profiling --disable-library-profiling'
    local('cabal configure ' + flags)
    local('cabal build')
def copy(path):
    """Copy the static files and the server binary into ``path``."""
    # make sure the directory is there!
    local('mkdir -p ' + path)
    # remove old contents
    local('rm -r ' + path + '/*')
    # (source relative to ROOT_PATH, destination name) pairs.
    files = (('index.html', 'index.html'),
             ('style.css', 'style.css'),
             ('bg.jpg', 'bg.jpg'),
             ('dist/build/LGWServer/LGWServer', 'LGWServer'))
    for src, dst in files:
        local('cp ' + os.path.join(ROOT_PATH, src) + ' ' +
              os.path.join(path, dst))
    local('cp -r ' + os.path.join(ROOT_PATH, 'codemirror') + ' ' +
          os.path.join(path, 'codemirror'))
def test():
    """Test the server locally"""
    # Profiling and using more than one thread is not possible
    compile(False)
    # make sure the directory is there!
    local('mkdir -p ' + TEST_PATH)
    copy(TEST_PATH)
    with lcd(TEST_PATH):
        # Run with heap-profiling RTS flags for local inspection.
        local('LGWServer test +RTS -hd -p')
        #local('LGWServer test +RTS -N')
@hosts(PROD)
def update_static_files():
    """Rsync the staged deploy directory to the server and reload nginx."""
    copy(DEPLOY_PATH)
    project.rsync_project(
        remote_dir=DEST_PATH,
        local_dir=DEPLOY_PATH.rstrip('/') + '/',
        delete=True
    )
    update_nginx_conf()
@hosts(PROD)
def publish():
    """Deploy the site: kill old server processes, sync files, restart."""
    # Kill old LGWServer instances
    # Note: For some magical reason I have to kill the processes here instead
    # of directly before restarting them at the bottom of this function.
    # Otherwise LGWServer is hardly ever started on the remote. I guess the
    # killall has some delay/aftermath... but I also tried to put a `sleep 5s`
    # between the kills and the restarts without any luck...
    with settings(warn_only=True):
        run('killall -9 -v LGWServer ')
        run('killall -9 -v cpulimit ')
    #backup()
    #compile(False)
    copy(DEPLOY_PATH)
    project.rsync_project(
        remote_dir=DEST_PATH,
        local_dir=DEPLOY_PATH.rstrip('/') + '/',
        delete=True
    )
    update_nginx_conf()
    # Limit cpu usage (cpulimit must be installed on remote)
    run('nohup cpulimit -e LGWServer -l 80 >& /dev/null < /dev/null &')
    run('sleep 1s')
    # Run LGWServer in background and limit heap size
    #run('nohup ' + os.path.join(DEST_PATH, 'LGWServer') + ' +RTS -N -M40m -RTS >& /dev/null < /dev/null &')
    run('nohup ' + os.path.join(DEST_PATH, 'LGWServer') + ' +RTS -M25m -RTS >& /dev/null < /dev/null &')
    run('sleep 1s')
|
{
"content_hash": "552f42b186317b82061a7cf6174f32d4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 112,
"avg_line_length": 37.8,
"alnum_prop": 0.6202233980011758,
"repo_name": "eugenkiss/loopgotowhile-site",
"id": "c40b5fe07355a406d088560db067b2188cac3c01",
"size": "3402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Haskell",
"bytes": "8238"
},
{
"name": "JavaScript",
"bytes": "1395"
}
],
"symlink_target": ""
}
|
"""Support for Orvibo S20 Wifi Smart Switches."""
import logging
from orvibo.s20 import S20, S20Exception, discover
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import (
CONF_DISCOVERY,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_SWITCHES,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Default entity name and discovery behavior for configured switches.
DEFAULT_NAME = "Orvibo S20 Switch"
DEFAULT_DISCOVERY = True
# Extend the base switch platform schema: a list of switch definitions
# (host required; MAC and name optional) plus an optional discovery toggle.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_SWITCHES, default=[]): vol.All(
            cv.ensure_list,
            [
                {
                    vol.Required(CONF_HOST): cv.string,
                    vol.Optional(CONF_MAC): cv.string,
                    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                }
            ],
        ),
        vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
    """Set up S20 switches."""
    switch_data = {}
    # Discovered switches first, so explicitly configured entries win.
    if config.get(CONF_DISCOVERY):
        _LOGGER.info("Discovering S20 switches ...")
        switch_data.update(discover())
    for entry in config.get(CONF_SWITCHES, [config]):
        switch_data[entry.get(CONF_HOST)] = entry
    entities = []
    for host, data in switch_data.items():
        try:
            device = S20(host, mac=data.get(CONF_MAC))
        except S20Exception:
            _LOGGER.error("S20 at %s couldn't be initialized", host)
            continue
        entities.append(S20Switch(data.get(CONF_NAME), device))
        _LOGGER.info("Initialized S20 at %s", host)
    add_entities_callback(entities)
class S20Switch(SwitchDevice):
    """Representation of an S20 switch."""

    def __init__(self, name, s20):
        """Initialize the S20 device."""
        self._name = name
        self._s20 = s20
        self._state = False
        # Cache the exception class used by the orvibo library.
        self._exc = S20Exception

    @property
    def should_poll(self):
        """Return the polling state."""
        return True

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def update(self):
        """Update device state."""
        try:
            self._state = self._s20.on
        except self._exc:
            _LOGGER.exception("Error while fetching S20 state")

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._set_power(True, "Error while turning on S20")

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._set_power(False, "Error while turning off S20")

    def _set_power(self, state, error_message):
        """Write the power state to the device, logging failures."""
        try:
            self._s20.on = state
        except self._exc:
            _LOGGER.exception(error_message)
|
{
"content_hash": "0b928b134ce5f64c66e3626cba462d82",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 81,
"avg_line_length": 26.772727272727273,
"alnum_prop": 0.5806451612903226,
"repo_name": "Teagan42/home-assistant",
"id": "75a95e053ae166c9087b4d92a588e3333b031feb",
"size": "2945",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/orvibo/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet, pinvh
def log_likelihood(emp_cov, precision):
    """Computes the sample mean of the log_likelihood under a covariance model

    Computes the empirical expected log-likelihood (accounting for the
    normalization terms and scaling), allowing for universal comparison
    (beyond this software package).

    Parameters
    ----------
    emp_cov : 2D ndarray (n_features, n_features)
        Maximum Likelihood Estimator of covariance

    precision : 2D ndarray (n_features, n_features)
        The precision matrix of the covariance model to be tested

    Returns
    -------
    sample mean of the log-likelihood
    """
    n_features = precision.shape[0]
    # Gaussian log-likelihood per sample: (-<S, P> + log|P| - p*log(2*pi))/2.
    result = - np.sum(emp_cov * precision) + fast_logdet(precision)
    result -= n_features * np.log(2 * np.pi)
    result /= 2.
    return result
def empirical_covariance(X, assume_centered=False):
    """Computes the Maximum likelihood covariance estimator

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate

    assume_centered : Boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        Empirical covariance (Maximum Likelihood Estimator).
    """
    X = np.asarray(X)
    # Treat a 1-D array as a single sample of n_features.
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
    if X.shape[0] == 1:
        warnings.warn("Only one sample available. "
                      "You may want to reshape your data array")
    if assume_centered:
        return np.dot(X.T, X) / X.shape[0]
    return np.cov(X.T, bias=1)
class EmpiricalCovariance(BaseEstimator):
    """Maximum likelihood covariance estimator

    Parameters
    ----------
    store_precision : bool
        Specifies if the estimated precision is stored.

    assume_centered : bool
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data are centered before computation.

    Attributes
    ----------
    covariance_ : 2D ndarray, shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : 2D ndarray, shape (n_features, n_features)
        Estimated pseudo-inverse matrix.
        (stored only if store_precision is True)
    """

    def __init__(self, store_precision=True, assume_centered=False):
        self.store_precision = store_precision
        self.assume_centered = assume_centered

    def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates

        Storage is done accordingly to `self.store_precision`.
        Precision stored only if invertible.

        Parameters
        ----------
        covariance : 2D ndarray, shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.
        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            # Pseudo-inverse of a symmetric matrix.
            self.precision_ = pinvh(covariance)
        else:
            self.precision_ = None

    def get_precision(self):
        """Getter for the precision matrix.

        Returns
        -------
        precision_ : array-like,
            The precision matrix associated to the current covariance object.
        """
        # Use the stored precision when available; otherwise recompute it
        # from the covariance on each call.
        if self.store_precision:
            precision = self.precision_
        else:
            precision = pinvh(self.covariance_)
        return precision

    def fit(self, X, y=None):
        """Fits the Maximum Likelihood Estimator covariance model
        according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        y : not used, present for API consistence purpose.

        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X)
        # The location (mean) estimate; zero by definition when the data
        # are assumed pre-centered.
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        covariance = empirical_covariance(
            X, assume_centered=self.assume_centered)
        self._set_covariance(covariance)
        return self

    def score(self, X_test, y=None):
        """Computes the log-likelihood of a Gaussian data set with
        `self.covariance_` as an estimator of its covariance matrix.

        Parameters
        ----------
        X_test : array-like, shape = [n_samples, n_features]
            Test data of which we compute the likelihood, where n_samples is
            the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution than
            the data used in fit (including centering).

        y : not used, present for API consistence purpose.

        Returns
        -------
        res : float
            The likelihood of the data set with `self.covariance_` as an
            estimator of its covariance matrix.
        """
        # compute empirical covariance of the test set, centered with the
        # location estimated during fit
        test_cov = empirical_covariance(
            X_test - self.location_, assume_centered=True)
        # compute log likelihood
        res = log_likelihood(test_cov, self.get_precision())
        return res

    def error_norm(self, comp_cov, norm='frobenius', scaling=True,
                   squared=True):
        """Computes the Mean Squared Error between two covariance estimators.
        (In the sense of the Frobenius norm).

        Parameters
        ----------
        comp_cov : array-like, shape = [n_features, n_features]
            The covariance to compare with.

        norm : str
            The type of norm used to compute the error. Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A))
            where A is the error ``(comp_cov - self.covariance_)``.

        scaling : bool
            If True (default), the squared error norm is divided by n_features.
            If False, the squared error norm is not rescaled.

        squared : bool
            Whether to compute the squared error norm or the error norm.
            If True (default), the squared error norm is returned.
            If False, the error norm is returned.

        Returns
        -------
        The Mean Squared Error (in the sense of the Frobenius norm) between
        `self` and `comp_cov` covariance estimators.
        """
        # compute the error
        error = comp_cov - self.covariance_
        # compute the error norm
        if norm == "frobenius":
            squared_norm = np.sum(error ** 2)
        elif norm == "spectral":
            # Largest singular value of error^T error.
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented")
        # optionaly scale the error norm
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)
        return result

    def mahalanobis(self, observations):
        """Computes the Mahalanobis distances of given observations.

        The provided observations are assumed to be centered. One may want to
        center them using a location estimate first.

        Parameters
        ----------
        observations : array-like, shape = [n_observations, n_features]
            The observations, the Mahalanobis distances of the which we
            compute. Observations are assumed to be drawn from the same
            distribution than the data used in fit (including centering).

        Returns
        -------
        mahalanobis_distance : array, shape = [n_observations,]
            Mahalanobis distances of the observations.
        """
        precision = self.get_precision()
        # compute mahalanobis distances: row-wise x P x^T without forming
        # the full quadratic-form matrix
        centered_obs = observations - self.location_
        mahalanobis_dist = np.sum(
            np.dot(centered_obs, precision) * centered_obs, 1)
        return mahalanobis_dist
|
{
"content_hash": "998a789ae8d42988a8e2a39d3afb0362",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 32.07368421052632,
"alnum_prop": 0.6132808226671043,
"repo_name": "abhishekkrthakur/scikit-learn",
"id": "a594cb687b11af21a5a58deb250d2fd382974b60",
"size": "9141",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/covariance/empirical_covariance_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385102"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5668741"
},
{
"name": "Shell",
"bytes": "4182"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import math
import os
import random
import shutil
import sys
import tempfile
import time
import traceback
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import compat
class SaverTest(test.TestCase):
  """Tests for `Saver`: save/restore of variables and custom saveables."""
  def basicSaveRestore(self, variable_op):
    """Save two variables and a CheckpointedOp, then restore them in new graphs.

    Args:
      variable_op: factory used to create the variables under test, e.g.
        `variables.Variable` or `resource_variable_ops.ResourceVariable`.
    """
    save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = variable_op(10.0, name="v0")
      v1 = variable_op(20.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      v2_init = v2.insert("k1", 30.0)
      # Initialize all variables
      if not context.executing_eagerly():
        self.evaluate([variables.global_variables_initializer(), v2_init])
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, self.evaluate(v0))
      self.assertEqual(20.0, self.evaluate(v1))
      self.assertEqual(b"k1", self.evaluate(v2.keys()))
      self.assertEqual(30.0, self.evaluate(v2.values()))
      # Save the initialized values in the file at "save_path"
      save = saver_module.Saver(
          {
              "v0": v0,
              "v1": v1,
              "v2": v2.saveable
          }, restore_sequentially=True)
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the parameter nodes
    # have not been initialized either.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0 = variable_op(-1.0, name="v0")
      v1 = variable_op(-1.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      # Assert that the variables are not initialized.
      if not context.executing_eagerly():
        self.assertEqual(
            len(variables.report_uninitialized_variables().eval()), 2)
        self.assertEqual(0, len(v2.keys().eval()))
        self.assertEqual(0, len(v2.values().eval()))
      # Restore the saved values in the parameter nodes.
      save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, self.evaluate(v0))
      self.assertEqual(20.0, self.evaluate(v1))
      self.assertEqual(b"k1", self.evaluate(v2.keys()))
      self.assertEqual(30.0, self.evaluate(v2.values()))
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0_2 = variable_op(1000.0, name="v0")
      v1_2 = variable_op(2000.0, name="v1")
      v2_2 = saver_test_utils.CheckpointedOp(name="v2")
      v2_init = v2_2.insert("k1000", 3000.0)
      # Check that the parameter nodes have been initialized.
      if not context.executing_eagerly():
        init_all_op = [variables.global_variables_initializer(), v2_init]
        self.evaluate(init_all_op)
      # TODO(xpan): Why _mutable_hash_table_v2 doesn't create empty
      # table as it claims in eager mode?
      self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
      self.assertEqual(3000.0, self.evaluate(v2_2.values()))
      self.assertEqual(1000.0, self.evaluate(v0_2))
      self.assertEqual(2000.0, self.evaluate(v1_2))
      # Restore the values saved earlier in the parameter nodes.
      save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, self.evaluate(v0_2))
      self.assertEqual(20.0, self.evaluate(v1_2))
      self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
      self.assertEqual(30.0, self.evaluate(v2_2.values()))
  def testBasic(self):
    """Round-trips plain `variables.Variable` through basicSaveRestore."""
    self.basicSaveRestore(variables.Variable)
  @test_util.run_in_graph_and_eager_modes()
  def testResourceBasic(self):
    """Round-trips `ResourceVariable` through basicSaveRestore."""
    self.basicSaveRestore(resource_variable_ops.ResourceVariable)
  def testResourceVariableReadOpsAddedDeterministically(self):
    """Building a Saver over resource variables yields identical graph defs."""
    graph_defs = []
    num_graphs = 10
    for _ in range(num_graphs):
      with ops_lib.Graph().as_default() as g:
        for i in range(20):
          resource_variable_ops.ResourceVariable(i, name="var%s" % i)
        saver_module.Saver()
        graph_defs.append(g.as_graph_def())
    # All graphs must serialize identically: Saver construction is
    # deterministic with respect to variable read ops.
    for i in range(num_graphs - 1):
      self.assertEqual(graph_defs[i], graph_defs[i + 1])
  def testEagerBasic(self):
    """Eager-mode save then restore of scalar and vector resource variables."""
    with context.eager_mode():
      ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
      v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
      v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
      save = saver_module.Saver([v1, v2])
      # In eager mode save/restore take no session (None).
      save.save(None, ckpt_prefix)
      v1.assign(0.0)
      v2.assign([0, 0])
      self.assertNear(0.0, self.evaluate(v1), 1e-5)
      self.assertAllEqual([0, 0], self.evaluate(v2))
      save.restore(None, ckpt_prefix)
      self.assertNear(3.14, self.evaluate(v1), 1e-5)
      self.assertAllEqual([1, 2], self.evaluate(v2))
  def testEagerGraphCompatibility(self):
    """Checkpoints written in graph mode restore in eager mode, and vice versa."""
    # Save from graph mode and restore from eager mode.
    graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
    with context.graph_mode():
      with self.test_session(graph=ops_lib.Graph()) as sess:
        # Create a graph model and save the checkpoint.
        w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
        w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
        graph_saver = saver_module.Saver([w1, w2])
        sess.run(variables.global_variables_initializer())
        graph_saver.save(sess, graph_ckpt_prefix)
    with context.eager_mode():
      ops_lib._default_graph_stack.reset()  # pylint: disable=protected-access
      ops_lib.reset_default_graph()
      w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
      w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
      graph_saver = saver_module.Saver([w1, w2])
      graph_saver.restore(None, graph_ckpt_prefix)
      self.assertAllEqual(self.evaluate(w1), 1.0)
      self.assertAllEqual(self.evaluate(w2), 2.0)
    # Save from eager mode and restore from graph mode.
    eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
    with context.eager_mode():
      ops_lib._default_graph_stack.reset()  # pylint: disable=protected-access
      ops_lib.reset_default_graph()
      w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
      w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
      graph_saver = saver_module.Saver([w3, w4])
      graph_saver.save(None, eager_ckpt_prefix)
    with context.graph_mode():
      with self.test_session(graph=ops_lib.Graph()) as sess:
        w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
        w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
        graph_saver = saver_module.Saver([w3, w4])
        sess.run(variables.global_variables_initializer())
        graph_saver.restore(sess, eager_ckpt_prefix)
        self.assertAllEqual(w3.eval(), 3.0)
        self.assertAllEqual(w4.eval(), 4.0)
  @test_util.run_in_graph_and_eager_modes()
  def testResourceSaveRestoreCachingDevice(self):
    """Save/restore works for a ResourceVariable with a caching device."""
    save_path = os.path.join(self.get_temp_dir(), "resource_cache")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
                                                 name="v")
      if context.executing_eagerly():
        sess = None
      else:
        self.evaluate(variables.global_variables_initializer())
      save = saver_module.Saver([v])
      save.save(sess, save_path)
      save2 = saver_module.Saver([v])
      save2.restore(sess, save_path)
      # NOTE(review): assertEquals is a deprecated alias of assertEqual.
      self.assertEquals(self.evaluate(v), [1])
  def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
    """Saver construction adds ops only under its own .../save/ name scope."""
    with ops_lib.Graph().as_default() as g:
      v = resource_variable_ops.ResourceVariable(1.0, name="v")
      with ops_lib.name_scope("saver1"):
        saver_module.Saver()
      with ops_lib.name_scope("saver2"):
        saver_module.Saver({"name": v})
    ops_in_saver1_scope_but_not_save_scope = [
        op for op in g.get_operations()
        if (op.name.startswith("saver1/") and
            not op.name.startswith("saver1/save/"))]
    self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])
    ops_in_saver2_scope_but_not_save_scope = [
        op for op in g.get_operations()
        if (op.name.startswith("saver2/") and
            not op.name.startswith("saver2/save/"))]
    self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])
  def testSaveCopyRestoreWithSaveRelativePaths(self):
    """Save, copy checkpoint dir and restore from copied dir.
    This only works for save_relative_paths=True.
    """
    save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
    os.mkdir(save_dir1)
    save_path1 = os.path.join(save_dir1, "save_copy_restore")
    # Build a graph with 2 parameter nodes, and Save and
    # Restore nodes for them.
    v0 = variables.Variable(10.0, name="v0")
    v1 = variables.Variable(20.0, name="v1")
    v2 = saver_test_utils.CheckpointedOp(name="v2")
    v2_init = v2.insert("k1", 30.0)
    save = saver_module.Saver(
        var_list={
            "v0": v0,
            "v1": v1,
            "v2": v2.saveable},
        restore_sequentially=True,
        save_relative_paths=True)
    init_all_op = [variables.global_variables_initializer(), v2_init]
    with self.test_session() as sess:
      # Initialize all variables
      sess.run(init_all_op)
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(30.0, v2.values().eval())
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path1)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path1, val)
    self.assertEqual(saver_module.latest_checkpoint(save_dir1), save_path1)
    save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
    # NOTE(review): os.renames moves (renames) the directory rather than
    # copying it; the relocated checkpoint must still resolve because the
    # saver wrote relative paths.
    os.renames(save_dir1, save_dir2)
    save_path2 = os.path.join(save_dir2, "save_copy_restore")
    self.assertEqual(saver_module.latest_checkpoint(save_dir2), save_path2)
    # Start a second session. In that session the parameter nodes
    # have not been initialized either.
    with self.test_session() as sess:
      v0 = variables.Variable(-1.0, name="v0")
      v1 = variables.Variable(-1.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
      # Assert that the variables are not initialized.
      self.assertEqual(
          len(variables.report_uninitialized_variables().eval()), 2)
      self.assertEqual(0, len(v2.keys().eval()))
      self.assertEqual(0, len(v2.values().eval()))
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path2)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(30.0, v2.values().eval())
  def testFilenameTensor(self):
    """The saver_def filename tensor evaluates to the constructor's filename."""
    v0 = variables.Variable(0, name="v0")
    filename = b"somerandomfilename"
    save = saver_module.Saver({"v0": v0}, filename=filename)
    with self.test_session() as sess:
      tensor = sess.graph.get_tensor_by_name(
          save.saver_def.filename_tensor_name)
      self.assertEqual(sess.run(tensor), filename)
  def testInvalidPath(self):
    """Restoring from a nonexistent path raises NotFoundError for V1 and V2."""
    v0 = variables.Variable(0, name="v0")
    for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      with self.test_session() as sess:
        save = saver_module.Saver({"v0": v0}, write_version=ver)
        with self.assertRaisesRegexp(errors.NotFoundError,
                                     "Failed to find any matching files for"):
          save.restore(sess, "invalid path")
  def testInt64(self):
    """int64 variable values survive a save/restore round-trip."""
    save_path = os.path.join(self.get_temp_dir(), "int64")
    with self.test_session() as sess:
      # Build a graph with 1 node, and save and restore for them.
      v = variables.Variable(np.int64(15), name="v")
      save = saver_module.Saver({"v": v}, restore_sequentially=True)
      variables.global_variables_initializer().run()
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    with self.test_session() as sess:
      v = variables.Variable(np.int64(-1), name="v")
      save = saver_module.Saver({"v": v})
      with self.assertRaisesWithPredicateMatch(
          errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
        sess.run(v)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(np.int64(15), v.eval())
  def testSomeErrors(self):
    """Conflicting save names (via slice info or _name) raise ValueError."""
    with ops_lib.Graph().as_default():
      v0 = variables.Variable([10.0], name="v0")
      v1 = variables.Variable([20.0], name="v1")
      v2 = variables.Variable([20.0], name="v2")
      v2._set_save_slice_info(
          variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # By default the name used for "v2" will be "v1" and raise an error.
      with self.assertRaisesRegexp(ValueError, "same name: v1"):
        saver_module.Saver([v0, v1, v2])
      # The names are different and will work.
      saver_module.Saver({"vee1": v1, "other": [v2]})
      # Partitioned variables also cause name conflicts.
      p_v1 = variable_scope.get_variable(
          "p_v1",
          shape=[4, 5],
          partitioner=partitioned_variables.fixed_size_partitioner(
              num_shards=2))
      p_v2 = variable_scope.get_variable(
          "p_v2",
          shape=[4, 5],
          partitioner=partitioned_variables.fixed_size_partitioner(
              num_shards=2))
      p_v2._name = "p_v1"
      with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
        saver_module.Saver([p_v1, p_v2])
  def testSameName(self):
    """Registering one saveable under two checkpoint names raises ValueError."""
    with ops_lib.Graph().as_default():
      v0 = variables.Variable([10.0], name="v0")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      # Saving one variable under two names raises an error.
      with self.assertRaisesRegexp(
          ValueError, "The same saveable will be restored with two names: v0"):
        saver_module.Saver({"v0": v0, "v0too": v0})
      # Ditto for custom saveables.
      with self.assertRaisesRegexp(
          ValueError, "The same saveable will be restored with two names: v2"):
        saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
      # Verify non-duplicate names work.
      saver_module.Saver({"v0": v0, "v2": v2.saveable})
  def testBasicsWithListOfVariables(self):
    """Same round-trip as basicSaveRestore but passing a list to Saver."""
    save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = variables.Variable(10.0, name="v0")
      v1 = variables.Variable(20.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      v2_init = v2.insert("k1", 30.0)
      save = saver_module.Saver([v0, v1, v2.saveable])
      variables.global_variables_initializer().run()
      v2_init.run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(30.0, v2.values().eval())
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the variables
    # have not been initialized either.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0 = variables.Variable(-1.0, name="v0")
      v1 = variables.Variable(-1.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      save = saver_module.Saver([v0, v1, v2.saveable])
      with self.assertRaisesWithPredicateMatch(
          errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      self.assertEqual(0, len(v2.keys().eval()))
      self.assertEqual(0, len(v2.values().eval()))
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(30.0, v2.values().eval())
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0_2 = variables.Variable(1000.0, name="v0")
      v1_2 = variables.Variable(2000.0, name="v1")
      v2_2 = saver_test_utils.CheckpointedOp(name="v2")
      save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
      v2_2.insert("k1000", 3000.0).run()
      variables.global_variables_initializer().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      self.assertEqual(b"k1000", v2_2.keys().eval())
      self.assertEqual(3000.0, v2_2.values().eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())
      self.assertEqual(b"k1", v2_2.keys().eval())
      self.assertEqual(30.0, v2_2.values().eval())
  def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
    """Saves `var_value` under `var_name`, then restores it over `other_value`.

    Args:
      var_name: checkpoint name for the variable.
      var_value: value saved to the checkpoint.
      other_value: initial value of the second variable, overwritten on restore.
      save_path: checkpoint path prefix.
    """
    with self.test_session(graph=ops_lib.Graph()) as sess:
      var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
      save = saver_module.Saver({var_name: var})
      if not context.executing_eagerly():
        self.evaluate(var.initializer)
      val = save.save(sess, save_path)
      self.assertEqual(save_path, val)
    with self.test_session(graph=ops_lib.Graph()) as sess:
      var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
      save = saver_module.Saver({var_name: var})
      save.restore(sess, save_path)
      self.assertAllClose(var_value, self.evaluate(var))
  def testCacheRereadsFile(self):
    """Overwriting a checkpoint file must invalidate cached readers."""
    save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    # Save and reload one Variable named "var1" in the same file.
    # The cached readers should know to re-read the file.
    self._SaveAndLoad("var1", 1.1, 2.2, save_path)
  def testAllowEmpty(self):
    """allow_empty=True lets save/restore no-op when there are no variables."""
    save_path = os.path.join(self.get_temp_dir(), "allow_empty")
    with self.test_session() as sess:
      _ = constant_op.constant(1)
      save = saver_module.Saver(allow_empty=True)
      # With nothing to save, save() returns None rather than a path.
      val = save.save(sess, save_path)
      self.assertIsNone(val)
    with self.test_session() as sess:
      save = saver_module.Saver(allow_empty=True)
      save.restore(sess, save_path)
  def testGPU(self):
    """Saving/building savers for GPU-placed variables works; needs a GPU."""
    if not test.is_gpu_available():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_1 = variables.Variable(123.45)
        save = saver_module.Saver({"v0": v0_1})
        variables.global_variables_initializer().run()
        save.save(sess, save_path)
    # NOTE(review): the second session only builds and initializes; no
    # restore is performed here.
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_2 = variables.Variable(543.21)
        save = saver_module.Saver({"v0": v0_2})
        variables.global_variables_initializer().run()
  def testSharedServerOnGPU(self):
    """Sharded saver construction on a GPU device works; needs a GPU."""
    if not test.is_gpu_available():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_1 = variables.Variable(123.45)
        save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
        variables.global_variables_initializer().run()
        save.save(sess, save_path)
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_2 = variables.Variable(543.21)
        save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
        variables.global_variables_initializer().run()
  def testVariables(self):
    """A no-arg Saver defaults to saving/restoring all variables."""
    save_path = os.path.join(self.get_temp_dir(), "variables")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(1.0)
      twos = variables.Variable([2.0, 2.0, 2.0])
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      init = variables.global_variables_initializer()
      save = saver_module.Saver()
      init.run()
      v2.insert("k1", 3.0).run()
      save.save(sess, save_path)
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(0.0)
      twos = variables.Variable([0.0, 0.0, 0.0])
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      # Saver with no arg, defaults to 'all variables'.
      save = saver_module.Saver()
      save.restore(sess, save_path)
      self.assertAllClose(1.0, one.eval())
      self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(3.0, v2.values().eval())
  def testVarListShouldBeEmptyInDeferredBuild(self):
    """Passing a var_list together with defer_build=True raises ValueError."""
    with ops_lib.Graph().as_default():
      v = variables.Variable(1.0)
      with self.assertRaisesRegexp(ValueError, "defer_build"):
        saver_module.Saver([v], defer_build=True)
  def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
    """Calling save() before build() on a deferred saver raises RuntimeError."""
    save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
    with ops_lib.Graph().as_default(), session.Session() as sess:
      variables.Variable(1.0)
      saver = saver_module.Saver(defer_build=True)
      with self.assertRaisesRegexp(RuntimeError, "build"):
        saver.save(sess, save_path)
  def testDeferredBuild(self):
    """defer_build=True picks up variables created after the Saver."""
    save_path = os.path.join(self.get_temp_dir(), "deferred_build")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(1.0)
      save = saver_module.Saver(defer_build=True)
      # if build is not deferred, saver cannot save the `twos`.
      twos = variables.Variable([2.0, 2.0, 2.0])
      init = variables.global_variables_initializer()
      save.build()
      init.run()
      save.save(sess, save_path)
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(0.0)
      twos = variables.Variable([0.0, 0.0, 0.0])
      # Saver with no arg, defaults to 'all variables'.
      save = saver_module.Saver()
      save.restore(sess, save_path)
      self.assertAllClose(1.0, one.eval())
      self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
  def testReshape(self):
    """reshape=True allows restoring into a variable of a different shape."""
    save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
      init = variables.global_variables_initializer()
      save = saver_module.Saver()
      init.run()
      save.save(sess, save_path)
    # Error when restoring with default reshape=False
    with session.Session("", graph=ops_lib.Graph()) as sess:
      var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
      save = saver_module.Saver()
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          "Assign requires shapes of both tensors to match."):
        save.restore(sess, save_path)
    # Restored to new shape with reshape=True
    with session.Session("", graph=ops_lib.Graph()) as sess:
      var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
      save = saver_module.Saver(reshape=True)
      save.restore(sess, save_path)
      self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
  @test_util.run_in_graph_and_eager_modes()
  def testSaveWithGlobalStep(self, pad_step_number=False):
    """save() appends the global step (optionally zero-padded) to the path.

    Args:
      pad_step_number: if True, expect an 8-digit zero-padded step suffix.
    """
    save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
    global_step_int = 5
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    # The global step may be given either as a tensor or a plain int.
    for use_tensor in [True, False]:
      with self.test_session(graph=ops_lib.Graph()):
        var = resource_variable_ops.ResourceVariable(1.0, name="var0")
        save = saver_module.Saver(
            {
                var._shared_name: var
            }, pad_step_number=pad_step_number)
        if context.executing_eagerly():
          sess = None
        else:
          self.evaluate(var.initializer)
          sess = ops_lib.get_default_session()
        if use_tensor:
          global_step = constant_op.constant(global_step_int)
          val = save.save(sess, save_path, global_step=global_step)
        else:
          val = save.save(sess, save_path, global_step=global_step_int)
        if pad_step_number:
          expected_save_path = "%s-%s" % (save_path,
                                          "{:08d}".format(global_step_int))
        else:
          expected_save_path = "%s-%d" % (save_path, global_step_int)
        self.assertEqual(expected_save_path, val)
  def testSaveWithGlobalStepWithPadding(self):
    """Runs testSaveWithGlobalStep with zero-padded step numbers."""
    self.testSaveWithGlobalStep(pad_step_number=True)
  def testSaveToNonexistingPath(self):
    """Saving under a missing parent dir either succeeds or raises ValueError."""
    file_io.write_string_to_file(
        os.path.join(self.get_temp_dir(), "actually_a_file"), "")
    paths = [
        os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
        os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
        os.path.join(self.get_temp_dir(), "actually_a_file/path"),
    ]
    for save_path in paths:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = variables.Variable(10.0, name="v0")
      v1 = variables.Variable(20.0, name="v1")
      save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
      init_all_op = variables.global_variables_initializer()
      # In the case where the parent directory doesn't exist, whether or not the
      # save succeeds or fails is implementation dependent. Therefore we allow
      # both cases.
      try:
        with self.test_session() as sess:
          # Initialize all variables
          sess.run(init_all_op)
          # Check that the parameter nodes have been initialized.
          self.assertEqual(10.0, v0.eval())
          self.assertEqual(20.0, v1.eval())
          # Save the graph.
          save.save(sess, save_path)
        with self.test_session() as sess:
          # Restore the saved values in the parameter nodes.
          save.restore(sess, save_path)
          # Check that the parameter nodes have been restored.
          self.assertEqual(10.0, v0.eval())
          self.assertEqual(20.0, v1.eval())
      except ValueError as exc:
        error_msg_template = "Parent directory of {} doesn't exist, can't save."
        self.assertEqual(error_msg_template.format(save_path), str(exc))
  def testSaveToURI(self):
    """Saving to a file:// URI path works (skipped on Windows)."""
    # ParseURI functions don't work on Windows yet.
    # TODO(jhseu): Remove this check when it works.
    if os.name == "nt":
      self.skipTest("Local URI support doesn't work on Windows")
    save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
    # Build a graph with 2 parameter nodes, and Save and
    # Restore nodes for them.
    v0 = variables.Variable(10.0, name="v0")
    v1 = variables.Variable(20.0, name="v1")
    save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
    init_all_op = variables.global_variables_initializer()
    with self.test_session() as sess:
      # Initialize all variables
      sess.run(init_all_op)
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      save.save(sess, save_path)
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
  def testBasics(self):
    """Sharded save across two CPU devices; restore per-shard and combined."""
    save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
    # Build a graph with 2 parameter nodes on different devices.
    with session.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = variables.Variable(10, name="v0")
        t0 = saver_test_utils.CheckpointedOp(name="t0")
      with sess.graph.device("/cpu:1"):
        v1 = variables.Variable(20, name="v1")
        t1 = saver_test_utils.CheckpointedOp(name="t1")
      save = saver_module.Saver(
          {
              "v0": v0,
              "v1": v1,
              "t0": t0.saveable,
              "t1": t1.saveable
          },
          write_version=self._WRITE_VERSION,
          sharded=True)
      variables.global_variables_initializer().run()
      t0.insert("k1", 30.0).run()
      t1.insert("k2", 40.0).run()
      val = save.save(sess, save_path)
      # V1 sharded checkpoints return a shard filename pattern; V2 returns
      # the plain prefix.
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(save_path + "-?????-of-00002", val)
      else:
        self.assertEqual(save_path, val)
      meta_graph_filename = save._MetaGraphFilename(val)
      self.assertEqual(save_path + ".meta", meta_graph_filename)
    if save._write_version is saver_pb2.SaverDef.V1:
      # Restore different ops from shard 0 of the saved files.
      with session.Session(
          target="",
          config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        with sess.graph.device("/cpu:0"):
          v0 = variables.Variable(111, name="v0")
          t0 = saver_test_utils.CheckpointedOp(name="t0")
          save = saver_module.Saver(
              {
                  "v0": v0,
                  "t0": t0.saveable
              },
              write_version=self._WRITE_VERSION,
              sharded=True)
          variables.global_variables_initializer().run()
          t0.insert("k11", 33.0).run()
          self.assertEqual(111, v0.eval())
          self.assertEqual(b"k11", t0.keys().eval())
          self.assertEqual(33.0, t0.values().eval())
          save.restore(sess, save_path + "-00000-of-00002")
          self.assertEqual(10, v0.eval())
          self.assertEqual(b"k1", t0.keys().eval())
          self.assertEqual(30.0, t0.values().eval())
      # Restore different ops from shard 1 of the saved files.
      with session.Session(
          target="",
          config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        with sess.graph.device("/cpu:0"):
          v1 = variables.Variable(222)
          t1 = saver_test_utils.CheckpointedOp(name="t1")
          save = saver_module.Saver(
              {
                  "v1": v1,
                  "t1": t1.saveable
              },
              write_version=self._WRITE_VERSION,
              sharded=True)
          variables.global_variables_initializer().run()
          t1.insert("k22", 44.0).run()
          self.assertEqual(222, v1.eval())
          self.assertEqual(b"k22", t1.keys().eval())
          self.assertEqual(44.0, t1.values().eval())
          save.restore(sess, save_path + "-00001-of-00002")
          self.assertEqual(20, v1.eval())
          self.assertEqual(b"k2", t1.keys().eval())
          self.assertEqual(40.0, t1.values().eval())
    # Now try a restore with the sharded filename.
    with session.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = variables.Variable(111, name="v0")
        t0 = saver_test_utils.CheckpointedOp(name="t0")
      with sess.graph.device("/cpu:1"):
        v1 = variables.Variable(222, name="v1")
        t1 = saver_test_utils.CheckpointedOp(name="t1")
      save = saver_module.Saver(
          {
              "v0": v0,
              "v1": v1,
              "t0": t0.saveable,
              "t1": t1.saveable
          },
          write_version=self._WRITE_VERSION,
          sharded=True)
      variables.global_variables_initializer().run()
      t0.insert("k11", 33.0).run()
      t1.insert("k22", 44.0).run()
      self.assertEqual(111, v0.eval())
      self.assertEqual(222, v1.eval())
      self.assertEqual(b"k11", t0.keys().eval())
      self.assertEqual(33.0, t0.values().eval())
      self.assertEqual(b"k22", t1.keys().eval())
      self.assertEqual(44.0, t1.values().eval())
      save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
      if save._write_version is saver_pb2.SaverDef.V1:
        save.restore(sess, save_path + "-?????-of-?????")
      else:
        save.restore(sess, save_path)
      self.assertEqual(10, v0.eval())
      self.assertEqual(20, v1.eval())
      self.assertEqual(b"k1", t0.keys().eval())
      self.assertEqual(30.0, t0.values().eval())
      self.assertEqual(b"k2", t1.keys().eval())
      self.assertEqual(40.0, t1.values().eval())
    if save._write_version is saver_pb2.SaverDef.V1:
      self.assertEqual(
          saver_module.latest_checkpoint(self.get_temp_dir()),
          os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
    else:
      self.assertEqual(
          saver_module.latest_checkpoint(self.get_temp_dir()),
          os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
with self.test_session():
v0 = variables.Variable(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
  def _testPartitionedVariables(self, use_resource):
    """Save/restore round-trips across different variable partitionings.

    Saves a [10, 3] variable under one slicing/partitioning scheme and
    restores it under another, checking that the restored values always
    equal the originally saved ndarray.

    Args:
      use_resource: if True, use ResourceVariable where applicable;
        otherwise use the legacy ref Variable.
    """
    var_full_shape = [10, 3]
    # Allows save/restore mechanism to work w/ different slicings.
    var_name = "my_var"
    saved_dir = self._get_test_dir("partitioned_variables")
    saved_path = os.path.join(saved_dir, "ckpt")
    # Shared with the closures below; rebound by the test loop at the bottom.
    call_saver_with_dict = False  # updated by test loop below

    def _save(slices=None, partitioner=None):
      # Builds a fresh graph, creates the variable under the requested
      # slicing/partitioning, saves it, and returns the full ndarray value.
      with self.test_session(graph=ops_lib.Graph()) as sess:
        # Calls .eval() to return the ndarray that makes up the full variable.
        rnd = random_ops.random_uniform(var_full_shape).eval()
        if slices:
          assert not partitioner
          # TODO(apassos): make create_partitioned_variables take use_resource
          # option to make this test passable without creating a named
          # variable_scope.
          vs = partitioned_variables.create_partitioned_variables(
              var_full_shape, slices, rnd, name=var_name)
        elif partitioner:
          vs = [
              variable_scope.get_variable(
                  var_name,
                  shape=var_full_shape,
                  initializer=rnd,
                  partitioner=partitioner,
                  use_resource=use_resource)
          ]
        else:
          if use_resource:
            vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
          else:
            vs = [variables.Variable(rnd, name=var_name)]
        variables.global_variables_initializer().run()
        if call_saver_with_dict:
          saver = saver_module.Saver({var_name: (vs if slices else vs[0])})
        else:
          saver = saver_module.Saver(vs)
        actual_path = saver.save(sess, saved_path)
        self.assertEqual(saved_path, actual_path)
        return rnd

    def _restore(slices=None, partitioner=None):
      # Builds a fresh graph with zero-initialized variables under the
      # requested slicing/partitioning, restores from saved_path, and
      # returns the restored full ndarray value.
      with self.test_session(graph=ops_lib.Graph()) as sess:
        if slices:
          assert not partitioner
          new_vs = partitioned_variables.create_partitioned_variables(
              var_full_shape,
              slices,
              array_ops.zeros(var_full_shape),  # != original contents.
              name=var_name)
        elif partitioner:
          new_vs = [
              variable_scope.get_variable(
                  var_name,
                  shape=var_full_shape,
                  initializer=array_ops.zeros(var_full_shape),
                  partitioner=partitioner)
          ]
        else:
          new_vs = [
              variables.Variable(
                  array_ops.zeros(
                      shape=var_full_shape),  # != original contents.
                  name=var_name)
          ]
        variables.global_variables_initializer().run()
        if call_saver_with_dict:
          saver = saver_module.Saver({
              var_name: (new_vs if slices else new_vs[0])
          })
        else:
          saver = saver_module.Saver(new_vs)
        saver.restore(sess, saved_path)
        # Reassemble the full value from however the variable was split.
        if partitioner:
          return new_vs[0].as_tensor().eval()
        elif slices and slices[0] != 1:
          return array_ops.concat(new_vs, 0).eval()
        elif slices and slices[1] != 1:
          return array_ops.concat(new_vs, 1).eval()
        else:  # Non-sliced.
          return new_vs[0].eval()

    # Exercise every slicing combination with both Saver call styles
    # (dict of names->vars vs. a plain list of vars).
    for call_saver_with_dict in {False, True}:
      # Save PartitionedVariable and restore into full variable.
      saved_full = _save(
          partitioner=partitioned_variables.fixed_size_partitioner(
              num_shards=2))
      restored_full = _restore()
      self.assertAllEqual(saved_full, restored_full)
      # Saves 10 horizontal parts of a partitioned variable.
      # Restores into a full variable, non-sliced.
      saved_full = _save(slices=[10, 1])
      restored_full = _restore()
      self.assertAllEqual(saved_full, restored_full)
      # Restores into a different number/orientation of slices.
      restored_full = _restore(slices=[2, 1])  # 2 horizontal parts.
      self.assertAllEqual(saved_full, restored_full)
      restored_full = _restore(slices=[1, 3])  # 3 vertical parts.
      self.assertAllEqual(saved_full, restored_full)
      # Restores into a PartitionedVariable.
      restored_full = _restore(
          partitioner=partitioned_variables.fixed_size_partitioner(
              num_shards=2))
      self.assertAllEqual(saved_full, restored_full)
      # Now, saves a full variable and restores in slices.
      saved_full = _save()
      restored_full = _restore(slices=[1, 3])
      self.assertAllEqual(saved_full, restored_full)
  def testPartitionedVariable(self):
    """Partitioned save/restore round-trips with legacy ref variables."""
    self._testPartitionedVariables(use_resource=False)
  def testPartitionedResourceVariable(self):
    """Partitioned save/restore round-trips with resource variables."""
    self._testPartitionedVariables(use_resource=True)
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
  """Re-runs the sharded save/restore tests with the V2 checkpoint format."""
  _WRITE_VERSION = saver_pb2.SaverDef.V2
class MaxToKeepTest(test.TestCase):
  """Tests the Saver's `max_to_keep` checkpoint-retention bookkeeping."""

  def _get_test_dir(self, dirname):
    """Creates (if necessary) and returns a test directory under the temp dir."""
    test_dir = os.path.join(self.get_temp_dir(), dirname)
    gfile.MakeDirs(test_dir)
    return test_dir

  def assertCheckpointState(self, model_checkpoint_path,
                            all_model_checkpoint_paths, save_dir):
    """Asserts the on-disk checkpoint state file matches the expected paths."""
    checkpoint_state = saver_module.get_checkpoint_state(save_dir)
    self.assertEqual(checkpoint_state.model_checkpoint_path,
                     model_checkpoint_path)
    self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
                     all_model_checkpoint_paths)

  def testMaxToKeepEager(self):
    """max_to_keep=2 retention and deletion semantics in eager mode."""
    with context.eager_mode():
      save_dir = self._get_test_dir("max_to_keep_non_sharded")

      v = variable_scope.variable(10.0, name="v")
      save = saver_module.Saver({"v": v}, max_to_keep=2)
      self.evaluate(variables.global_variables_initializer())
      if not context.executing_eagerly():
        self.assertEqual([], save.last_checkpoints)

      s1 = save.save(None, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertCheckpointState(
          model_checkpoint_path=s1,
          all_model_checkpoint_paths=[s1],
          save_dir=save_dir)

      s2 = save.save(None, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertCheckpointState(
          model_checkpoint_path=s2,
          all_model_checkpoint_paths=[s1, s2],
          save_dir=save_dir)

      # Third save exceeds max_to_keep=2, so the oldest (s1) is deleted.
      s3 = save.save(None, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s1))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(saver_module.checkpoint_exists(s3))
      self.assertCheckpointState(
          model_checkpoint_path=s3,
          all_model_checkpoint_paths=[s2, s3],
          save_dir=save_dir)

      # Create a second helper, identical to the first.
      save2 = saver_module.Saver({"v": v}, max_to_keep=2)
      save2.set_last_checkpoints(save.last_checkpoints)

      # Exercise the first helper.

      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save.save(None, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s1))
      self.assertTrue(saver_module.checkpoint_exists(s3))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertCheckpointState(
          model_checkpoint_path=s2,
          all_model_checkpoint_paths=[s3, s2],
          save_dir=save_dir)

      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save.save(None, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s3))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertCheckpointState(
          model_checkpoint_path=s1,
          all_model_checkpoint_paths=[s2, s1],
          save_dir=save_dir)

      # Exercise the second helper, which still believes [s3, s2] is current.
      s2 = save2.save(None, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save2.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(saver_module.checkpoint_exists(s1))
      # Deleted by the first helper.
      self.assertFalse(saver_module.checkpoint_exists(s3))

  def testNonSharded(self):
    """max_to_keep bookkeeping across multiple Saver instances (graph mode)."""
    save_dir = self._get_test_dir("max_to_keep_non_sharded")

    with self.test_session() as sess:
      v = variables.Variable(10.0, name="v")
      save = saver_module.Saver({"v": v}, max_to_keep=2)
      variables.global_variables_initializer().run()
      self.assertEqual([], save.last_checkpoints)

      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertCheckpointState(
          model_checkpoint_path=s1,
          all_model_checkpoint_paths=[s1],
          save_dir=save_dir)

      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertCheckpointState(
          model_checkpoint_path=s2,
          all_model_checkpoint_paths=[s1, s2],
          save_dir=save_dir)

      # Third save exceeds max_to_keep=2, so the oldest (s1) is deleted.
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s1))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(saver_module.checkpoint_exists(s3))
      self.assertCheckpointState(
          model_checkpoint_path=s3,
          all_model_checkpoint_paths=[s2, s3],
          save_dir=save_dir)

      # Create a second helper, identical to the first.
      save2 = saver_module.Saver(saver_def=save.as_saver_def())
      save2.set_last_checkpoints(save.last_checkpoints)

      # Create a third helper, with the same configuration but no knowledge of
      # previous checkpoints.
      save3 = saver_module.Saver(saver_def=save.as_saver_def())

      # Exercise the first helper.

      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s1))
      self.assertFalse(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
      self.assertTrue(saver_module.checkpoint_exists(s3))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
      self.assertCheckpointState(
          model_checkpoint_path=s2,
          all_model_checkpoint_paths=[s3, s2],
          save_dir=save_dir)

      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s3))
      self.assertFalse(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
      self.assertCheckpointState(
          model_checkpoint_path=s1,
          all_model_checkpoint_paths=[s2, s1],
          save_dir=save_dir)

      # Exercise the second helper.

      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save2.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save2.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
      # Deleted by the first helper.
      self.assertFalse(saver_module.checkpoint_exists(s3))
      self.assertFalse(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
      self.assertCheckpointState(
          model_checkpoint_path=s2,
          all_model_checkpoint_paths=[s3, s2],
          save_dir=save_dir)

      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save2.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save2.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s3))
      self.assertFalse(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
      self.assertCheckpointState(
          model_checkpoint_path=s1,
          all_model_checkpoint_paths=[s2, s1],
          save_dir=save_dir)

      # Exercise the third helper.

      # Adding s2 again (but helper is unaware of previous s2)
      s2 = save3.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s2], save3.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
      # Deleted by the first helper.
      self.assertFalse(saver_module.checkpoint_exists(s3))
      self.assertFalse(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
      # Even though the file for s1 exists, this saver isn't aware of it, which
      # is why it doesn't end up in the checkpoint state.
      self.assertCheckpointState(
          model_checkpoint_path=s2,
          all_model_checkpoint_paths=[s2],
          save_dir=save_dir)

      # Adding s1 (s3 should not be deleted because helper is unaware of it)
      s1 = save3.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save3.last_checkpoints)
      self.assertFalse(saver_module.checkpoint_exists(s3))
      self.assertFalse(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
      self.assertTrue(saver_module.checkpoint_exists(s2))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertTrue(
          saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
      self.assertCheckpointState(
          model_checkpoint_path=s1,
          all_model_checkpoint_paths=[s2, s1],
          save_dir=save_dir)

  def testSharded(self):
    """max_to_keep deletion removes every shard plus the meta graph file."""
    save_dir = self._get_test_dir("max_to_keep_sharded")

    with session.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = variables.Variable(111, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = variables.Variable(222, name="v1")
      save = saver_module.Saver(
          {
              "v0": v0,
              "v1": v1
          }, sharded=True, max_to_keep=2)
      variables.global_variables_initializer().run()
      self.assertEqual([], save.last_checkpoints)

      # V1 writes one file per shard; V2 writes index + per-shard data files.
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(2, len(gfile.Glob(s1)))
      else:
        self.assertEqual(4, len(gfile.Glob(s1 + "*")))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))

      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(2, len(gfile.Glob(s1)))
      else:
        self.assertEqual(4, len(gfile.Glob(s1 + "*")))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(2, len(gfile.Glob(s2)))
      else:
        self.assertEqual(4, len(gfile.Glob(s2 + "*")))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))

      # Third save evicts s1: all of its shard files and .meta must be gone.
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertEqual(0, len(gfile.Glob(s1 + "*")))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(2, len(gfile.Glob(s2)))
      else:
        self.assertEqual(4, len(gfile.Glob(s2 + "*")))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(2, len(gfile.Glob(s3)))
      else:
        self.assertEqual(4, len(gfile.Glob(s3 + "*")))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))

  def testNoMaxToKeep(self):
    """max_to_keep=None or 0 disables tracking; no checkpoint is deleted."""
    save_dir = self._get_test_dir("no_max_to_keep")
    save_dir2 = self._get_test_dir("max_to_keep_0")

    with self.test_session() as sess:
      v = variables.Variable(10.0, name="v")
      variables.global_variables_initializer().run()

      # Test max_to_keep being None.
      save = saver_module.Saver({"v": v}, max_to_keep=None)
      self.assertEqual([], save.last_checkpoints)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([], save.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([], save.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s2))

      # Test max_to_keep being 0.
      save2 = saver_module.Saver({"v": v}, max_to_keep=0)
      self.assertEqual([], save2.last_checkpoints)
      s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
      self.assertEqual([], save2.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
      self.assertEqual([], save2.last_checkpoints)
      self.assertTrue(saver_module.checkpoint_exists(s2))

  def testNoMetaGraph(self):
    """write_meta_graph=False saves checkpoint data but no .meta file."""
    save_dir = self._get_test_dir("no_meta_graph")

    with self.test_session() as sess:
      v = variables.Variable(10.0, name="v")
      save = saver_module.Saver({"v": v})
      variables.global_variables_initializer().run()

      s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
class KeepCheckpointEveryNHoursTest(test.TestCase):
  """Tests the Saver's `keep_checkpoint_every_n_hours` retention option."""

  def _get_test_dir(self, dirname):
    """Creates (if necessary) and returns a test directory under the temp dir."""
    test_dir = os.path.join(self.get_temp_dir(), dirname)
    gfile.MakeDirs(test_dir)
    return test_dir

  @test_util.run_in_graph_and_eager_modes()
  @test.mock.patch.object(saver_module, "time")
  def testNonSharded(self, mock_time):
    """Checkpoints older than the keep interval survive max_to_keep eviction.

    Args:
      mock_time: mock of the `time` module as seen by the saver, letting the
        test advance the clock deterministically instead of sleeping.
    """
    save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")

    with self.test_session() as sess:
      v = variable_scope.variable([10.0], name="v")
      # Run the initializer NOW to avoid the 0.5s overhead of the first Run()
      # call, which throws the test timing off in fastbuild mode.
      self.evaluate(variables.global_variables_initializer())
      # Create a saver that will keep the last 2 checkpoints plus one every 0.7
      # seconds.
      start_time = time.time()
      mock_time.time.return_value = start_time
      save = saver_module.Saver(
          {
              "v": v
          }, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
      self.assertEqual([], save.last_checkpoints)

      # Wait till 1 seconds have elapsed so s1 will be old enough to keep.
      # sleep may return early, don't trust it.
      mock_time.time.return_value = start_time + 1.0
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)

      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)

      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(),
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s so we must keep it.
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)

      # s1 should still be here; we are not checking now to reduce time
      # variance in the test.

      # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save(), will delete s2, because max_to_keep is 2, and because
      # we already kept the old s1. s2 is very close in time to s1 so it gets
      # deleted.
      s4 = save.save(sess, os.path.join(save_dir, "s4"))
      self.assertEqual([s3, s4], save.last_checkpoints)

      # Check that s1 is still here, but s2 is gone.
      self.assertTrue(saver_module.checkpoint_exists(s1))
      self.assertFalse(saver_module.checkpoint_exists(s2))
      self.assertTrue(saver_module.checkpoint_exists(s3))
      self.assertTrue(saver_module.checkpoint_exists(s4))
class SaveRestoreWithVariableNameMap(test.TestCase):
  """Tests saving/restoring when a name map remaps variable names."""

  def _testNonReshape(self, variable_op):
    """Saves under remapped names and verifies restore only via those names.

    Args:
      variable_op: variable constructor under test (Variable or
        ResourceVariable).
    """
    save_path = os.path.join(self.get_temp_dir(), "non_reshape")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = variable_op(10.0, name="v0")
      v1 = variable_op(20.0, name="v1")
      save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      self.evaluate(variables.global_variables_initializer())

      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, self.evaluate(v0))
      self.assertEqual(20.0, self.evaluate(v1))

      # Save the initialized values in the file at "save_path".
      # Use a variable name map to set the saved tensor names.
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

      # Verify that the original (unmapped) names are not in the saved file.
      save = saver_module.Saver({"v0": v0, "v1": v1})
      with self.assertRaisesOpError("not found in checkpoint"):
        save.restore(sess, save_path)

    # Verify that the mapped names are present in the saved file and can be
    # restored using remapped names.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0 = variable_op(-1.0, name="v0")
      v1 = variable_op(-1.0, name="v1")

      if not context.executing_eagerly():
        with self.assertRaisesOpError("uninitialized"):
          self.evaluate(v0)
        with self.assertRaisesOpError("uninitialized"):
          self.evaluate(v1)

      save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      save.restore(sess, save_path)

      # Check that the parameter nodes have been restored.
      if not context.executing_eagerly():
        self.assertEqual(10.0, self.evaluate(v0))
        self.assertEqual(20.0, self.evaluate(v1))

    # Add a prefix to the node names in the current graph and Restore using
    # remapped names.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0 = variable_op(-1.0, name="restore_prefix/v0")
      v1 = variable_op(-1.0, name="restore_prefix/v1")

      if not context.executing_eagerly():
        with self.assertRaisesOpError("uninitialized"):
          self.evaluate(v0)
        with self.assertRaisesOpError("uninitialized"):
          self.evaluate(v1)

      # Restore the saved values in the parameter nodes.
      save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      save.restore(sess, save_path)

      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, self.evaluate(v0))
      self.assertEqual(20.0, self.evaluate(v1))

  @test_util.run_in_graph_and_eager_modes()
  def testNonReshapeResourceVariable(self):
    """Name-map save/restore with resource variables."""
    self._testNonReshape(resource_variable_ops.ResourceVariable)

  def testNonReshapeVariable(self):
    """Name-map save/restore with legacy ref variables."""
    self._testNonReshape(variables.Variable)
class LatestCheckpointWithRelativePaths(test.TestCase):
  """Tests latest_checkpoint() when checkpoints are saved with relative paths."""

  @staticmethod
  @contextlib.contextmanager
  def tempWorkingDir(temppath):
    """Temporarily chdirs into `temppath`; restores the previous cwd on exit."""
    cwd = os.getcwd()
    os.chdir(temppath)
    try:
      yield
    finally:
      os.chdir(cwd)

  @staticmethod
  @contextlib.contextmanager
  def tempDir():
    """Yields a fresh temporary directory; removes it (and contents) on exit."""
    tempdir = tempfile.mkdtemp()
    try:
      yield tempdir
    finally:
      shutil.rmtree(tempdir)

  def testNameCollision(self):
    """Saving to a path literally named "checkpoint" must not clobber state."""
    # Make sure we have a clean directory to work in.
    with self.tempDir() as tempdir:
      # Jump to that directory until this test is done.
      with self.tempWorkingDir(tempdir):
        # Save training snapshots to a relative path.
        traindir = "train/"
        os.mkdir(traindir)
        # Collides with the default name of the checkpoint state file.
        filepath = os.path.join(traindir, "checkpoint")

        with self.test_session() as sess:
          unused_a = variables.Variable(0.0)  # So that Saver saves something.
          variables.global_variables_initializer().run()

          # Should fail.
          saver = saver_module.Saver(sharded=False)
          with self.assertRaisesRegexp(ValueError, "collides with"):
            saver.save(sess, filepath)

          # Succeeds: the file will be named "checkpoint-<step>".
          saver.save(sess, filepath, global_step=1)
          self.assertIsNotNone(saver_module.latest_checkpoint(traindir))

          # Succeeds: the file will be named "checkpoint-<i>-of-<n>".
          saver = saver_module.Saver(sharded=True)
          saver.save(sess, filepath)
          self.assertIsNotNone(saver_module.latest_checkpoint(traindir))

          # Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
          saver = saver_module.Saver(sharded=True)
          saver.save(sess, filepath, global_step=1)
          self.assertIsNotNone(saver_module.latest_checkpoint(traindir))

  def testRelativePath(self):
    """latest_checkpoint() resolves correctly for a relative train dir."""
    # Make sure we have a clean directory to work in.
    with self.tempDir() as tempdir:
      # Jump to that directory until this test is done.
      with self.tempWorkingDir(tempdir):
        # Save training snapshots to a relative path.
        traindir = "train/"
        os.mkdir(traindir)

        filename = "snapshot"
        filepath = os.path.join(traindir, filename)

        with self.test_session() as sess:
          # Build a simple graph.
          v0 = variables.Variable(0.0)
          inc = v0.assign_add(1.0)

          save = saver_module.Saver({"v0": v0})

          # Record a short training history: steps 0, 1, 2 with v0 = 0, 1, 2.
          variables.global_variables_initializer().run()
          save.save(sess, filepath, global_step=0)
          inc.eval()
          save.save(sess, filepath, global_step=1)
          inc.eval()
          save.save(sess, filepath, global_step=2)

        with self.test_session() as sess:
          # Build a new graph with different initialization.
          v0 = variables.Variable(-1.0)

          # Create a new saver.
          save = saver_module.Saver({"v0": v0})
          variables.global_variables_initializer().run()

          # Get the most recent checkpoint name from the training history file.
          name = saver_module.latest_checkpoint(traindir)
          self.assertIsNotNone(name)

          # Restore "v0" from that checkpoint.
          save.restore(sess, name)
          self.assertEqual(v0.eval(), 2.0)
class CheckpointStateTest(test.TestCase):
  """Tests generating, updating, and reading CheckpointState protos."""

  def _get_test_dir(self, dirname):
    """Creates (if necessary) and returns a test directory under the temp dir."""
    test_dir = os.path.join(self.get_temp_dir(), dirname)
    gfile.MakeDirs(test_dir)
    return test_dir

  def testAbsPath(self):
    """An absolute model path is preserved verbatim in the proto."""
    save_dir = self._get_test_dir("abs_paths")
    abs_path = os.path.join(save_dir, "model-0")
    ckpt = saver_module.generate_checkpoint_state_proto(save_dir, abs_path)
    self.assertEqual(ckpt.model_checkpoint_path, abs_path)
    self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)

  def testRelPath(self):
    """A model path under the save dir is stored relative to it."""
    train_dir = "train"
    model = os.path.join(train_dir, "model-0")
    # model_checkpoint_path should have no "train" directory part.
    new_rel_path = "model-0"
    ckpt = saver_module.generate_checkpoint_state_proto(train_dir, model)
    self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)

  def testAllModelCheckpointPaths(self):
    """model_checkpoint_path is appended when absent from the path list."""
    save_dir = self._get_test_dir("all_models_test")
    abs_path = os.path.join(save_dir, "model-0")
    for paths in [None, [], ["model-2"]]:
      ckpt = saver_module.generate_checkpoint_state_proto(
          save_dir, abs_path, all_model_checkpoint_paths=paths)
      self.assertEqual(ckpt.model_checkpoint_path, abs_path)
      self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
      self.assertEqual(
          len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
      self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)

  def testUpdateCheckpointState(self):
    """update_checkpoint_state round-trips through get_checkpoint_state."""
    save_dir = self._get_test_dir("update_checkpoint_state")
    os.chdir(save_dir)
    # Make a temporary train directory.
    train_dir = "train"
    os.mkdir(train_dir)
    abs_path = os.path.join(save_dir, "model-0")
    rel_path = os.path.join("train", "model-2")
    saver_module.update_checkpoint_state(
        train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
    ckpt = saver_module.get_checkpoint_state(train_dir)
    self.assertEqual(ckpt.model_checkpoint_path, rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)

  def testUpdateCheckpointStateSaveRelativePaths(self):
    """save_relative_paths=True writes relative paths; reads return absolute."""
    save_dir = self._get_test_dir("update_checkpoint_state")
    os.chdir(save_dir)
    abs_path2 = os.path.join(save_dir, "model-2")
    rel_path2 = "model-2"
    abs_path0 = os.path.join(save_dir, "model-0")
    rel_path0 = "model-0"
    saver_module._update_checkpoint_state(  # pylint: disable=protected-access
        save_dir=save_dir,
        model_checkpoint_path=abs_path2,
        all_model_checkpoint_paths=[rel_path0, abs_path2],
        save_relative_paths=True)
    # File should contain relative paths.
    file_content = file_io.read_file_to_string(
        os.path.join(save_dir, "checkpoint"))
    ckpt = CheckpointState()
    text_format.Merge(file_content, ckpt)
    self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
    # get_checkpoint_state should return absolute paths.
    ckpt = saver_module.get_checkpoint_state(save_dir)
    self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)

  def testCheckPointStateFailsWhenIncomplete(self):
    """An empty checkpoint state file raises ValueError."""
    save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
    os.chdir(save_dir)
    ckpt_path = os.path.join(save_dir, "checkpoint")
    # Use a context manager so the handle is closed even if the write raises.
    with open(ckpt_path, "w") as ckpt_file:
      ckpt_file.write("")
    with self.assertRaises(ValueError):
      saver_module.get_checkpoint_state(save_dir)

  def testCheckPointCompletesRelativePaths(self):
    """Relative paths in the state file are resolved against the save dir."""
    save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
    os.chdir(save_dir)
    ckpt_path = os.path.join(save_dir, "checkpoint")
    # Use a context manager so the handle is closed even if the write raises.
    with open(ckpt_path, "w") as ckpt_file:
      ckpt_file.write("""
          model_checkpoint_path: "./model.ckpt-687529"
          all_model_checkpoint_paths: "./model.ckpt-687500"
          all_model_checkpoint_paths: "./model.ckpt-687529"
          """)
    ckpt = saver_module.get_checkpoint_state(save_dir)
    self.assertEqual(ckpt.model_checkpoint_path,
                     os.path.join(save_dir, "./model.ckpt-687529"))
    self.assertEqual(ckpt.all_model_checkpoint_paths[0],
                     os.path.join(save_dir, "./model.ckpt-687500"))
    self.assertEqual(ckpt.all_model_checkpoint_paths[1],
                     os.path.join(save_dir, "./model.ckpt-687529"))
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
  def testAddCollectionDef(self):
    """Exports a graph with many collection types and re-imports it intact."""
    test_dir = self._get_test_dir("good_collection")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session():
      # Creates a graph with cond, while_loop, count_up_to and a queue runner
      # so the exported meta graph exercises many node/collection kinds.
      v0 = variables.Variable(1.0, name="v0")
      control_flow_ops.cond(
          math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
          lambda: math_ops.subtract(v0, 1))
      control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
                                  lambda i: math_ops.add(i, 1), [v0])
      var = variables.Variable(constant_op.constant(0, dtype=dtypes.int64))
      count_up_to = var.count_up_to(3)
      input_queue = data_flow_ops.FIFOQueue(
          30, dtypes.float32, shared_name="collection_queue")
      qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
      variables.global_variables_initializer()
      # Creates a saver.
      save = saver_module.Saver({"v0": v0})
      # Adds a set of collections.
      ops_lib.add_to_collection("int_collection", 3)
      ops_lib.add_to_collection("float_collection", 3.5)
      ops_lib.add_to_collection("string_collection", "hello")
      ops_lib.add_to_collection("variable_collection", v0)
      # Add QueueRunners.
      queue_runner_impl.add_queue_runner(qr)
      # Adds user_defined proto in three formats: string, bytes and Any.
      queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
      ops_lib.add_to_collection("user_defined_string_collection",
                                str(queue_runner))
      ops_lib.add_to_collection("user_defined_bytes_collection",
                                queue_runner.SerializeToString())
      any_buf = Any()
      any_buf.Pack(queue_runner)
      ops_lib.add_to_collection("user_defined_any_collection", any_buf)

      # Generates MetaGraphDef and checks its required top-level fields.
      meta_graph_def = save.export_meta_graph(filename)
      self.assertTrue(meta_graph_def.HasField("saver_def"))
      self.assertTrue(meta_graph_def.HasField("graph_def"))
      self.assertTrue(meta_graph_def.HasField("meta_info_def"))
      self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
      self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
                          "")
      collection_def = meta_graph_def.collection_def
      self.assertEqual(len(collection_def), 12)

    with ops_lib.Graph().as_default():
      # Restores from MetaGraphDef.
      new_saver = saver_module.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_meta_graph_def = new_saver.export_meta_graph()
      # It should be the same as the original.
      test_util.assert_meta_graph_protos_equal(
          self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that collection with unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
  def _testMultiSaverCollectionSave(self, test_dir):
    """Saves a graph with two savers and checks the exported SAVERS entries.

    Args:
      test_dir: directory to write the meta graph and both checkpoints into;
        shared with _testMultiSaverCollectionRestore.
    """
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Creates a graph.
      v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
      v1 = variables.Variable(11.0, name="v1")
      # Creates 2 savers, each owning one variable.
      saver0 = saver_module.Saver({"v0": v0}, name="saver0")
      saver1 = saver_module.Saver({"v1": v1}, name="saver1")
      ops_lib.add_to_collection("savers", saver0)
      ops_lib.add_to_collection("savers", saver1)
      variables.global_variables_initializer().run()
      # Saves to different checkpoints.
      saver0.save(sess, saver0_ckpt)
      saver1.save(sess, saver1_ckpt)
      # Generates MetaGraphDef three ways: module-level export plus
      # per-saver exports.
      meta_graph_def = saver_module.export_meta_graph(filename)
      meta_graph_def0 = saver0.export_meta_graph()
      meta_graph_def1 = saver1.export_meta_graph()
      # Verifies that there is no saver_def in meta_graph_def.
      self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is saver_def in meta_graph_def0 and 1.
      self.assertTrue(meta_graph_def0.HasField("saver_def"))
      self.assertTrue(meta_graph_def1.HasField("saver_def"))
      # Verifies SAVERS is saved as bytes_list for meta_graph_def.
      collection_def = meta_graph_def.collection_def["savers"]
      kind = collection_def.WhichOneof("kind")
      self.assertEqual(kind, "bytes_list")
      # Verifies that there are 2 entries in SAVERS collection.
      savers = getattr(collection_def, kind)
      self.assertEqual(2, len(savers.value))
      # Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
      collection_def = meta_graph_def0.collection_def["savers"]
      kind = collection_def.WhichOneof("kind")
      self.assertEqual(kind, "bytes_list")
      # Verifies that there are 2 entries in SAVERS collection.
      savers = getattr(collection_def, kind)
      self.assertEqual(2, len(savers.value))
  def _testMultiSaverCollectionRestore(self, test_dir):
    """Imports the MetaGraphDef written by _testMultiSaverCollectionSave.

    Verifies that each restored saver restores only its own variable:
    saver0 restores v0 while leaving v1 uninitialized, and saver1 then
    restores v1.
    """
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Imports from meta_graph.
      saver_module.import_meta_graph(filename)
      # Retrieves SAVERS collection. Verifies there are 2 entries.
      savers = ops_lib.get_collection("savers")
      self.assertEqual(2, len(savers))
      # Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
      new_saver0 = savers[0]
      new_saver0.restore(sess, saver0_ckpt)
      v0 = sess.graph.get_tensor_by_name("v0:0")
      v1 = sess.graph.get_tensor_by_name("v1:0")
      self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], v0.eval())
      self.assertEqual([3, 2], v0.get_shape())
      self.assertEqual([], v1.get_shape())
      # v1 was not restored by saver0, so reading it must fail.
      with self.assertRaisesWithPredicateMatch(
          errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      # Retrieves saver1. Verifies that new_saver1 can restore v1.
      new_saver1 = savers[1]
      new_saver1.restore(sess, saver1_ckpt)
      v1 = sess.graph.get_tensor_by_name("v1:0")
      self.assertEqual(11.0, v1.eval())
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
  def testClearExtraneousSavers(self):
    """Verifies clear_extraneous_savers strips other savers from an export.

    When saver1 exports with clear_extraneous_savers=True, the SAVERS
    collection in its MetaGraphDef must contain only saver1, and saver0's
    graph nodes must be omitted from the exported graph_def.
    """
    test_dir = self._get_test_dir("clear_extraneous_savers")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Creates a graph.
      v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
      v1 = variables.Variable(11.0, name="v1")
      # Creates 2 savers.
      saver0 = saver_module.Saver({"v0": v0}, name="saver0")
      saver1 = saver_module.Saver({"v1": v1}, name="saver1")
      ops_lib.add_to_collection("savers", saver0)
      ops_lib.add_to_collection("savers", saver1)
      variables.global_variables_initializer().run()
      # Saves to different checkpoints.
      saver0.save(sess, saver0_ckpt)
      saver1.save(sess, saver1_ckpt)
      # Generates MetaGraphDef.
      meta_graph_def = saver_module.export_meta_graph(filename)
      meta_graph_def0 = saver0.export_meta_graph()
      meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
      # Verifies that there is no saver_def in meta_graph_def.
      self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is saver_def in meta_graph_def0 and 1.
      self.assertTrue(meta_graph_def0.HasField("saver_def"))
      self.assertTrue(meta_graph_def1.HasField("saver_def"))
      # Verifies SAVERS is saved as bytes_list for meta_graph_def.
      collection_def = meta_graph_def.collection_def["savers"]
      kind = collection_def.WhichOneof("kind")
      self.assertEqual(kind, "bytes_list")
      # Verifies that there are 2 entries in SAVERS collection.
      savers = getattr(collection_def, kind)
      self.assertEqual(2, len(savers.value))
      # Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
      collection_def = meta_graph_def1.collection_def["savers"]
      kind = collection_def.WhichOneof("kind")
      self.assertEqual(kind, "bytes_list")
      # Verifies that there is 1 entry in SAVERS collection.
      savers = getattr(collection_def, kind)
      self.assertEqual(1, len(savers.value))
      # Verifies that saver0 graph nodes are omitted from the saver1 export.
      # NOTE(review): these node counts are tied to the exact graph built
      # above; any change to the construction will require updating them.
      self.assertEqual(29, len(meta_graph_def0.graph_def.node))
      self.assertEqual(19, len(meta_graph_def1.graph_def.node))
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=ops_lib.Graph()):
# Creates a graph.
variables.Variable(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.test_session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=ops_lib.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "Cannot parse file"):
saver_module.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "does not exist"):
saver_module.import_meta_graph(filename)
  def testSliceVariable(self):
    """Exports and re-imports a saver over a slice (partitioned) variable.

    v2 is marked as a slice of a larger variable "v1"; the round-tripped
    MetaGraphDef must be identical to the original export.
    """
    test_dir = self._get_test_dir("slice_saver")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session():
      v1 = variables.Variable([20.0], name="v1")
      v2 = variables.Variable([20.0], name="v2")
      # Marks v2 as a size-1 slice at offset 0 of a full variable "v1".
      v2._set_save_slice_info(
          variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # The names are different and will work.
      slice_saver = saver_module.Saver({"first": v1, "second": v2})
      variables.global_variables_initializer().run()
      # Exports to meta_graph.
      meta_graph_def = slice_saver.export_meta_graph(filename)
    with ops_lib.Graph().as_default():
      # Restores from MetaGraphDef.
      new_saver = saver_module.import_meta_graph(filename)
      self.assertIsNotNone(new_saver)
      # Generates a new MetaGraphDef.
      new_meta_graph_def = new_saver.export_meta_graph()
      # It should be the same as the original.
      test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
                                               new_meta_graph_def)
  def _testGraphExtensionSave(self, test_dir):
    """Builds an inference graph with control flow and exports it.

    The graph is a small 3-layer MLP whose biases deliberately involve a
    cond and a while_loop so that control-flow contexts are exercised by
    the MetaGraphDef export. The logits tensor is stashed in a collection
    so the restore phase can extend the graph from it.
    """
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    # Creates an inference graph.
    # Hidden 1
    images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
    with ops_lib.name_scope("hidden1"):
      weights = variables.Variable(
          random_ops.truncated_normal(
              [28, 128], stddev=1.0 / math.sqrt(float(28))),
          name="weights")
      # The use of control_flow_ops.cond here is purely for adding test coverage
      # the save and restore of control flow context (which doesn't make any
      # sense here from a machine learning perspective). The typical biases is
      # a simple Variable without the conditions.
      biases = variables.Variable(
          control_flow_ops.cond(
              math_ops.less(random.random(), 0.5),
              lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
          name="biases")
      hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
    # Hidden 2
    with ops_lib.name_scope("hidden2"):
      weights = variables.Variable(
          random_ops.truncated_normal(
              [128, 32], stddev=1.0 / math.sqrt(float(128))),
          name="weights")

      # The use of control_flow_ops.while_loop here is purely for adding test
      # coverage the save and restore of control flow context (which doesn't
      # make any sense here from a machine learning perspective). The typical
      # biases is a simple Variable without the conditions.
      def loop_cond(it, _):
        return it < 2

      def loop_body(it, biases):
        biases += constant_op.constant(0.1, shape=[32])
        return it + 1, biases

      _, biases = control_flow_ops.while_loop(
          loop_cond, loop_body,
          [constant_op.constant(0), variables.Variable(array_ops.zeros([32]))])
      hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
    # Linear
    with ops_lib.name_scope("softmax_linear"):
      weights = variables.Variable(
          random_ops.truncated_normal(
              [32, 10], stddev=1.0 / math.sqrt(float(32))),
          name="weights")
      biases = variables.Variable(array_ops.zeros([10]), name="biases")
      logits = math_ops.matmul(hidden2, weights) + biases
      # Stored so the restore phase can look the logits tensor up by name.
      ops_lib.add_to_collection("logits", logits)
    init_all_op = variables.global_variables_initializer()
    with self.test_session() as sess:
      # Initializes all the variables.
      sess.run(init_all_op)
      # Runs to logit.
      sess.run(logits)
      # Creates a saver.
      saver0 = saver_module.Saver()
      saver0.save(sess, saver0_ckpt)
      # Generates MetaGraphDef.
      saver0.export_meta_graph(filename)
  def _testGraphExtensionRestore(self, test_dir):
    """Restores the inference graph and extends it with a training subgraph.

    Imports the MetaGraphDef saved by _testGraphExtensionSave, adds a loss
    and a gradient-descent train op on top of the restored logits, runs one
    training step, and exports the extended graph as a new MetaGraphDef.
    """
    filename = os.path.join(test_dir, "metafile")
    train_filename = os.path.join(test_dir, "train_metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Restores from MetaGraphDef.
      new_saver = saver_module.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_saver.export_meta_graph()
      # Restores from checkpoint.
      new_saver.restore(sess, saver0_ckpt)
      # Adds loss and train.
      labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
      batch_size = array_ops.size(labels)
      labels = array_ops.expand_dims(labels, 1)
      indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
      concated = array_ops.concat([indices, labels], 1)
      # One-hot encodes the labels via sparse_to_dense.
      onehot_labels = sparse_ops.sparse_to_dense(
          concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
      logits = ops_lib.get_collection("logits")[0]
      cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
          labels=onehot_labels, logits=logits, name="xentropy")
      loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")

      summary.scalar("loss", loss)
      # Creates the gradient descent optimizer with the given learning rate.
      optimizer = gradient_descent.GradientDescentOptimizer(0.01)

      # Creates the train_op and stashes it in a collection for the
      # control-context restore phase.
      train_op = optimizer.minimize(loss)
      ops_lib.add_to_collection("train_op", train_op)

      # Runs train_op.
      sess.run(train_op)

      # Generates MetaGraphDef.
      saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
sess.run(train_op)
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
  def _testGradientSerDes(self, graph_fn):
    """Tests that gradients can be computed after exporting and importing.

    Builds a graph, exports it, and verifies that it can be imported and the
    gradient can be built and run correctly.

    Args:
      graph_fn: takes a single float Tensor argument as input, outputs a single
        Tensor
    """
    test_dir = self._get_test_dir("nested_control_flow")
    filename = os.path.join(test_dir, "metafile")
    saver_ckpt = os.path.join(test_dir, "saver.ckpt")

    # Build the forward graph from `graph_fn` over a single variable.
    with ops_lib.Graph().as_default():
      var = variables.Variable(0.0)
      var_name = var.name
      output = graph_fn(var)
      output_name = output.name
      init_op = variables.global_variables_initializer()

      # Generate a MetaGraphDef containing the while loop.
      with session.Session() as sess:
        sess.run(init_op)
        sess.run(output)
        saver = saver_module.Saver()
        saver.save(sess, saver_ckpt)
        saver.export_meta_graph(filename)

      # Build and run the gradients of the while loop. We use this below to
      # verify that the gradients are correct with an imported MetaGraphDef.
      grad = gradients_impl.gradients([output], [var])
      # Turn off constant folding to avoid breaking testNestedControlFlowSerDes.
      # It appears that a missing control dependency in the gradient graph
      # causes the fetch node to not be triggered.
      no_constfold_config = config_pb2.ConfigProto()
      no_constfold_config.graph_options.rewrite_options.constant_folding = (
          rewriter_config_pb2.RewriterConfig.OFF)
      with session.Session(config=no_constfold_config) as sess:
        sess.run(init_op)
        expected_grad_value = sess.run(grad)

    # Restore the MetaGraphDef into a new Graph.
    with ops_lib.Graph().as_default():
      with session.Session() as sess:
        saver = saver_module.import_meta_graph(filename)
        saver.restore(sess, saver_ckpt)

      # Make sure we can still build gradients and get the same result.
      var = ops_lib.get_default_graph().get_tensor_by_name(var_name)
      output = ops_lib.get_default_graph().get_tensor_by_name(output_name)
      grad = gradients_impl.gradients([output], [var])

      init_op = variables.global_variables_initializer()

      with session.Session(config=no_constfold_config) as sess:
        sess.run(init_op)
        actual_grad_value = sess.run(grad)
        # The imported graph must yield exactly the same gradient value.
        self.assertEqual(expected_grad_value, actual_grad_value)
def _testWhileLoopAndGradientSerDes(self, outer_body_fn):
# Build a while loop with `outer_body_fn`, export it, and verify that it can
# be imported and the gradient can be built and run correctly.
# pylint: disable=g-long-lambda
return self._testGradientSerDes(
lambda x: control_flow_ops.while_loop(
lambda i, y: i < 5, outer_body_fn, [0, x])[1])
# pylint: enable=g-long-lambda
def testNestedWhileLoopsSerDes(self):
# Test two simple nested while loops.
def body(i, x):
_, r = control_flow_ops.while_loop(lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])
return i + 1, x + r
self._testWhileLoopAndGradientSerDes(body)
  def testNestedControlFlowSerDes(self):
    """Gradient SerDes for a while loop in a cond in a while loop."""
    # pylint: disable=g-long-lambda
    def body(i, x):
      # On all but the first iteration, run an inner 3-step accumulation
      # loop; on the first iteration just pass x through.
      cond_result = control_flow_ops.cond(
          i > 0,
          lambda: control_flow_ops.while_loop(
              lambda j, y: j < 3,
              lambda j, y: (j + 1, y + x),
              [0, 0.0])[1],
          lambda: x)
      return i + 1, cond_result
    # pylint: enable=g-long-lambda
    self._testWhileLoopAndGradientSerDes(body)
  def testNestedCondsSerDes(self):
    """Gradient SerDes for conds nested inside both branches of a cond."""
    # pylint: disable=g-long-lambda
    self._testGradientSerDes(lambda x: control_flow_ops.cond(
        x > 0,
        lambda: control_flow_ops.cond(x > 3,
                                      lambda: array_ops.identity(x),
                                      lambda: math_ops.multiply(x, 2.0)),
        lambda: control_flow_ops.cond(x < -3,
                                      lambda: constant_op.constant(1.0),
                                      lambda: math_ops.multiply(x, -1.0))))
    # pylint: enable=g-long-lambda
  def testStrippedOpListDef(self):
    """Verifies the exported stripped_op_list lists exactly the ops used.

    The expected op list depends on the saver's write version (V1 uses
    SaveSlices, V2 uses SaveV2). Also checks that op summaries and
    descriptions are stripped from the list.
    """
    with self.test_session():
      # Creates a graph.
      v0 = variables.Variable(0.0)
      var = variables.Variable(10.0)
      math_ops.add(v0, var)

      @function.Defun(dtypes.float32)
      def minus_one(x):
        return x - 1

      minus_one(array_ops.identity(v0))
      save = saver_module.Saver({"v0": v0})
      variables.global_variables_initializer()

      # Generates MetaGraphDef.
      meta_graph_def = save.export_meta_graph()
      ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
      if save._write_version is saver_pb2.SaverDef.V1:
        self.assertEqual(ops, [
            "Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2",
            "SaveSlices", "Sub", "VariableV2"
        ])
      else:
        self.assertEqual(ops, [
            "Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2", "SaveV2",
            "Sub", "VariableV2"
        ])

      # Test calling stripped_op_list_for_graph directly
      op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
      self.assertEqual(ops, [o.name for o in op_list.op])
      # Summaries and descriptions must have been stripped.
      for o in op_list.op:
        self.assertEqual(o.summary, "")
        self.assertEqual(o.description, "")
  def testStripDefaultValuedAttrs(self):
    """Verifies that default valued attrs are stripped, unless disabled.

    Exercises export_meta_graph with strip_default_attrs both True and
    False and inspects the "Complex" node's attrs in the exported
    graph_def.
    """
    # With strip_default_attrs enabled, attributes "T" (float32) and "Tout"
    # (complex64) in the "Complex" op must be removed.
    with self.test_session():
      real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
      imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")

      save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
      variables.global_variables_initializer()

      meta_graph_def = save.export_meta_graph(strip_default_attrs=True)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertNotIn("T", node_def.attr)
      self.assertNotIn("Tout", node_def.attr)

    # With strip_default_attrs disabled, attributes "T" (float32) and "Tout"
    # (complex64) in the "Complex" op must *not* be removed, even if they map
    # to their defaults.
    with self.test_session(graph=ops_lib.Graph()):
      real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
      imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")

      save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
      variables.global_variables_initializer()

      meta_graph_def = save.export_meta_graph(strip_default_attrs=False)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertIn("T", node_def.attr)
      self.assertIn("Tout", node_def.attr)
  def testImportIntoNamescope(self):
    """Imports a meta graph into an explicit name scope.

    Trains nothing; just verifies that the imported graph's ops are
    reachable under the "new_model/" prefix and that the optimizer can run.
    """
    test_dir = self._get_test_dir("import_into_namescope")
    filename = os.path.join(test_dir, "ckpt")
    image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
    label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
    with session.Session() as sess:
      # A single-layer softmax classifier with an Adam optimizer.
      weights = variables.Variable(
          random_ops.random_uniform([784, 10]), name="weights")
      bias = variables.Variable(array_ops.zeros([10]), name="bias")
      logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
      nn_ops.softmax(logit, name="prediction")
      cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
                                                      logits=logit, name="cost")
      adam.AdamOptimizer().minimize(cost, name="optimize")
      saver = saver_module.Saver()
      sess.run(variables.global_variables_initializer())
      saver.save(sess, filename)

    graph = ops_lib.Graph()
    with session.Session(graph=graph) as sess:
      # import_scope places every imported op under "new_model/".
      new_saver = saver_module.import_meta_graph(
          filename + ".meta", graph=graph, import_scope="new_model")
      new_saver.restore(sess, filename)
      sess.run(["new_model/optimize"], {
          "new_model/image:0": np.random.random([1, 784]),
          "new_model/label:0": np.random.randint(
              10, size=[1, 10])
      })
  def testImportIntoImplicitNamescope(self):
    """Imports a meta graph into an implicit (ambient) name scope.

    Same as testImportIntoNamescope, but the "new_model" prefix comes from
    an enclosing ops_lib.name_scope rather than the import_scope argument.
    """
    test_dir = self._get_test_dir("import_into_namescope")
    filename = os.path.join(test_dir, "ckpt")
    image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
    label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
    with session.Session() as sess:
      # A single-layer softmax classifier with an Adam optimizer.
      weights = variables.Variable(
          random_ops.random_uniform([784, 10]), name="weights")
      bias = variables.Variable(array_ops.zeros([10]), name="bias")
      logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
      nn_ops.softmax(logit, name="prediction")
      cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
                                                      logits=logit, name="cost")
      adam.AdamOptimizer().minimize(cost, name="optimize")
      saver = saver_module.Saver()
      sess.run(variables.global_variables_initializer())
      saver.save(sess, filename)

    graph = ops_lib.Graph()
    with session.Session(graph=graph) as sess:
      # The ambient name scope, not import_scope, supplies the prefix here.
      with ops_lib.name_scope("new_model"):
        new_saver = saver_module.import_meta_graph(
            filename + ".meta", graph=graph)

      new_saver.restore(sess, filename)
      sess.run(["new_model/optimize"], {
          "new_model/image:0": np.random.random([1, 784]),
          "new_model/label:0": np.random.randint(
              10, size=[1, 10])
      })
  def testClearDevicesOnImport(self):
    """Verifies clear_devices on import strips unavailable device specs.

    The graph is pinned to a GPU on a "ps" job that does not exist in the
    test environment: importing with clear_devices=False must fail at
    initialization, while clear_devices=True must succeed and run.
    """
    with ops_lib.Graph().as_default():
      with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
        image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
        label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
        weights = variables.Variable(
            random_ops.random_uniform([784, 10]), name="weights")
        bias = variables.Variable(array_ops.zeros([10]), name="bias")
        logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
        nn_ops.softmax(logit, name="prediction")
        cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
                                                        logits=logit)
        adam.AdamOptimizer().minimize(cost, name="optimize")
      meta_graph_def = saver_module.export_meta_graph()

    with session.Session(graph=ops_lib.Graph()) as sess:
      saver_module.import_meta_graph(
          meta_graph_def, clear_devices=False, import_scope="new_model")
      # Device refers to GPU, which is not available here.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        sess.run(variables.global_variables_initializer())

    with session.Session(graph=ops_lib.Graph()) as sess:
      saver_module.import_meta_graph(
          meta_graph_def, clear_devices=True, import_scope="new_model")
      sess.run(variables.global_variables_initializer())
      sess.run(["new_model/optimize"], {
          "new_model/image:0": np.random.random([1, 784]),
          "new_model/label:0": np.random.randint(
              10, size=[1, 10])
      })
  def testClearDevicesOnExport(self):
    """Verifies clear_devices on export strips unavailable device specs.

    Counterpart of testClearDevicesOnImport: the devices are cleared when
    the MetaGraphDef is exported, so a plain import must succeed and run.
    """
    with ops_lib.Graph().as_default():
      with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
        image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
        label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
        weights = variables.Variable(
            random_ops.random_uniform([784, 10]), name="weights")
        bias = variables.Variable(array_ops.zeros([10]), name="bias")
        logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
        nn_ops.softmax(logit, name="prediction")
        cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
                                                        logits=logit)
        adam.AdamOptimizer().minimize(cost, name="optimize")
      # Devices are stripped at export time.
      meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
      graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
                           "meta_graph.pbtxt")

    with session.Session(graph=ops_lib.Graph()) as sess:
      saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
      sess.run(variables.global_variables_initializer())
      sess.run(["new_model/optimize"], {
          "new_model/image:0": np.random.random([1, 784]),
          "new_model/label:0": np.random.randint(
              10, size=[1, 10])
      })
  def testPreserveDatasetAndFunctions(self):
    """Verifies that datasets and their map functions survive export/import.

    A Dataset.map introduces a function into the graph's function library;
    the test checks that all three export code paths preserve it and that
    the imported iterator still produces the squared sequence and raises
    OutOfRangeError when exhausted.
    """
    with ops_lib.Graph().as_default() as g:
      dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
      iterator = dataset.make_one_shot_iterator()
      next_element = iterator.get_next()
      _ = array_ops.identity(next_element, name="output")

      # Generate three MetaGraphDef protos using different code paths.
      meta_graph_def_simple = saver_module.export_meta_graph()
      meta_graph_def_devices_cleared = saver_module.export_meta_graph(
          clear_devices=True)
      meta_graph_def_from_graph_def = saver_module.export_meta_graph(
          clear_devices=True, graph_def=g.as_graph_def())

    for meta_graph_def in [meta_graph_def_simple,
                           meta_graph_def_devices_cleared,
                           meta_graph_def_from_graph_def]:
      with session.Session(graph=ops_lib.Graph()) as sess:
        saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
        sess.run(variables.global_variables_initializer())
        for i in range(10):
          self.assertEqual(i * i, sess.run("new_model/output:0"))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run("new_model/output:0")
class CheckpointReaderTest(test.TestCase):
  """Tests for pywrap_tensorflow.NewCheckpointReader (V1 checkpoint format)."""

  # Checkpoint format used when saving; a subclass overrides this for V2.
  _WRITE_VERSION = saver_pb2.SaverDef.V1

  def testDebugString(self):
    """Saves two variables and inspects the checkpoint through a reader."""
    # Builds a graph.
    v0 = variables.Variable(
        [[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
    v1 = variables.Variable(
        [[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
    init_all_op = variables.global_variables_initializer()
    save = saver_module.Saver(
        {
            "v0": v0,
            "v1": v1
        }, write_version=self._WRITE_VERSION)
    save_path = os.path.join(self.get_temp_dir(),
                             "ckpt_for_debug_string" + str(self._WRITE_VERSION))
    with self.test_session() as sess:
      sess.run(init_all_op)
      # Saves a checkpoint.
      save.save(sess, save_path)

      # Creates a reader.
      reader = pywrap_tensorflow.NewCheckpointReader(save_path)
      # Verifies that the tensors exist.
      self.assertTrue(reader.has_tensor("v0"))
      self.assertTrue(reader.has_tensor("v1"))
      debug_string = reader.debug_string()
      # Verifies that debug string contains the right strings.
      self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
      self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
      # Verifies get_variable_to_shape_map() returns the correct information.
      var_map = reader.get_variable_to_shape_map()
      self.assertEqual([2, 3], var_map["v0"])
      self.assertEqual([3, 2, 1], var_map["v1"])
      # Verifies get_tensor() returns the tensor value.
      v0_tensor = reader.get_tensor("v0")
      v1_tensor = reader.get_tensor("v1")
      self.assertAllEqual(v0.eval(), v0_tensor)
      self.assertAllEqual(v1.eval(), v1_tensor)
      # Verifies get_tensor() fails for non-existent tensors.
      with self.assertRaisesRegexp(errors.NotFoundError,
                                   "v3 not found in checkpoint"):
        reader.get_tensor("v3")

  def testNonexistentPath(self):
    """Opening a reader on a missing path raises NotFoundError."""
    with self.assertRaisesRegexp(errors.NotFoundError,
                                 "Unsuccessful TensorSliceReader"):
      pywrap_tensorflow.NewCheckpointReader("non-existent")
class CheckpointReaderForV2Test(CheckpointReaderTest):
  """Re-runs every CheckpointReaderTest case against the V2 format."""
  _WRITE_VERSION = saver_pb2.SaverDef.V2
class WriteGraphTest(test.TestCase):
  """Tests for graph_io.write_graph."""

  def _get_test_dir(self, dirname):
    """Creates (if needed) and returns a per-test directory path."""
    path = os.path.join(self.get_temp_dir(), dirname)
    gfile.MakeDirs(path)
    return path

  def testWriteGraph(self):
    """write_graph writes the file into the requested directory."""
    directory = self._get_test_dir("write_graph_dir")
    variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
    written = graph_io.write_graph(ops_lib.get_default_graph(),
                                   os.path.join(directory, "l1"),
                                   "graph.pbtxt")
    expected = os.path.join(directory, "l1", "graph.pbtxt")
    self.assertEqual(written, expected)
    self.assertTrue(os.path.exists(written))

  def testRecursiveCreate(self):
    """write_graph creates missing intermediate directories recursively."""
    directory = self._get_test_dir("deep_dir")
    variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
    written = graph_io.write_graph(
        ops_lib.get_default_graph().as_graph_def(),
        os.path.join(directory, "l1", "l2", "l3"), "graph.pbtxt")
    expected = os.path.join(directory, "l1", "l2", "l3", "graph.pbtxt")
    self.assertEqual(written, expected)
    self.assertTrue(os.path.exists(written))
class SaverUtilsTest(test.TestCase):
  """Tests for saver utilities: checkpoint_exists and get_checkpoint_mtimes."""

  def setUp(self):
    # Fresh base directory for every test; removed in tearDown.
    self._base_dir = os.path.join(self.get_temp_dir(), "saver_utils_test")
    gfile.MakeDirs(self._base_dir)

  def tearDown(self):
    gfile.DeleteRecursively(self._base_dir)

  def testCheckpointExists(self):
    """checkpoint_exists is False before saving and True after, for every
    combination of sharding and checkpoint format version."""
    for sharded in (False, True):
      for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
        with self.test_session(graph=ops_lib.Graph()) as sess:
          unused_v = variables.Variable(1.0, name="v")
          variables.global_variables_initializer().run()
          saver = saver_module.Saver(sharded=sharded, write_version=version)

          path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
          self.assertFalse(
              saver_module.checkpoint_exists(path))  # Not saved yet.

          ckpt_prefix = saver.save(sess, path)
          self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))

          # The prefix returned by latest_checkpoint must also be found.
          ckpt_prefix = saver_module.latest_checkpoint(self._base_dir)
          self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))

  def testGetCheckpointMtimes(self):
    """get_checkpoint_mtimes returns one non-decreasing mtime per prefix."""
    prefixes = []
    for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
      with self.test_session(graph=ops_lib.Graph()) as sess:
        unused_v = variables.Variable(1.0, name="v")
        variables.global_variables_initializer().run()
        saver = saver_module.Saver(write_version=version)
        prefixes.append(
            saver.save(sess, os.path.join(self._base_dir, str(version))))

    mtimes = saver_module.get_checkpoint_mtimes(prefixes)
    self.assertEqual(2, len(mtimes))
    # Checkpoints were written in order, so mtimes must not decrease.
    self.assertTrue(mtimes[1] >= mtimes[0])
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
  def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
    """Exports the "hidden1" subgraph scope and saves its variables.

    Builds the same control-flow-heavy MLP as the graph-extension tests,
    exports only the "hidden1" scope via export_scoped_meta_graph, and
    writes a checkpoint for the returned variable list.
    """
    graph = ops_lib.Graph()
    with graph.as_default():
      # Creates an inference graph.
      # Hidden 1
      images = constant_op.constant(
          1.2, dtypes.float32, shape=[100, 28], name="images")
      with ops_lib.name_scope("hidden1"):
        weights1 = variables.Variable(
            random_ops.truncated_normal(
                [28, 128], stddev=1.0 / math.sqrt(float(28))),
            name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage the save and restore of control flow context (which doesn't
        # make any sense here from a machine learning perspective). The typical
        # biases is a simple Variable without the conditions.
        biases1 = variables.Variable(
            control_flow_ops.cond(
                math_ops.less(random.random(), 0.5),
                lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
            name="biases")
        hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)

      # Hidden 2
      with ops_lib.name_scope("hidden2"):
        weights2 = variables.Variable(
            random_ops.truncated_normal(
                [128, 32], stddev=1.0 / math.sqrt(float(128))),
            name="weights")

        # The use of control_flow_ops.while_loop here is purely for adding test
        # coverage the save and restore of control flow context (which doesn't
        # make any sense here from a machine learning perspective). The typical
        # biases is a simple Variable without the conditions.
        def loop_cond(it, _):
          return it < 2

        def loop_body(it, biases2):
          biases2 += constant_op.constant(0.1, shape=[32])
          return it + 1, biases2

        _, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
            constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
        ])
        hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
      # Linear
      with ops_lib.name_scope("softmax_linear"):
        weights3 = variables.Variable(
            random_ops.truncated_normal(
                [32, 10], stddev=1.0 / math.sqrt(float(32))),
            name="weights")
        biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
        logits = math_ops.matmul(hidden2, weights3) + biases3
        ops_lib.add_to_collection("logits", logits)

      # Adds user_defined proto in three formats: string, bytes and Any.
      # Any proto should just pass through.
      queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
      ops_lib.add_to_collection("user_defined_string_collection",
                                str(queue_runner))
      ops_lib.add_to_collection("user_defined_bytes_collection",
                                queue_runner.SerializeToString())
      any_buf = Any()
      any_buf.Pack(queue_runner)
      ops_lib.add_to_collection("user_defined_any_collection", any_buf)

    # Only the "hidden1" scope is exported; var_list maps unscoped variable
    # names within that scope to the variables themselves.
    _, var_list = meta_graph.export_scoped_meta_graph(
        filename=os.path.join(test_dir, exported_filename),
        graph=ops_lib.get_default_graph(),
        export_scope="hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))

    with self.test_session(graph=graph) as sess:
      sess.run(variables.global_variables_initializer())
      saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
      saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
  def _testScopedRestore(self, test_dir, exported_filename,
                         new_exported_filename, ckpt_filename):
    """Imports the exported "hidden1" scope as "new_hidden1" and extends it.

    Rebinds the unbound `images` input, restores hidden1's variables from
    the checkpoint, then rebuilds hidden2/softmax_linear on top and runs
    logits end to end.
    """
    graph = ops_lib.Graph()
    # Create all the missing inputs.
    with graph.as_default():
      new_image = constant_op.constant(
          1.2, dtypes.float32, shape=[100, 28], name="images")
    # The exported scope's unbound input placeholder is remapped onto the
    # freshly created constant.
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filename),
        graph=graph,
        input_map={"$unbound_inputs_images": new_image},
        import_scope="new_hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
    weights1 = graph.as_graph_element("new_hidden1/weights:0")
    biases1 = graph.as_graph_element("new_hidden1/biases:0")

    with graph.as_default():
      # Hidden 2
      with ops_lib.name_scope("hidden2"):
        weights = variables.Variable(
            random_ops.truncated_normal(
                [128, 32], stddev=1.0 / math.sqrt(float(128))),
            name="weights")

        # The use of control_flow_ops.while_loop here is purely for adding test
        # coverage the save and restore of control flow context (which doesn't
        # make any sense here from a machine learning perspective). The typical
        # biases is a simple Variable without the conditions.
        def loop_cond(it, _):
          return it < 2

        def loop_body(it, biases):
          biases += constant_op.constant(0.1, shape=[32])
          return it + 1, biases

        _, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
            constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
        ])
        hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
      # Linear
      with ops_lib.name_scope("softmax_linear"):
        weights = variables.Variable(
            random_ops.truncated_normal(
                [32, 10], stddev=1.0 / math.sqrt(float(32))),
            name="weights")
        biases = variables.Variable(array_ops.zeros([10]), name="biases")
        logits = math_ops.matmul(hidden2, weights) + biases
        ops_lib.add_to_collection("logits", logits)

      # The rest of the variables.
      rest_variables = list(
          set(variables.global_variables()) - set(var_list.keys()))
      init_rest_op = variables.variables_initializer(rest_variables)

    with self.test_session(graph=graph) as sess:
      saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
      saver.restore(sess, os.path.join(test_dir, ckpt_filename))
      # Verify that we have restored weights1 and biases1.
      sess.run([weights1, biases1])
      # Initialize the rest of the variables and run logits.
      sess.run(init_rest_op)
      sess.run(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
def testScopedSaveAndRestore(self):
  """End-to-end scoped save/restore driver.

  Exports the "hidden1" scope to a MetaGraphDef plus checkpoint, then
  re-imports it under "new_hidden1" and restores from that checkpoint,
  delegating to the sibling _testScopedSave/_testScopedRestore helpers.
  """
  test_dir = self._get_test_dir("scoped_export_import")
  ckpt_filename = "ckpt"
  self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
  self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
                          "exported_new_hidden1.pbtxt", ckpt_filename)
# Verifies that we can copy the subgraph under "hidden1" and copy it
# to different name scope in the same graph or different graph.
def testCopyScopedGraph(self):
  """Copies the "hidden1" scope within one graph and across graphs, and
  checks that the scoped checkpoint restores into every copy."""
  test_dir = self._get_test_dir("scoped_copy")
  saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
  graph1 = ops_lib.Graph()
  with graph1.as_default():
    with ops_lib.name_scope("hidden1"):
      images = constant_op.constant(
          1.0, dtypes.float32, shape=[3, 2], name="images")
      weights1 = variables.Variable(
          [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
      biases1 = variables.Variable([0.1] * 3, name="biases")
      nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")

  # Run the graph and save scoped checkpoint.
  with self.test_session(graph=graph1) as sess:
    sess.run(variables.global_variables_initializer())
    _, var_list_1 = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1")
    saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
    saver.save(sess, saver0_ckpt, write_state=False)

  # Known output of relu(images @ weights + biases) for the constants above.
  expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))

  # Verifies copy to the same graph with the same name fails.
  with graph1.as_default():
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: "need to be different" in str(e)):
      meta_graph.copy_scoped_meta_graph(
          from_scope="hidden1", to_scope="hidden1")

  # Verifies copy to the same graph.
  with graph1.as_default():
    var_list_2 = meta_graph.copy_scoped_meta_graph(
        from_scope="hidden1", to_scope="hidden2")

  # Both the original and copied scopes restore from the same checkpoint
  # and produce identical outputs.
  with self.test_session(graph=graph1) as sess:
    saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
    saver1.restore(sess, saver0_ckpt)
    saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
    saver2.restore(sess, saver0_ckpt)
    self.assertAllClose(expected, sess.run("hidden1/relu:0"))
    self.assertAllClose(expected, sess.run("hidden2/relu:0"))

  # Verifies copy to a different graph.
  graph2 = ops_lib.Graph()
  new_var_list_1 = meta_graph.copy_scoped_meta_graph(
      from_scope="hidden1",
      to_scope="new_hidden1",
      from_graph=graph1,
      to_graph=graph2)

  with self.test_session(graph=graph2) as sess:
    saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
    saver3.restore(sess, saver0_ckpt)
    self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testExportGraphDefWithScope(self):
  """Exports a scoped meta graph from an explicitly supplied GraphDef and
  verifies that the checkpoint restores into a copy in a fresh graph."""
  test_dir = self._get_test_dir("export_graph_def")
  saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
  graph1 = ops_lib.Graph()
  with graph1.as_default():
    with ops_lib.name_scope("hidden1"):
      images = constant_op.constant(
          1.0, dtypes.float32, shape=[3, 2], name="images")
      weights1 = variables.Variable(
          [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
      biases1 = variables.Variable([0.1] * 3, name="biases")
      nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")

  # Run the graph and save scoped checkpoint. Unlike testCopyScopedGraph,
  # the export is driven by an explicit graph_def argument.
  with self.test_session(graph=graph1) as sess:
    sess.run(variables.global_variables_initializer())
    _, var_list_1 = meta_graph.export_scoped_meta_graph(
        graph_def=graph1.as_graph_def(), export_scope="hidden1")
    saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
    saver.save(sess, saver0_ckpt, write_state=False)

  # Known output of relu(images @ weights + biases) for the constants above.
  expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))

  # Verifies that we can run successfully after restoring.
  graph2 = ops_lib.Graph()
  new_var_list_1 = meta_graph.copy_scoped_meta_graph(
      from_scope="hidden1",
      to_scope="new_hidden1",
      from_graph=graph1,
      to_graph=graph2)

  with self.test_session(graph=graph2) as sess:
    saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
    saver3.restore(sess, saver0_ckpt)
    self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testSerializeSaverWithScope(self):
  """Checks that Savers registered in the SAVERS collection survive a
  scoped copy and can restore variables in the destination graph."""
  test_dir = self._get_test_dir("export_graph_def")
  saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
  saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
  graph = ops_lib.Graph()
  with graph.as_default():
    with ops_lib.name_scope("hidden1"):
      variable1 = variables.Variable([1.0], name="variable1")
      saver1 = saver_module.Saver(var_list=[variable1])
      graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)

    with ops_lib.name_scope("hidden2"):
      variable2 = variables.Variable([2.0], name="variable2")
      # NOTE(review): saver2 gets an explicit name inside the scope so its
      # ops live under "hidden2/" — presumably so the scoped copy below
      # picks it up; confirm against meta_graph scoping rules.
      saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
      graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)

  # Write one checkpoint per saver.
  with self.test_session(graph=graph) as sess:
    variables.global_variables_initializer().run()
    saver1.save(sess, saver1_ckpt, write_state=False)
    saver2.save(sess, saver2_ckpt, write_state=False)

  # Copy "hidden1" into a new graph; exactly one variable and one
  # collected Saver must come along, and the Saver must still restore.
  graph1 = ops_lib.Graph()
  var_dict1 = meta_graph.copy_scoped_meta_graph(
      from_scope="hidden1",
      to_scope="new_hidden1",
      from_graph=graph,
      to_graph=graph1)
  self.assertEqual(1, len(var_dict1))

  saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
  self.assertEqual(1, len(saver_list1))

  with self.test_session(graph=graph1) as sess:
    saver_list1[0].restore(sess, saver1_ckpt)
    self.assertEqual(1.0, var_dict1["variable1:0"].eval())

  # Same check for the "hidden2" scope and its explicitly named saver.
  graph2 = ops_lib.Graph()
  var_dict2 = meta_graph.copy_scoped_meta_graph(
      from_scope="hidden2",
      to_scope="new_hidden2",
      from_graph=graph,
      to_graph=graph2)
  self.assertEqual(1, len(var_dict2))

  saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
  self.assertEqual(1, len(saver_list2))

  with self.test_session(graph=graph2) as sess:
    saver_list2[0].restore(sess, saver2_ckpt)
    self.assertEqual(2.0, var_dict2["variable2:0"].eval())
class _OwnsAVariableSimple(checkpointable.CheckpointableBase):
  """A Checkpointable object which can be saved using a tf.train.Saver."""

  def __init__(self):
    self.non_dep_variable = variable_scope.get_variable(
        name="non_dep_variable", initializer=6., use_resource=True)

  def _gather_saveables_for_checkpoint(self):
    # Expose the variable directly under the canonical value key; the
    # Saver treats this like any named variable.
    return {checkpointable.VARIABLE_VALUE_KEY: self.non_dep_variable}

  # The Saver sorts by name before parsing, so we need a name property.
  @property
  def name(self):
    return self.non_dep_variable.name
class _MirroringSaveable(
    saver_module.BaseSaverBuilder.ResourceVariableSaveable):
  """A SaveableObject that saves one variable but, on restore, writes the
  checkpointed value into both a primary and a mirrored variable."""

  def __init__(self, primary_variable, mirrored_variable, name):
    self._primary_variable = primary_variable
    self._mirrored_variable = mirrored_variable
    # Only the primary variable is actually written to the checkpoint.
    super(_MirroringSaveable, self).__init__(
        self._primary_variable, "", name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into both variables."""
    tensor, = restored_tensors
    return control_flow_ops.group(
        self._primary_variable.assign(tensor),
        self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable.CheckpointableBase):
  """A Checkpointable object which returns a more complex SaveableObject."""

  def __init__(self):
    self.non_dep_variable = variable_scope.get_variable(
        name="non_dep_variable", initializer=6., use_resource=True)
    self.mirrored = variable_scope.get_variable(
        name="mirrored", initializer=15., use_resource=True)

  def _gather_saveables_for_checkpoint(self):
    # Returning a factory (rather than a variable) exercises the Saver's
    # support for deferred SaveableObject construction.
    def _saveable_factory(name=self.non_dep_variable.name):
      return _MirroringSaveable(
          primary_variable=self.non_dep_variable,
          mirrored_variable=self.mirrored,
          name=name)
    return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}

  # The Saver sorts by name before parsing, so we need a name property.
  @property
  def name(self):
    return self.non_dep_variable.name
class NonLayerCheckpointable(checkpointable.Checkpointable):
  """A plain Checkpointable (not a Keras Layer) holding one scalar variable."""

  def __init__(self):
    super(NonLayerCheckpointable, self).__init__()
    self.a_variable = checkpointable_utils.add_variable(
        self, name="a_variable", shape=[])
class MyModel(training.Model):
  """A concrete Model for testing."""

  def __init__(self):
    super(MyModel, self).__init__()
    self._named_dense = core.Dense(1, use_bias=True)
    self._second = core.Dense(1, use_bias=False)
    # We can still track Checkpointables which aren't Layers.
    self._non_layer = NonLayerCheckpointable()

  def call(self, values):
    # Forward pass: two stacked dense layers.
    ret = self._second(self._named_dense(values))
    return ret
class CheckpointableCompatibilityTests(test.TestCase):
  """Tests interoperability between the name-based tf.train.Saver and
  Checkpointable objects / object-based checkpoints."""

  # TODO(allenl): Track down python3 reference cycles in these tests.
  @test_util.run_in_graph_and_eager_modes()
  def testNotSaveableButIsCheckpointable(self):
    """A Checkpointable exposing a plain variable round-trips via Saver."""
    v = _OwnsAVariableSimple()
    saver = saver_module.Saver(var_list=[v])
    test_dir = self.get_temp_dir()
    prefix = os.path.join(test_dir, "ckpt")
    self.evaluate(v.non_dep_variable.assign(42.))
    with self.test_session() as sess:
      save_path = saver.save(sess, prefix)
      # Clobber the value, then restore and check it comes back.
      self.evaluate(v.non_dep_variable.assign(43.))
      saver.restore(sess, save_path)
      self.assertEqual(42., self.evaluate(v.non_dep_variable))

  @test_util.run_in_graph_and_eager_modes()
  def testMoreComplexSaveableReturned(self):
    """A custom SaveableObject restores one saved value into two variables."""
    v = _OwnsMirroredVariables()
    saver = saver_module.Saver(var_list=[v])
    test_dir = self.get_temp_dir()
    prefix = os.path.join(test_dir, "ckpt")
    self.evaluate(v.non_dep_variable.assign(42.))
    with self.test_session() as sess:
      save_path = saver.save(sess, prefix)
      # Clobber both variables; restore should reset both to 42.
      self.evaluate(v.non_dep_variable.assign(43.))
      self.evaluate(v.mirrored.assign(44.))
      saver.restore(sess, save_path)
      self.assertEqual(42., self.evaluate(v.non_dep_variable))
      self.assertEqual(42., self.evaluate(v.mirrored))

  def testSingleTensorEvaluation(self):
    """Saving evaluates each SaveSpec tensor callable exactly once."""

    class _CountingSaveable(saver_module.BaseSaverBuilder.SaveableObject):

      def __init__(self, name):
        # Number of times the Saver pulled the tensor's value.
        self.eval_count = 0
        def _tensor():
          self.eval_count += 1
          return constant_op.constant([1.])
        dummy_op = constant_op.constant([2.])
        super(_CountingSaveable, self).__init__(
            dummy_op,
            [saver_module.BaseSaverBuilder.SaveSpec(
                _tensor, "", name, dtype=dummy_op.dtype)],
            name)

      def restore(self, restored_tensors, restored_shapes):
        """Restore the same value into both variables."""
        pass

    with context.eager_mode():
      v = _CountingSaveable("foo")
      saver = saver_module.Saver(var_list=[v])
      test_dir = self.get_temp_dir()
      prefix = os.path.join(test_dir, "ckpt")
      with self.test_session() as sess:
        save_path = saver.save(sess, prefix)
        self.assertEqual(1, v.eval_count)
        # Restoring must not re-evaluate the save tensor.
        saver.restore(sess, save_path)
        self.assertEqual(1, v.eval_count)

  def _initialized_model(self):
    # Builds a small model + Adam optimizer, runs one training step, then
    # assigns known values so later restores can be verified.
    input_value = constant_op.constant([[3.]])
    model = MyModel()
    optimizer = adam.AdamOptimizer(0.001)
    optimizer_step = training_util.get_or_create_global_step()
    root_checkpointable = checkpointable_utils.Checkpoint(
        optimizer=optimizer, model=model, optimizer_step=optimizer_step)
    train_op = optimizer.minimize(
        functools.partial(model, input_value),
        global_step=optimizer_step)
    self.evaluate(checkpointable_utils.gather_initializers(
        root_checkpointable))
    self.evaluate(train_op)
    # A regular variable, a slot variable, and a non-slot Optimizer variable
    # with known values to check when loading.
    self.evaluate(model._named_dense.bias.assign([1.]))
    self.evaluate(optimizer.get_slot(
        var=model._named_dense.bias, name="m").assign([2.]))
    beta1_power, _ = optimizer._get_beta_accumulators()
    self.evaluate(beta1_power.assign(3.))
    return root_checkpointable

  def _set_sentinels(self, root_checkpointable):
    # Overwrites the known values with sentinels; a successful restore must
    # put the original values (1., 2., 3.) back.
    self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
    self.evaluate(
        root_checkpointable.optimizer.get_slot(
            var=root_checkpointable.model._named_dense.bias, name="m")
        .assign([102.]))
    beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
    self.evaluate(beta1_power.assign(103.))

  def _check_sentinels(self, root_checkpointable):
    # Asserts the values written by _initialized_model are in place again.
    self.assertAllEqual(
        [1.], self.evaluate(root_checkpointable.model._named_dense.bias))
    self.assertAllEqual([2.], self.evaluate(
        root_checkpointable.optimizer.get_slot(
            var=root_checkpointable.model._named_dense.bias, name="m")))
    beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
    self.assertAllEqual(3., self.evaluate(beta1_power))

  def testVariableNotFoundErrorRaised(self):
    # Restore does some tricky exception handling to figure out if it should
    # load an object-based checkpoint. Tests that the exception handling isn't
    # too broad.
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    a = resource_variable_ops.ResourceVariable(1., name="a")
    b = resource_variable_ops.ResourceVariable(1., name="b")
    a_saver = saver_module.Saver([a])
    b_saver = saver_module.Saver([b])
    with self.test_session() as sess:
      sess.run(a.initializer)
      save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
      with self.assertRaisesRegexp(
          errors.NotFoundError, "Key b not found in checkpoint"):
        b_saver.restore(sess=sess, save_path=save_path)

  def testCheckpointNotFoundErrorRaised(self):
    # Restore does some tricky exception handling to figure out if it should
    # load an object-based checkpoint. Tests that the exception handling isn't
    # too broad.
    a = resource_variable_ops.ResourceVariable(1., name="a")
    saver = saver_module.Saver([a])
    with self.test_session() as sess:
      with self.assertRaisesRegexp(
          errors.NotFoundError,
          "Failed to find any matching files for path_which_does_not_exist"):
        saver.restore(sess=sess, save_path="path_which_does_not_exist")
      try:
        saver.restore(sess=sess, save_path="path_which_does_not_exist")
      except errors.NotFoundError:
        # Make sure we don't have a confusing "During handling of the above
        # exception" block in Python 3.
        # pylint: disable=no-value-for-parameter
        exception_string = "\n".join(
            traceback.format_exception(*sys.exc_info()))
        # pylint: enable=no-value-for-parameter
        self.assertNotIn("NewCheckpointReader", exception_string)

  def testLoadFromObjectBasedGraph(self):
    """A name-based Saver loads an object-based checkpoint in graph mode."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_graph = ops_lib.Graph()
    with save_graph.as_default(), self.test_session(graph=save_graph) as sess:
      root = self._initialized_model()
      object_saver = checkpointable_utils.CheckpointableSaver(root)
      save_path = object_saver.save(file_prefix=checkpoint_prefix)

      # An incompatible object-based checkpoint to check error messages
      var = resource_variable_ops.ResourceVariable(1., name="a")
      self.evaluate(var.initializer)
      second_saver = checkpointable_utils.CheckpointableSaver(var)
      second_path = second_saver.save(file_prefix=os.path.join(
          checkpoint_directory, "second"))

    restore_graph = ops_lib.Graph()
    with restore_graph.as_default(), self.test_session(
        graph=restore_graph) as sess:
      root = self._initialized_model()
      self._set_sentinels(root)
      saver = saver_module.Saver()
      saver.restore(sess=sess, save_path=save_path)
      self._check_sentinels(root)
      before_second_restore_ops = restore_graph.get_operations()
      # Test that multiple restores do not pollute the graph
      saver.restore(sess=sess, save_path=save_path)
      self.assertEqual(before_second_restore_ops,
                       restore_graph.get_operations())
      with self.assertRaisesRegexp(errors.NotFoundError,
                                   "could not find a_variable"):
        saver.restore(sess=sess, save_path=second_path)

  def testLoadFromObjectBasedEager(self):
    """A name-based Saver loads an object-based checkpoint in eager mode."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_graph = ops_lib.Graph()
    with save_graph.as_default(), self.test_session(graph=save_graph):
      root = self._initialized_model()
      object_saver = checkpointable_utils.CheckpointableSaver(root)
      save_path = object_saver.save(file_prefix=checkpoint_prefix)

    with context.eager_mode():
      root = self._initialized_model()
      self._set_sentinels(root)
      saver = saver_module.Saver(
          root.model.variables + root.optimizer.variables())
      saver.restore(sess=None, save_path=save_path)
      self._check_sentinels(root)
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "0dcfc3432efa98db6c00769089ae9f15",
"timestamp": "",
"source": "github",
"line_count": 3149,
"max_line_length": 82,
"avg_line_length": 41.86884725309622,
"alnum_prop": 0.6461678486101103,
"repo_name": "dendisuhubdy/tensorflow",
"id": "f1991093e0b519da7448809e759a1cd5c57b80d9",
"size": "132533",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saver_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "304178"
},
{
"name": "C++",
"bytes": "43473103"
},
{
"name": "CMake",
"bytes": "202538"
},
{
"name": "Go",
"bytes": "1148824"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "755551"
},
{
"name": "Jupyter Notebook",
"bytes": "2211560"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48603"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "36820408"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "428510"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
"""Create the Initial Data fixture from SQL statements.
The :file:`claims/SQL` directory contains the SQL required to populate
the database.
This is used to create the :file:`claims/fixtures/initial_data.json` file.
"""
import logging
import sys
import os.path
from django.core.management import call_command
def make_fixture():
    """Build :file:`web/claims/fixtures/initial_data.json` from 'wstest'.

    Runs ``syncdb`` plus the app's custom SQL to populate the ``wstest``
    database, then dumps the "claims" app's data as indented JSON into the
    fixture file.
    """
    call_command("syncdb", interactive=False, database='wstest')
    call_command("sqlcustom", "claims", database='wstest')
    result = os.path.join('web', 'claims', 'fixtures', 'initial_data.json')
    with open(result, 'w') as target:
        # call_command("dumpdata") writes to stdout, so temporarily redirect
        # it into the fixture file. The original code restored sys.stdout
        # only on success, leaving it bound to a closed file if dumpdata
        # raised; the try/finally guarantees restoration.
        sys.stdout = target
        try:
            call_command("dumpdata", "claims", indent=2, database='wstest')
        finally:
            sys.stdout = sys.__stdout__
# Allow running this module directly as a script.
if __name__ == "__main__":
    make_fixture()
|
{
"content_hash": "dcbec5cbaf9795415166cadc700aaf3d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 32.041666666666664,
"alnum_prop": 0.6723016905071522,
"repo_name": "jdavisp3/TigerShark",
"id": "75923b99ac683fc36399d49fe1424490b5dac7b4",
"size": "792",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/claims/make_initial_data_fixture.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "36926"
},
{
"name": "HTML",
"bytes": "120474"
},
{
"name": "JavaScript",
"bytes": "46605"
},
{
"name": "PLpgSQL",
"bytes": "113"
},
{
"name": "Perl",
"bytes": "23425"
},
{
"name": "Python",
"bytes": "4426497"
}
],
"symlink_target": ""
}
|
"""
Find intermediate evalutation results in assert statements through builtin AST.
This should replace oldinterpret.py eventually.
"""
import sys
import ast
import py
from _pytest.assertion import util
from _pytest.assertion.reinterpret import BuiltinAssertionError
# On old Jython (< 2.5.2) isinstance checks against ast.expr / ast.stmt do
# not work (see the linked bug), so node classes are tested against explicit
# sets built by name. Everywhere else the isinstance form is used.
if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
    # See http://bugs.jython.org/issue1497
    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
              "List", "Tuple")
    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
    _expr_nodes = set(getattr(ast, name) for name in _exprs)
    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
    def _is_ast_expr(node):
        return node.__class__ in _expr_nodes
    def _is_ast_stmt(node):
        return node.__class__ in _stmt_nodes
else:
    def _is_ast_expr(node):
        return isinstance(node, ast.expr)
    def _is_ast_stmt(node):
        return isinstance(node, ast.stmt)
class Failure(Exception):
    """Error found while interpreting AST."""

    def __init__(self, explanation=""):
        # Capture the currently-handled exception (if any) so callers like
        # getfailure() can report the underlying cause with the explanation.
        self.cause = sys.exc_info()
        self.explanation = explanation
def interpret(source, frame, should_fail=False):
    """Re-evaluate *source* inside *frame* and return a failure explanation.

    Returns None when nothing failed and *should_fail* is false; returns a
    diagnostic hint when the re-run unexpectedly succeeded.
    """
    tree = ast.parse(source)
    try:
        DebugInterpreter(frame).visit(tree)
    except Failure:
        return getfailure(sys.exc_info()[1])
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail. Suggestions: "
                "compute assert expression before the assert or use --assert=plain)")
def run(offending_line, frame=None):
    """Interpret *offending_line*, defaulting to the caller's frame."""
    target_frame = frame if frame is not None else py.code.Frame(sys._getframe(1))
    return interpret(offending_line, target_frame)
def getfailure(e):
    """Render a Failure as ``ExcType: explanation``.

    The original exception's message (if any) is spliced into the first
    explanation line after ``<<``; the redundant ``AssertionError: `` prefix
    is stripped for plain asserts.
    """
    explanation = util.format_explanation(e.explanation)
    value = e.cause[1]
    if str(value):
        first, sep, rest = explanation.partition('\n')
        explanation = "%s << %s%s%s" % (first, value, sep, rest)
    text = "%s: %s" % (e.cause[0].__name__, explanation)
    if text.startswith('AssertionError: assert '):
        text = text[16:]
    return text
# Maps AST operator node classes to their source-level symbols, used to
# rebuild a readable rendering of the failing expression.
operator_map = {
    ast.BitOr : "|",
    ast.BitXor : "^",
    ast.BitAnd : "&",
    ast.LShift : "<<",
    ast.RShift : ">>",
    ast.Add : "+",
    ast.Sub : "-",
    ast.Mult : "*",
    ast.Div : "/",
    ast.FloorDiv : "//",
    ast.Mod : "%",
    ast.Eq : "==",
    ast.NotEq : "!=",
    ast.Lt : "<",
    ast.LtE : "<=",
    ast.Gt : ">",
    ast.GtE : ">=",
    ast.Pow : "**",
    ast.Is : "is",
    ast.IsNot : "is not",
    ast.In : "in",
    ast.NotIn : "not in"
}

# Unary operators map to %-templates so the operand's explanation (or a
# placeholder name) can be substituted in directly.
unary_map = {
    ast.Not : "not %s",
    ast.Invert : "~%s",
    ast.USub : "-%s",
    ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
    """Interpret AST nodes to gleam useful debugging information. """

    def __init__(self, frame):
        # py.code.Frame in which all sub-expressions are (re-)evaluated.
        self.frame = frame

    def generic_visit(self, node):
        # Fallback when we don't have a special implementation.
        # Visitors return an (explanation, result) pair; statements yield
        # (None, None).
        if _is_ast_expr(node):
            mod = ast.Expression(node)
            co = self._compile(mod)
            try:
                result = self.frame.eval(co)
            except Exception:
                raise Failure()
            explanation = self.frame.repr(result)
            return explanation, result
        elif _is_ast_stmt(node):
            mod = ast.Module([node])
            co = self._compile(mod, "exec")
            try:
                self.frame.exec_(co)
            except Exception:
                raise Failure()
            return None, None
        else:
            raise AssertionError("can't handle %s" %(node,))

    def _compile(self, source, mode="eval"):
        # Compile an AST (or source string) for evaluation in self.frame.
        return compile(source, "<assertion interpretation>", mode)

    def visit_Expr(self, expr):
        return self.visit(expr.value)

    def visit_Module(self, mod):
        for stmt in mod.body:
            self.visit(stmt)

    def visit_Name(self, name):
        """Resolve a name; non-local names are rendered by id only."""
        explanation, result = self.generic_visit(name)
        # See if the name is local.
        source = "%r in locals() is not globals()" % (name.id,)
        co = self._compile(source)
        try:
            local = self.frame.eval(co)
        except Exception:
            # have to assume it isn't
            local = None
        if local is None or not self.frame.is_true(local):
            return name.id, result
        return explanation, result

    def visit_Compare(self, comp):
        """Evaluate a (possibly chained) comparison pairwise, left to right,
        stopping at the first falsy link like real chained comparisons."""
        left = comp.left
        left_explanation, left_result = self.visit(left)
        for op, next_op in zip(comp.ops, comp.comparators):
            next_explanation, next_result = self.visit(next_op)
            op_symbol = operator_map[op.__class__]
            explanation = "%s %s %s" % (left_explanation, op_symbol,
                                        next_explanation)
            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
            co = self._compile(source)
            try:
                result = self.frame.eval(co, __exprinfo_left=left_result,
                                         __exprinfo_right=next_result)
            except Exception:
                raise Failure(explanation)
            try:
                if not self.frame.is_true(result):
                    break
            except KeyboardInterrupt:
                raise
            except:
                break
            left_explanation, left_result = next_explanation, next_result

        # Give a custom comparison reporter (if installed) a chance to
        # provide a richer explanation for the failing pair.
        if util._reprcompare is not None:
            res = util._reprcompare(op_symbol, left_result, next_result)
            if res:
                explanation = res
        return explanation, result

    def visit_BoolOp(self, boolop):
        """Short-circuit `and`/`or`, recording each operand's explanation."""
        is_or = isinstance(boolop.op, ast.Or)
        explanations = []
        for operand in boolop.values:
            explanation, result = self.visit(operand)
            explanations.append(explanation)
            # `or` stops on the first truthy operand, `and` on the first
            # falsy one.
            if result == is_or:
                break
        name = is_or and " or " or " and "
        explanation = "(" + name.join(explanations) + ")"
        return explanation, result

    def visit_UnaryOp(self, unary):
        pattern = unary_map[unary.op.__class__]
        operand_explanation, operand_result = self.visit(unary.operand)
        explanation = pattern % (operand_explanation,)
        co = self._compile(pattern % ("__exprinfo_expr",))
        try:
            result = self.frame.eval(co, __exprinfo_expr=operand_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result

    def visit_BinOp(self, binop):
        left_explanation, left_result = self.visit(binop.left)
        right_explanation, right_result = self.visit(binop.right)
        symbol = operator_map[binop.op.__class__]
        explanation = "(%s %s %s)" % (left_explanation, symbol,
                                      right_explanation)
        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_left=left_result,
                                     __exprinfo_right=right_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result

    def visit_Call(self, call):
        """Evaluate a call with its pre-evaluated pieces bound to temporary
        __exprinfo_* names, then report the call and its result."""
        func_explanation, func = self.visit(call.func)
        arg_explanations = []
        ns = {"__exprinfo_func" : func}
        arguments = []
        for arg in call.args:
            arg_explanation, arg_result = self.visit(arg)
            arg_name = "__exprinfo_%s" % (len(ns),)
            ns[arg_name] = arg_result
            arguments.append(arg_name)
            arg_explanations.append(arg_explanation)
        for keyword in call.keywords:
            arg_explanation, arg_result = self.visit(keyword.value)
            arg_name = "__exprinfo_%s" % (len(ns),)
            ns[arg_name] = arg_result
            keyword_source = "%s=%%s" % (keyword.arg)
            arguments.append(keyword_source % (arg_name,))
            arg_explanations.append(keyword_source % (arg_explanation,))
        if call.starargs:
            arg_explanation, arg_result = self.visit(call.starargs)
            arg_name = "__exprinfo_star"
            ns[arg_name] = arg_result
            arguments.append("*%s" % (arg_name,))
            arg_explanations.append("*%s" % (arg_explanation,))
        if call.kwargs:
            arg_explanation, arg_result = self.visit(call.kwargs)
            arg_name = "__exprinfo_kwds"
            ns[arg_name] = arg_result
            arguments.append("**%s" % (arg_name,))
            arg_explanations.append("**%s" % (arg_explanation,))
        args_explained = ", ".join(arg_explanations)
        explanation = "%s(%s)" % (func_explanation, args_explained)
        args = ", ".join(arguments)
        source = "__exprinfo_func(%s)" % (args,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, **ns)
        except Exception:
            raise Failure(explanation)
        pattern = "%s\n{%s = %s\n}"
        rep = self.frame.repr(result)
        explanation = pattern % (rep, rep, explanation)
        return explanation, result

    def _is_builtin_name(self, name):
        # True when the name resolves neither locally nor globally, i.e. it
        # must come from builtins.
        pattern = "%r not in globals() and %r not in locals()"
        source = pattern % (name.id, name.id)
        co = self._compile(source)
        try:
            return self.frame.eval(co)
        except Exception:
            return False

    def visit_Attribute(self, attr):
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        source_explanation, source_result = self.visit(attr.value)
        explanation = "%s.%s" % (source_explanation, attr.attr)
        source = "__exprinfo_expr.%s" % (attr.attr,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            raise Failure(explanation)
        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
                                              self.frame.repr(result),
                                              source_explanation, attr.attr)
        # Check if the attr is from an instance.
        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
        source = source % (attr.attr,)
        co = self._compile(source)
        try:
            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            from_instance = None
        if from_instance is None or self.frame.is_true(from_instance):
            rep = self.frame.repr(result)
            pattern = "%s\n{%s = %s\n}"
            explanation = pattern % (rep, rep, explanation)
        return explanation, result

    def visit_Assert(self, assrt):
        test_explanation, test_result = self.visit(assrt.test)
        explanation = "assert %s" % (test_explanation,)
        if not self.frame.is_true(test_result):
            try:
                # Raise/catch so Failure captures an AssertionError as the
                # underlying cause via sys.exc_info().
                raise BuiltinAssertionError
            except Exception:
                raise Failure(explanation)
        return explanation, test_result

    def visit_Assign(self, assign):
        value_explanation, value_result = self.visit(assign.value)
        explanation = "... = %s" % (value_explanation,)
        # Re-run the assignment with the already-computed value bound to a
        # temporary name so targets are assigned exactly once.
        name = ast.Name("__exprinfo_expr", ast.Load(),
                        lineno=assign.value.lineno,
                        col_offset=assign.value.col_offset)
        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
                                col_offset=assign.col_offset)
        mod = ast.Module([new_assign])
        co = self._compile(mod, "exec")
        try:
            self.frame.exec_(co, __exprinfo_expr=value_result)
        except Exception:
            raise Failure(explanation)
        return explanation, value_result
|
{
"content_hash": "a810fc9d35a0253995c2405a7ab99b06",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 85,
"avg_line_length": 36.909909909909906,
"alnum_prop": 0.5517044992270768,
"repo_name": "npinto/pytest",
"id": "de03eaf8b0351aff1e798e1b8a2f3932ed52bddc",
"size": "12291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_pytest/assertion/newinterpret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "692760"
}
],
"symlink_target": ""
}
|
from test.test_funs import count_words_at_url
from rq import Queue, use_connection
from utils.worker import redis_connection
def submit_work(executor, args, queue="default"):
    """Enqueue *executor(args)* on the named RQ queue and return the job."""
    use_connection(redis_connection)
    target_queue = Queue(queue)
    return target_queue.enqueue(executor, args)
|
{
"content_hash": "8e83ed874e3c1a19ca88eeb647aeae26",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 49,
"avg_line_length": 24.5,
"alnum_prop": 0.7380952380952381,
"repo_name": "LoopSun/linux-client",
"id": "01731b4d75ad6066260a9045b62f06dd345d63fa",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/rq_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4585"
}
],
"symlink_target": ""
}
|
import os
from django.conf import settings
from django.contrib.auth import models
from django.contrib.auth.models import User, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.test import TestCase
from django.test.client import Client
from authority.models import Permission
from django_addons.autodiscover import autodiscover_notifications
from transifex.txcommon.tests.base import BaseTestCase, USER_ROLES, NoticeTypes
from transifex.txcommon.log import logger
from transifex.languages.models import Language
from transifex.projects.models import Project
from transifex.projects.permissions.project import ProjectPermission
from transifex.teams.models import Team
from django.utils import unittest
# TODO: POST requests should also be tested everywhere (teams, tr. actions etc.)
# NOTE(review): these models are resolved lazily via get_model rather than
# imported directly — presumably to avoid hard import dependencies on the
# 'repowatch' and 'locks' apps; confirm against the apps' availability.
Watch = get_model('repowatch', 'Watch')
POFileLock = get_model('locks', 'POFileLock')
def skip(func):
    """Skip *func* unless the suite runs from the project path.

    Bug fix: the previous implementation wrapped *func* in an inner
    ``decorator`` that returned ``unittest.skipUnless(...)`` itself instead
    of applying it, so decorated tests never executed their bodies — calling
    the test simply returned the unapplied decorator object and the test
    "passed" trivially. The skip decorator is now applied to *func*, so the
    test runs when ``settings.TX_ROOT == settings.PROJECT_PATH`` and is
    reported as skipped otherwise.
    """
    msg = "%s skipped. Please implement it in your project path." % func.__name__
    if settings.TX_ROOT != settings.PROJECT_PATH:
        logger.debug(msg)
    return unittest.skipUnless(settings.TX_ROOT == settings.PROJECT_PATH, msg)(func)
class PrivateProjectTest(BaseTestCase):
"""
Test private projects overall.
Permissions, get, post return codes etc.
"""
@skip
def test_project_list_with_anonymous_user(self):
    """
    Test that project list pages contain only the public project and not
    the private, if the user is anonymous.
    """
    # Uses assertEqual/assertNotEqual instead of the deprecated
    # failUnlessEqual/failIfEqual aliases (removed in Python 3.12 unittest).

    ### project list ALL
    # Issue a GET request.
    response = self.client['anonymous'].get(reverse('project_list'))
    # Check that the response is 200 OK.
    self.assertEqual(response.status_code, 200)
    # Check that the rendered context contains all public projects (see setup)
    self.assertEqual(len(response.context['project_list']),
        Project.objects.filter(private=False).count())
    # Ensure that private project does NOT appear
    for project in response.context['project_list']:
        self.assertNotEqual(project.slug, self.project_private.slug)

    ### project list RECENT
    # Issue a GET request.
    response = self.client['anonymous'].get(reverse('project_list_recent'))
    # Check that the response is 200 OK.
    self.assertEqual(response.status_code, 200)
    # Check that the rendered context contains all (public) projects (see setup)
    self.assertEqual(len(response.context['project_list']),
        Project.objects.filter(private=False).count())
    # Ensure that private project does NOT appear
    self.assertNotEqual(response.context['project_list'][0].slug,
        self.project_private.slug)

#TODO: FEATURED, OPEN TRANSLATIONS list testing
@skip
def test_project_list_with_logged_in_user(self):
    """
    Test that project list pages contain only the public project and not
    the private, if the user is logged in.
    """
    # Uses assertEqual/assertNotEqual instead of the deprecated
    # failUnlessEqual/failIfEqual aliases (removed in Python 3.12 unittest).

    ### project list ALL
    # Issue a GET request.
    response = self.client['registered'].get(reverse('project_list'))
    # Check that the response is 200 OK.
    self.assertEqual(response.status_code, 200)
    # Check that the rendered context contains all public projects (see setup)
    self.assertEqual(len(response.context['project_list']),
        Project.objects.filter(private=False).count())
    # Ensure that private project does NOT appear
    for project in response.context['project_list']:
        self.assertNotEqual(project.slug, self.project_private.slug)

    ### project list RECENT
    # Issue a GET request.
    response = self.client['registered'].get(reverse('project_list_recent'))
    # Check that the response is 200 OK.
    self.assertEqual(response.status_code, 200)
    # Check that the rendered context contains all (public) projects (see setup)
    self.assertEqual(len(response.context['project_list']),
        Project.objects.filter(private=False).count())
    # Ensure that private project does NOT appear
    for project in response.context['project_list']:
        self.assertNotEqual(project.slug, self.project_private.slug)
def test_project_detail(self):
"""
Check private project details access.
"""
# Check anonymous user and logged in user with no permissions
for user in ['anonymous', 'registered']:
response = self.client[user].get(self.urls['project_private'])
self.failUnlessEqual(response.status_code, 403)
# Check people who should have access to the private project
for user in ['maintainer', 'team_coordinator', 'team_member']: # 'writer',
response = self.client[user].get(self.urls['project_private'])
self.failUnlessEqual(response.status_code, 200)
def test_resource_details(self):
"""
Check private project components' detail access.
"""
# Check anonymous user and logged in user with no permissions
for user in ['anonymous', 'registered']:
response = self.client[user].get(self.urls['resource_private'])
self.failUnlessEqual(response.status_code, 403)
# Check people who should have access to the private project
for user in ['maintainer', 'team_coordinator', 'team_member']: # 'writer',
response = self.client[user].get(self.urls['resource_private'])
self.failUnlessEqual(response.status_code, 200)
def test_widgets(self):
"""
Test if the permissions to project widgets page are the correct ones.
"""
#'/projects/p/priv_test/''/projects/p/priv_test/widgets/'
url = reverse('project_widgets', kwargs={'project_slug':self.project_private.slug})
# Check anonymous user and logged in user with no permissions
for user in ['anonymous', 'registered']:
response = self.client[user].get(url)
self.failUnlessEqual(response.status_code, 403)
# Check people who should have access to the private project
for user in ['maintainer', 'writer', 'team_coordinator', 'team_member']:
response = self.client[user].get(url)
self.failUnlessEqual(response.status_code, 403)
def test_search_project(self):
"""
Test that searching for the private project does not return it.
We also check the appearance of the public project
"""
URL = reverse('search')
TERMS_1_1 = {'q': self.project_private.slug}
TERMS_1_2 = {'q': self.resource_private.slug}
TERMS_1_3 = {'q': self.project_private.name}
# All type of users should not see private projects in search results!
for user in USER_ROLES:
response = self.client[user].get(URL, TERMS_1_1)
self.failUnlessEqual(response.status_code, 200)
self.assertFalse(self.project_private in response.context['results'])
response = self.client[user].get(URL, TERMS_1_2)
self.failUnlessEqual(response.status_code, 200)
self.assertFalse(self.project_private in response.context['results'])
response = self.client[user].get(URL, TERMS_1_3)
self.failUnlessEqual(response.status_code, 200)
self.assertFalse(self.project_private in response.context['results'])
def test_teams_access(self):
"""
Check private project teams' pages access.
"""
URLs = {
'anonymous' : {
403 : [
'/projects/p/%s/teams/' % self.project_private.slug,
'/projects/p/%s/team/%s/' % (self.project_private.slug,
self.language.code)
],
302 : [
'/projects/p/%s/teams/add/' % self.project_private.slug,
'/projects/p/%s/team/%s/edit/' % (self.project_private.slug,
self.language.code),
'/projects/p/%s/team/%s/delete/' % (self.project_private.slug,
self.language.code),
'/projects/p/%s/team/%s/request/' % (self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/approve/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/deny/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/withdraw/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/leave/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/teams/request/' % self.project_private.slug,
'/projects/p/%s/team/%s/approve/' % (self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/deny/' %(self.project_private.slug, self.language.code),
]
},
'registered' : {
403 : [
'/projects/p/%s/teams/' % self.project_private.slug, # FIXME: returns 200
'/projects/p/%s/team/%s/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/teams/add/' % self.project_private.slug,
'/projects/p/%s/team/%s/edit/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/delete/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/request/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/approve/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/deny/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/withdraw/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/leave/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/teams/request/' % self.project_private.slug,
'/projects/p/%s/team/%s/approve/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/deny/' %(self.project_private.slug, self.language.code)
]
},
'maintainer' : {
200 : [
'/projects/p/%s/teams/' % self.project_private.slug, #200
'/projects/p/%s/team/%s/' %(self.project_private.slug, self.language.code), #200
'/projects/p/%s/teams/add/' % self.project_private.slug, #200
'/projects/p/%s/team/%s/edit/' %(self.project_private.slug, self.language.code), #200
'/projects/p/%s/team/%s/delete/' %(self.project_private.slug, self.language.code) #200
],
302 : [
'/projects/p/%s/team/%s/request/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/leave/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/teams/request/' % self.project_private.slug
],
404 : [
'/projects/p/%s/team/%s/approve/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/deny/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/withdraw/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/approve/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/deny/' %(self.project_private.slug, self.language.code)
],
},
#'writer' : {
# 200 : [
# '/projects/p/%s/teams/',
# '/projects/p/%s/team/%s/' %(self.project_private.slug, self.language.code)
# ],
# 302 : [
# '/projects/p/%s/team/%s/request/' %(self.project_private.slug, self.language.code),
# '/projects/p/%s/team/%s/leave/' %(self.project_private.slug, self.language.code),
# '/projects/p/%s/teams/request/'
# ],
# 404 : [
# '/projects/p/%s/team/%s/withdraw/' %(self.project_private.slug, self.language.code)
# ],
# 403 : [
# '/projects/p/%s/teams/add/',
# '/projects/p/%s/team/%s/edit/' %(self.project_private.slug, self.language.code),
# '/projects/p/%s/team/%s/delete/' %(self.project_private.slug, self.language.code),
# '/projects/p/%s/team/%s/approve/%s/' % (self.project_private.slug, self.language.code,
# self.user['team_member'].username),
# '/projects/p/%s/team/%s/deny/%s/' % (self.project_private.slug, self.language.code,
# self.user['team_member'].username),
# '/projects/p/%s/team/%s/approve/' %(self.project_private.slug, self.language.code),
# '/projects/p/%s/team/%s/deny/' %(self.project_private.slug, self.language.code)
# ]
#},
'team_coordinator' : {
200 : [
'/projects/p/%s/teams/' % self.project_private.slug,
'/projects/p/%s/team/%s/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/edit/' %(self.project_private.slug, self.language.code)
],
302 : [
'/projects/p/%s/team/%s/request/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/leave/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/teams/request/' % self.project_private.slug
],
404 : [
'/projects/p/%s/team/%s/withdraw/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/approve/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/deny/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username)
],
403 : [
'/projects/p/%s/teams/add/' % self.project_private.slug,
'/projects/p/%s/team/%s/delete/' %(self.project_private.slug, self.language.code),
# TODO: Add a second team to check if coordinator has access too.
'/projects/p/%s/team/%s/approve/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/deny/' %(self.project_private.slug, self.language.code)
]
},
'team_member' : {
200 : [
'/projects/p/%s/teams/' % self.project_private.slug,
'/projects/p/%s/team/%s/' %(self.project_private.slug, self.language.code)
],
302 : [
'/projects/p/%s/team/%s/request/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/leave/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/teams/request/' % self.project_private.slug
],
404 : [
'/projects/p/%s/team/%s/withdraw/' %(self.project_private.slug, self.language.code),
],
403 : [
'/projects/p/%s/teams/add/' % self.project_private.slug,
'/projects/p/%s/team/%s/edit/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/delete/' %(self.project_private.slug, self.language.code),
# TODO: Add a second team to check if coordinator has access too.
'/projects/p/%s/team/%s/approve/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/deny/' %(self.project_private.slug, self.language.code),
'/projects/p/%s/team/%s/approve/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username),
'/projects/p/%s/team/%s/deny/%s/' % (self.project_private.slug, self.language.code,
self.user['team_member'].username)
]
}
}
for user in URLs.keys():
for status_code in URLs[user].keys():
for url in URLs[user][status_code]:
response = self.client[user].get(url)
self.failUnlessEqual(response.status_code, status_code,
"Wrong status code for user '%s' and url '%s' ( %s != %s)" % (
user, url, response.status_code,status_code))
def test_view_strings(self):
"""
Check access to view lotte for a resource in a private project (read
only access)
"""
# Check access to lotte for a language with a team.
URL = self.urls['translate_private']
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer', 'team_coordinator', 'team_member']:# 'writer',
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 200)
# Check access to lotte for a language without a team.
URL = reverse('translate_resource', kwargs={'project_slug':self.project_private.slug,
'resource_slug':self.resource_private.slug,
'lang_code': self.language_ar.code })
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered', 'team_coordinator', 'team_member']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer']: #'writer',
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 200)
def test_edit_strings(self):
"""
Check access to view lotte for a resource in a private project
"""
# Check access to lotte for a language with a team.
URL = self.urls['translate_private']
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
# Maybe this should be 404?
for user in ['registered']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer', 'team_coordinator', 'team_member']: # 'writer'?
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 200)
# Check access to lotte for a language without a team.
URL = reverse('translate_resource', kwargs={'project_slug':self.project_private.slug,
'resource_slug':self.resource_private.slug,
'lang_code': self.language_ar.code })
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['team_coordinator', 'team_member']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer']: # 'writer'?
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 200)
def test_download_file(self):
"""
Check access to download translation file for a resource in a private
project.
"""
# Check who has access to download pofile for language with team
URL = reverse('download_translation', kwargs={'project_slug':self.project_private.slug,
'resource_slug':self.resource_private.slug,
'lang_code': self.language.code })
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403) # better 404?
for user in ['maintainer', 'team_coordinator', 'team_member']: #'writer'?
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302) # why not 200?
# Check who has access to download pofile for language without team
URL = reverse('translate_resource', kwargs={'project_slug':self.project_private.slug,
'resource_slug':self.resource_private.slug,
'lang_code': self.language_ar.code })
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['team_coordinator', 'team_member', 'registered']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer']: # 'writer'?
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 200)
#def test_submit_file(self):
#"""
#Check access to submit pofile in a component of a private project.
#"""
#URL = reverse('component_edit_file', kwargs={'project_slug':self.project_private.slug,
# 'component_slug':'priv_component',
# 'filename': self.FILEPATHS[0] })
#
## POST Requests
## Anonymous user should not have access to submit files!
#response = self.client.post(URL, follow=True)
## Login required will redirect use to the login page
#self.failUnlessEqual(response.status_code, 200)
#self.failUnlessEqual(('http://testserver/accounts/login/?next=%s' %
# (URL), 302), response.redirect_chain[0])
#
## Logged in user without permissions should not have acces too!
#test_user = User.objects.create_user('test_login', 'test@transifex.net',
# 'test_login')
#self.assertTrue(self.client.login(username='test_login',
# password='test_login'))
#response = self.client.post(URL)
#self.failUnlessEqual(response.status_code, 403)
#self.client.logout()
#
## Maintainer should have permission to submit files
## (Owner should have been put to maintainers!)
#self.assertTrue(self.client.login(username='priv_owner',
# password='priv_owner'))
#response = self.client.post(URL, follow=True)
#self.failUnlessEqual(response.status_code, 200)
#self.client.logout()
#
## Check that a submitter (writer) has access to submit file.
#self.assertTrue(self.client.login(username='priv_submitter',
# password='priv_submitter'))
#response = self.client.post(URL, follow=True)
#self.failUnlessEqual(response.status_code, 200)
#self.client.logout()
#
##TODO: ONLY team members and coordinators of the specific team where
## the file belongs to must have access to it.
#
## Check that a team coordinator (writer) has access to submit a file of his team
#self.assertTrue(self.client.login(username='priv_coordinator',
# password='priv_coordinator'))
#response = self.client.post(URL, follow=True)
#self.failUnlessEqual(response.status_code, 200)
#self.client.logout()
#
## Check that a team member (writer) has access to submit a file of his team.
#self.assertTrue(self.client.login(username='priv_member',
# password='priv_member'))
#response = self.client.post(URL, follow=True)
#self.failUnlessEqual(response.status_code, 200)
#self.client.logout()
def test_lock_unlock_file(self):
"""
Check access to lock and unlock pofile in a component of a private project.
"""
URL = reverse('resource_language_lock', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug,
'language_code': self.language.code} )
# POST Requests
for user in ['anonymous']:
# the redirect works for the login page but we get 200 status? how
# come?? XXX! FIXME
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
for user in ['registered']:
# Anonymous and registered user should not have access to lock the files!
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer', 'team_coordinator', 'team_member']: #'writer',
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
URL = reverse('resource_language_lock', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug,
'language_code': self.language_ar.code})
for user in ['anonymous']: #, 'writer'
# the redirect works for the login page but we get 200 status? how
# come?? XXX! FIXME
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
# Why do team_co && team_member return 200? XXX ! FIXME
for user in ['registered']:# 'team_coordinator','team_member'
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer']: #, 'writer'
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
def test_watch_unwatch_file(self):
"""
Check access to watch/unwatch file in a component of a private project.
"""
from notification.models import NoticeType
URL = reverse('resource_translation_toggle_watch',
kwargs={ 'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug,
'language_code': self.language.code })
# POST Requests
for user in ['anonymous']:
response = self.client[user].post(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered']:
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer', 'team_coordinator', 'team_member']: #'writer',
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
URL = reverse('resource_language_lock', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug,
'language_code': self.language_ar.code})
for user in ['anonymous']:
response = self.client[user].post(URL)
self.failUnlessEqual(response.status_code, 302)
# Why do team_co && team_member return 200? XXX ! FIXME
for user in ['registered']: # , 'team_coordinator', 'team_member'
# Anonymous and registered user should not have access to lock the files!
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer']: # , 'writer'
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
def test_watch_unwatch_project(self):
"""
Check access to watch/unwatch project in a component of a private project.
"""
from notification.models import NoticeType
URL = reverse('project_toggle_watch',
kwargs={ 'project_slug':self.project_private.slug})
# POST Requests
for user in ['anonymous']:
response = self.client[user].post(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered']:
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer', 'team_coordinator', 'team_member']: # 'writer',
response = self.client[user].post(URL, follow=True)
self.failUnlessEqual(response.status_code, 200)
def test_charts(self):
"""
Check access to component charts.
"""
# images and charts urls
URLs = [
reverse('chart_resource_image', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug}),
reverse('chart_resource_js', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug}),
reverse('chart_resource_html', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug}),
reverse('chart_resource_json', kwargs={'project_slug':self.project_private.slug,
'resource_slug': self.resource_private.slug})
]
for user in ['anonymous', 'registered']:
for url in URLs:
response = self.client[user].get(url)
self.failUnlessEqual(response.status_code, 403)
# For now charts are disabled for private projects
for user in ['maintainer', 'writer', 'team_coordinator', 'team_member']:
for url in URLs:
response = self.client[user].get(url)
self.failUnlessEqual(response.status_code, 403)
def test_timeline(self):
"""
Check access to component charts.
"""
URL = reverse('project_timeline', kwargs={'project_slug':self.project_private.slug,})
# Only maintainers have access to the project timeline ???
for user in ['anonymous']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 302)
for user in ['registered', 'writer']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 403)
for user in ['maintainer', 'team_coordinator', 'team_member']:
response = self.client[user].get(URL)
self.failUnlessEqual(response.status_code, 200)
def test_public_to_private(self):
"""
Test the process of transforming a public project to private.
"""
pass
def test_private_to_public(self):
"""
Test the process of transforming a public project to private.
"""
pass
class ProjectLookupsTests(BaseTestCase):

    def test_private_projects_ajax_lookup(self):
        """Test that a private project isn't present in lookups.

        This AJAX lookup/dropdown is present in the Team Outsource form.
        """
        visible_name = "Test Project"
        hidden_name = "Test Private Project"
        lookup_path = '/ajax/ajax_lookup/projects'
        lookup_query = {'q': 'p', 'limit': '150', }

        def lookup_as(role):
            # Issue the AJAX project lookup as the given user role.
            return self.client[role].get(lookup_path, lookup_query)

        # A random registered user must not see the private project.
        self.assertTrue(self.user['registered'] not in self.project_private.maintainers.all())
        response = lookup_as('registered')
        self.assertContains(response, visible_name, status_code=200)
        self.assertNotContains(response, hidden_name, status_code=200)

        # The private project's maintainer must see it.
        self.assertTrue(self.user['maintainer'] in self.project_private.maintainers.all())
        response = lookup_as('maintainer')
        self.assertContains(response, visible_name, status_code=200)
        self.assertContains(response, hidden_name, status_code=200)

        # A member of one of its teams (who is not a maintainer) must see it too.
        self.assertTrue(self.user['team_member'] in self.team_private.members.all())
        self.assertFalse(self.user['team_member'] in self.project_private.maintainers.all())
        response = lookup_as('team_member')
        self.assertContains(response, visible_name, status_code=200)
        self.assertContains(response, hidden_name, status_code=200)
|
{
"content_hash": "be5a712260cec56dcd2172af7ac8c10d",
"timestamp": "",
"source": "github",
"line_count": 728,
"max_line_length": 107,
"avg_line_length": 46.71016483516483,
"alnum_prop": 0.5907954712542273,
"repo_name": "rvanlaar/easy-transifex",
"id": "2e8038af5b00376557bfffadfd2390a4b495b4ac",
"size": "34005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/transifex/transifex/projects/tests/private_projects.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "105585"
},
{
"name": "HTML",
"bytes": "365175"
},
{
"name": "JavaScript",
"bytes": "187021"
},
{
"name": "Python",
"bytes": "2303001"
},
{
"name": "Shell",
"bytes": "1358"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from moocng.courses.models import Course
class Category(models.Model):
    """A named, slugged grouping of courses with optional icon/banner art."""

    name = models.CharField(
        verbose_name=_(u'Name'), max_length=200, blank=False, null=False)
    slug = models.SlugField(
        verbose_name=_(u'Slug'), blank=False, null=False)
    icon = models.ImageField(
        verbose_name=_(u'Category\'s icon'), upload_to='category_icons',
        blank=True, null=True,
        help_text=_('The image dimensions should be 140px x 140px'))
    banner = models.ImageField(
        verbose_name=_(u'Background banner for the category\'s header'),
        upload_to='category_icons', blank=True, null=True,
        help_text=_('The image dimensions should be 940px x 240px'))
    # ``null`` has no effect on ManyToManyField (Django check fields.W340),
    # so it is intentionally omitted here; ``blank=True`` keeps the field
    # optional in forms.
    courses = models.ManyToManyField(
        Course, related_name='categories', verbose_name=_(u'Courses'),
        blank=True)
    only_admins = models.BooleanField(
        verbose_name=_(u'Only administrators can asign this category'),
        default=False)

    class Meta:
        verbose_name = _(u'category')
        verbose_name_plural = _(u'categories')

    def __unicode__(self):
        return self.name
|
{
"content_hash": "97f745a4ac23f74087bac351d3128031",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 36.81818181818182,
"alnum_prop": 0.6559670781893004,
"repo_name": "OpenMOOC/moocng",
"id": "dfb8f372f97036ae54cfc83a3bbc1487e72e44c8",
"size": "1215",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moocng/categories/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38819"
},
{
"name": "JavaScript",
"bytes": "2252503"
},
{
"name": "Python",
"bytes": "1484124"
}
],
"symlink_target": ""
}
|
"""Import every sibling module so each opener registers itself on package import."""
import os

for _module_file in os.listdir(os.path.dirname(__file__)):
    # Only plain Python modules; skip this initializer itself.
    if _module_file == '__init__.py' or not _module_file.endswith('.py'):
        continue
    # globals()/locals() must be passed in this order -- the builtin's
    # signature is __import__(name, globals, locals); the previous version
    # had them swapped.
    __import__(_module_file[:-3], globals(), locals())

# Keep the package namespace clean.
try:
    del _module_file
except NameError:
    # Directory contained no importable modules besides __init__.py.
    pass
del os
|
{
"content_hash": "a8a391c46aed59298d2ad6ad0fc23920",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6051282051282051,
"repo_name": "scommab/can-opener",
"id": "e9ed46da036efa4cf6b876d4fbf3d3ccd099db70",
"size": "249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5512"
}
],
"symlink_target": ""
}
|
import uuid
from datetime import datetime
from framework.sessions import session, create_session, Session
from modularodm import Q
from framework import bcrypt
from framework.auth import signals
from framework.auth.exceptions import DuplicateEmailError
from .core import User, Auth
from .core import get_user
# Public API re-exported by ``from framework.auth import *``.
# NOTE(review): get_or_create_user is defined below but not listed here --
# confirm whether that omission is intentional.
__all__ = [
    'get_display_name',
    'Auth',
    'User',
    'get_user',
    'check_password',
    'authenticate',
    'logout',
    'register_unconfirmed',
]
def get_display_name(username):
    """Return the username to display in the navbar.

    Usernames longer than 40 characters are abbreviated to their first 20
    and last 15 characters (each whitespace-trimmed), joined by "...".
    """
    if len(username) <= 40:
        return username
    head = username[:20].strip()
    tail = username[-15:].strip()
    return '%s...%s' % (head, tail)
# check_password(actual_pw_hash, given_password) -> Boolean
# Alias over bcrypt's hash check so callers can verify a password against
# its stored hash without importing bcrypt directly.
check_password = bcrypt.check_password_hash
def authenticate(user, access_token, response):
    """Bind *user* to the session, update login bookkeeping, and return the
    response carrying the (re)created session."""
    # Reuse the current session's data when one is active, else start fresh.
    if session._get_current_object():
        session_data = session.data
    else:
        session_data = {}
    session_data.update({
        'auth_user_username': user.username,
        'auth_user_id': user._primary_key,
        'auth_user_fullname': user.fullname,
        'auth_user_access_token': access_token,
    })
    user.date_last_login = datetime.utcnow()
    user.clean_email_verifications()
    user.update_affiliated_institutions_by_email_domain()
    user.save()
    return create_session(response, data=session_data)
def logout():
    """Strip auth keys from the session data and remove the session record."""
    for auth_key in ('auth_user_username', 'auth_user_id',
                     'auth_user_fullname', 'auth_user_access_token'):
        # Missing keys are fine -- logout is effectively idempotent.
        session.data.pop(auth_key, None)
    Session.remove(Q('_id', 'eq', session._id))
    return True
def register_unconfirmed(username, password, fullname, campaign=None):
    """Create (or refresh) an unconfirmed user record for *username*.

    Raises DuplicateEmailError when the email already belongs to a fully
    registered user.
    """
    user = get_user(email=username)
    if not user:
        # Brand-new email: create a fresh unconfirmed account.
        user = User.create_unconfirmed(
            username=username,
            password=password,
            fullname=fullname,
            campaign=campaign,
        )
        user.save()
        signals.unconfirmed_user_created.send(user)
        return user
    if user.is_registered:
        raise DuplicateEmailError('User {0!r} already exists'.format(username))
    # User is in db but not registered: refresh the pending record with the
    # newly submitted credentials and name.
    user.add_unconfirmed_email(username)
    user.set_password(password)
    user.fullname = fullname
    user.update_guessed_names()
    user.save()
    return user
def get_or_create_user(fullname, address, is_spam=False):
    """Get or create user by email address.

    :param str fullname: User full name
    :param str address: User email address
    :param bool is_spam: User flagged as potential spam
    :return: Tuple of (user, created)
    """
    existing = get_user(email=address)
    if existing:
        return existing, False
    from website import security  # Avoid circular imports
    # No account for this address yet: provision one with a random password.
    random_password = str(uuid.uuid4())
    new_user = User.create_confirmed(address, random_password, fullname)
    new_user.verification_key = security.random_string(20)
    if is_spam:
        new_user.system_tags.append('is_spam')
    return new_user, True
|
{
"content_hash": "50d4fc108be9c9eca41f610be47f79fc",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 102,
"avg_line_length": 29.59223300970874,
"alnum_prop": 0.651246719160105,
"repo_name": "zamattiac/osf.io",
"id": "b5c36b566b3cd3aac52606c8ef4effe96fbfe90c",
"size": "3073",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "framework/auth/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "158060"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1621216"
},
{
"name": "Mako",
"bytes": "669918"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5400704"
}
],
"symlink_target": ""
}
|
"""
Unittests for graph.algorithms.sorting
"""
import unittest
import pygraph.classes
from pygraph.algorithms.sorting import topological_sorting
from pygraph.algorithms.searching import depth_first_search
from sys import getrecursionlimit
import testlib
class test_topological_sorting(unittest.TestCase):
    """Unit tests for pygraph.algorithms.sorting.topological_sorting."""

    def test_topological_sorting_on_tree(self):
        """Every node must sort after its DFS spanning-tree parent."""
        gr = testlib.new_graph()
        st, pre, post = depth_first_search(gr)
        tree = pygraph.classes.digraph.digraph()
        # Rebuild the DFS spanning tree as an explicit digraph; st maps
        # each node to its spanning-tree parent (falsy for roots).
        for each in st:
            if st[each]:
                if (each not in tree.nodes()):
                    tree.add_node(each)
                if (st[each] not in tree.nodes()):
                    tree.add_node(st[each])
                tree.add_edge((st[each], each))
        ts = topological_sorting(tree)
        for each in ts:
            if (st[each]):
                # A parent must precede its child in the ordering.
                assert ts.index(each) > ts.index(st[each])

    def test_topological_sorting_on_digraph(self):
        """Each node popped off the sort must be validly ordered with
        respect to the nodes remaining before it."""

        def is_ordered(node, list):
            # NOTE(review): parameter name shadows the builtin ``list``.
            # Has parent on list
            for each in list:
                if gr.has_edge((each, node)):
                    return True
            # Has no possible ancestors on list
            st, pre, post = depth_first_search(gr, node)
            for each in list:
                if (each in st):
                    return False
            return True

        gr = testlib.new_digraph()
        ts = topological_sorting(gr)
        while (ts):
            x = ts.pop()
            assert is_ordered(x, ts)

    def test_topological_sort_on_very_deep_graph(self):
        """Sorting a 20k-node path graph must leave the interpreter's
        recursion limit unchanged afterwards."""
        gr = pygraph.classes.graph.graph()
        gr.add_nodes(range(0,20001))
        for i in range(0,20000):
            gr.add_edge((i,i+1))
        recursionlimit = getrecursionlimit()
        topological_sorting(gr)
        assert getrecursionlimit() == recursionlimit
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "dd0b52821937c6a102285a4ae3fc12a3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 59,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.5449466734382935,
"repo_name": "wdv4758h/ZipPy",
"id": "24bedbcf06372204cbd69b887fd3a4902bdf7672",
"size": "3085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edu.uci.python.benchmark/src/benchmarks/python-graph/tests/unittests-sorting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9447"
},
{
"name": "C",
"bytes": "106932"
},
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "Groff",
"bytes": "27753"
},
{
"name": "HTML",
"bytes": "721863"
},
{
"name": "Java",
"bytes": "1550721"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "33672733"
},
{
"name": "R",
"bytes": "1959"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3119"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
Window functions used e.g. for FFTs
"""
import numpy as np
__all__ = ["WindowFunctor", "create_window",
"create_and_apply_window"]
# Predefined windows
def create_window(size, window_id="blackman", param=None):
    """
    Create a new window numpy array.

    Parameters
    ----------
    size : int
        The length of the window to create.
    window_id : string or callable
        The name of the window ("blackman", "bartlett", "hamming",
        "hanning", "kaiser", "ones"/"none") or a callable that is
        invoked as window_id(size, param).
    param : number or None
        Only used for some windows (e.g. the kaiser beta,
        which defaults to 2.0).

    Raises
    ------
    ValueError
        If window_id is neither a known name nor callable.

    >>> create_window(500, "blackman")
    ... # NumPy array of size 500
    >>> create_window(500, myfunc, param=3.5)
    ... # result of calling myfunc(500, 3.5)
    """
    # NOTE: the docstring examples previously showed the arguments in the
    # wrong order (window first); the signature takes size first.
    if window_id == "blackman":
        return np.blackman(size)
    elif window_id == "bartlett":
        return np.bartlett(size)
    elif window_id == "hamming":
        return np.hamming(size)
    elif window_id == "hanning":
        return np.hanning(size)
    elif window_id == "kaiser":
        # Default beta of 2.0 when no parameter is given.
        return np.kaiser(size, 2.0 if param is None else param)
    elif window_id in ["ones", "none"]:
        # Identity window: applying it leaves the data unchanged.
        return np.ones(size)
    elif callable(window_id):
        return window_id(size, param)
    else:
        raise ValueError(f"Unknown window {window_id}")
def create_and_apply_window(data, window_id="blackman", param=None, inplace=False):
    """
    Multiply *data* by a window of matching length and return the result.

    Parameters
    ----------
    data : numpy array-like
        The data to use. Must be 1D.
    window_id : string or functor
        The name of the window to use.
        See create_window() documentation
    param : number or None
        The parameter used for certain windows.
        See create_window() documentation
    inplace : bool
        If True, data is modified in-place
        If False, data is not modified.
    """
    window = create_window(len(data), window_id, param)
    if not inplace:
        # Out-of-place: leave the caller's array untouched.
        return data * window
    data *= window
    return data
class WindowFunctor(object):
    """
    Callable that precomputes a window array of a fixed size once and
    applies it to data arrays of that same size.

    (Fixes the previously truncated class docstring.)
    """
    def __init__(self, size, window_id="blackman", param=None):
        """
        Create a new WindowFunctor and precompute the window array.

        Parameters
        ----------
        size : int
            Length of the window and of every data array passed to __call__.
        window_id : string or functor
            The name of the window to use.
            See create_window() documentation
        param : number or None
            The parameter used for certain windows.
            See create_window() documentation
        """
        self.size = size
        self.window = create_window(size, window_id, param=param)

    def __len__(self):
        """Return the window size."""
        return self.size

    def __call__(self, data, inplace=False):
        """
        Apply this window to a data array.

        Parameters
        ----------
        data : numpy array-like
            The data to apply the window to.
            The length of data must match self.size.
            This is verified.
        inplace : bool
            If True, data is modified in-place
            If False, data is not modified.

        Raises
        ------
        ValueError
            If len(data) does not match self.size.
        """
        if len(data) != self.size:
            raise ValueError(f"Data size {len(data)} does not match WindowFunctor size {self.size}")
        # Apply the precomputed window.
        if inplace:
            data *= self.window
            return data
        return data * self.window
|
{
"content_hash": "1f6669c5f5f4890f26169fd00ff36cdd",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 100,
"avg_line_length": 29.07079646017699,
"alnum_prop": 0.5863013698630137,
"repo_name": "ulikoehler/UliEngineering",
"id": "ea34fce4b24f6ebc9242e828bb73152cd2aa4eef",
"size": "3332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UliEngineering/SignalProcessing/Window.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "396308"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class VirtualMachineSize(Model):
    """Describes the properties of a VM size.

    :param name: The name of the virtual machine size.
    :type name: str
    :param number_of_cores: The number of cores supported by the virtual
     machine size.
    :type number_of_cores: int
    :param os_disk_size_in_mb: The OS disk size, in MB, allowed by the
     virtual machine size.
    :type os_disk_size_in_mb: int
    :param resource_disk_size_in_mb: The resource disk size, in MB, allowed
     by the virtual machine size.
    :type resource_disk_size_in_mb: int
    :param memory_in_mb: The amount of memory, in MB, supported by the
     virtual machine size.
    :type memory_in_mb: int
    :param max_data_disk_count: The maximum number of data disks that can be
     attached to the virtual machine size.
    :type max_data_disk_count: int
    """
    # Maps Python attribute names to wire-format keys and types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'number_of_cores': {'key': 'numberOfCores', 'type': 'int'},
        'os_disk_size_in_mb': {'key': 'osDiskSizeInMB', 'type': 'int'},
        'resource_disk_size_in_mb': {'key': 'resourceDiskSizeInMB', 'type': 'int'},
        'memory_in_mb': {'key': 'memoryInMB', 'type': 'int'},
        'max_data_disk_count': {'key': 'maxDataDiskCount', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineSize, self).__init__(**kwargs)
        # Every property is optional; missing keyword arguments become None.
        for attribute in ('name', 'number_of_cores', 'os_disk_size_in_mb',
                          'resource_disk_size_in_mb', 'memory_in_mb',
                          'max_data_disk_count'):
            setattr(self, attribute, kwargs.get(attribute, None))
|
{
"content_hash": "9c84fae4ca5cb839ebfb2bb850a2e891",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 84,
"avg_line_length": 43.54761904761905,
"alnum_prop": 0.63039912520503,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "0fef3efd6c8ea6f1cbdd3b2eb966230ef071e22e",
"size": "2303",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Tiles should be held by players in the game and are worth points.
Tiles can be one of three types: Tower, Tea or Palace. Palace tiles are
awarded for building the largest of a building color. Tower tiles are
awarded for building a building connected to a wall from a tower. Tea tiles
are awarded for claiming purple buildings.
If a tile is a tower tile, it will also hold merchants for the first player
who claims the tile.
Tiles are saved as a dictionary of either two or three elements.
type: Type of tile, Tea, Tower, or Palace.
value: Point value of the tile.
merchants: Only for Tower tiles, the number of merchants left on the
tile for players to pickup.
>>> tile = make_tile(TEA_TILE)
>>> get_tile_type(tile)
'TEA'
>>> get_tile_value(tile)
0
"""
from GameConstants import *
# Tile type codes stored under the 'type' key of a tile dictionary.
TEA_TILE = 'TEA'
TOWER_TILE = 'TOW'
PALACE_TILE = 'PAL'
# Map each building color to its palace tile point value (1-4), and back.
PALACE_VALUES = {BUILDINGS_COLORS[i]:i+1 for i in range(4)}
PALACE_COLORS = {i+1:BUILDINGS_COLORS[i] for i in range(4)}
def get_all_tiles():
    """Return a fresh list containing every tile used in a game."""
    tiles = get_tower_tiles()
    tiles += get_palace_tiles()
    tiles += get_tea_tiles()
    return tiles
def get_tower_tiles():
    """Return the four tower tiles, valued 1 through 4."""
    return [make_tile(TOWER_TILE, v) for v in range(1, 5)]
def get_palace_tiles():
    """Return the four palace tiles, valued 1 through 4."""
    return [make_tile(PALACE_TILE, v) for v in range(1, 5)]
def get_tea_tiles():
    """Return the six zero-value tea tiles."""
    return [make_tile(TEA_TILE) for _ in range(6)]
def get_tile_type(tile):
    """Return the type code ('TEA', 'TOW' or 'PAL') stored on *tile*."""
    return tile['type']
def get_tile_value(tile):
    """Return the point value stored on *tile*."""
    return tile['value']
def get_num_merchants(tile):
    """Gets the number of merchants on a tile.

    Only tower tiles carry a 'merchants' field; for any other tile a
    KeyError is raised.  An explicit check replaces the old ``assert``,
    which silently disappeared under ``python -O``.

    >>> tile = make_tile(TOWER_TILE, 3)
    >>> get_tile_type(tile)
    'TOW'
    >>> get_tile_value(tile)
    3
    >>> get_num_merchants(tile)
    1
    """
    if 'merchants' not in tile:
        raise KeyError('merchants')
    return tile['merchants']
def get_palace_tile_color(tile):
    """Return the BUILDINGS_COLORS name matching a palace tile's value."""
    assert get_tile_type(tile) == PALACE_TILE
    color_index = get_tile_value(tile) - 1
    return BUILDINGS_COLORS[color_index]
def take_merchants(tile):
    """Remove every merchant from *tile* and return how many were taken."""
    taken = get_num_merchants(tile)
    tile['merchants'] = 0
    return taken
def clone_tile(tile):
    """Return a shallow copy of *tile*."""
    return dict(tile)
def make_tile(tile_type, value=0):
    """Make and return a tile of the given type and point value.

    If the type is tower (the old docstring incorrectly said "merchant"),
    ``4 - value`` merchants are added to the tile for the first player
    who claims it.
    """
    tile = dict()
    tile['type'] = tile_type
    tile['value'] = value
    if tile_type == TOWER_TILE:
        tile['merchants'] = 4 - value
    return tile
|
{
"content_hash": "be706b9e7752547d0953dc80bc172900",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 78,
"avg_line_length": 31.08080808080808,
"alnum_prop": 0.650633734156646,
"repo_name": "nicholas-maltbie/Medina",
"id": "629869da69fa1a2a4d2267c9bfe609624dc53673",
"size": "3077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "180812"
}
],
"symlink_target": ""
}
|
"""
test_sanity_codec.py
sanity test for CodecService and CodecServiceProvider
"""
import unittest
try:
from ydk.models.ydktest.ydktest_sanity import Runner, Native, YdkEnumTest
from ydk.models.ydktest.openconfig_routing_policy import RoutingPolicy
from ydk.models.ydktest.oc_pattern import OcA
from ydk.models.ydktest.ydktest_sanity_typedefs import System, TopMode
except ImportError:
from ydk.models.ydktest.ydktest_sanity.runner.runner import Runner
from ydk.models.ydktest.ydktest_sanity.native.native import Native
from ydk.models.ydktest.ydktest_sanity.ydktest_sanity import YdkEnumTest
from ydk.models.ydktest import openconfig_routing_policy
from ydk.models.ydktest.openconfig_routing_policy.routing_policy.routing_policy import RoutingPolicy
from ydk.models.ydktest.oc_pattern.oc_a.oc_a import OcA
from ydk.models.ydktest.ydktest_sanity_typedefs.system.system import System
from ydk.models.ydktest.ydktest_sanity_typedefs.ydktest_sanity_typedefs import TopMode
from ydk.providers import CodecServiceProvider
from ydk.services import CodecService
from ydk.errors import YServiceError
from ydk.types import EncodingFormat
from test_utils import assert_with_error
_xml_enum_payload_1 = '''<built-in-t xmlns="http://cisco.com/ns/yang/ydktest-sanity">
<enum-value>local</enum-value>
</built-in-t>
'''
_json_enum_payload_1 = """{
"ydktest-sanity:built-in-t": {
"enum-value": "local"
}
}
"""
_xml_enum_payload_2 = '''<runner xmlns="http://cisco.com/ns/yang/ydktest-sanity">
<ytypes>
<built-in-t>
<enum-value>local</enum-value>
</built-in-t>
</ytypes>
</runner>
'''
_xml_runner_payload = '''<runner xmlns="http://cisco.com/ns/yang/ydktest-sanity">
<two-list>
<ldata>
<number>21</number>
<name>runner:twolist:ldata[21]:name</name>
<subl1>
<number>211</number>
<name>runner:twolist:ldata[21]:subl1[211]:name</name>
</subl1>
<subl1>
<number>212</number>
<name>runner:twolist:ldata[21]:subl1[212]:name</name>
</subl1>
</ldata>
<ldata>
<number>22</number>
<name>runner:twolist:ldata[22]:name</name>
<subl1>
<number>221</number>
<name>runner:twolist:ldata[22]:subl1[221]:name</name>
</subl1>
<subl1>
<number>222</number>
<name>runner:twolist:ldata[22]:subl1[222]:name</name>
</subl1>
</ldata>
</two-list>
</runner>
'''
_json_runner_payload = """{
"ydktest-sanity:runner": {
"two-list": {
"ldata": [
{
"number": 21,
"name": "runner:twolist:ldata[21]:name",
"subl1": [
{
"number": 211,
"name": "runner:twolist:ldata[21]:subl1[211]:name"
},
{
"number": 212,
"name": "runner:twolist:ldata[21]:subl1[212]:name"
}
]
},
{
"number": 22,
"name": "runner:twolist:ldata[22]:name",
"subl1": [
{
"number": 221,
"name": "runner:twolist:ldata[22]:subl1[221]:name"
},
{
"number": 222,
"name": "runner:twolist:ldata[22]:subl1[222]:name"
}
]
}
]
}
}
}
"""
_xml_oc_pattern_payload = '''<oc-A xmlns="http://cisco.com/ns/yang/oc-pattern">
<a>Hello</a>
</oc-A>
'''
_json_oc_pattern_payload = """{
"oc-pattern:oc-A": [
{
"a": "Hello",
"B": {
"b": "Hello"
}
}
]
}"""
def _get_runner_entity():
    """Build a Runner with ldata entries 21 and 22, each holding two subl1
    children, all named after their key path."""
    runner = Runner()
    for outer in (21, 22):
        ldata = Runner.TwoList.Ldata()
        ldata.number = outer
        ldata.name = 'runner:twolist:ldata[' + str(outer) + ']:name'
        children = []
        for inner in (outer * 10 + 1, outer * 10 + 2):
            sub = Runner.TwoList.Ldata.Subl1()
            sub.number = inner
            sub.name = ('runner:twolist:ldata[' + str(outer) + ']:subl1['
                        + str(inner) + ']:name')
            children.append(sub)
        ldata.subl1.extend(children)
        runner.two_list.ldata.extend([ldata])
    return runner
class SanityYang(unittest.TestCase):
@classmethod
def setUpClass(self):
self.maxDiff = None
self.codec = CodecService()
self.provider = CodecServiceProvider(type='xml')
self.json_provider = CodecServiceProvider(type='json')
def test_xml_encode_1(self):
r_1 = _get_runner_entity()
payload = self.codec.encode(self.provider, r_1)
self.assertEqual(_xml_runner_payload, payload)
ldata_list = r_1.two_list.ldata
ldata_keys = ldata_list.keys()
self.assertEqual(ldata_keys, ['21', '22'])
for lkey in ldata_keys:
ldata = ldata_list[lkey]
self.assertNotEqual(ldata, None)
ldata_21 = ldata_list['21']
self.assertEqual(ldata_21.number, 21)
self.assertEqual(ldata_21.name, 'runner:twolist:ldata[21]:name')
subl_21 = ldata_21.subl1
subl_21_keys = subl_21.keys()
self.assertEqual(subl_21_keys, ['211', '212'])
subl_21_212 = subl_21['212']
self.assertEqual(subl_21_212.name, 'runner:twolist:ldata[21]:subl1[212]:name')
def test_xml_encode_2(self):
r_1 = Runner.Ytypes.BuiltInT()
r_1.enum_value = YdkEnumTest.local
payload = self.codec.encode(self.provider, r_1)
self.assertEqual(_xml_enum_payload_1, payload)
@assert_with_error("'provider' and 'entity_holder' cannot be None", YServiceError)
def test_encode_invalid_1(self):
self.codec.encode(self.provider, None)
@assert_with_error("'provider' and 'entity_holder' cannot be None", YServiceError)
def test_encode_invalid_2(self):
self.codec.encode(None, _get_runner_entity())
@assert_with_error("'provider' and 'entity_holder' cannot be None", YServiceError)
def test_encode_invalid_3(self):
self.codec.encode(None, None)
def test_xml_decode_1(self):
entity = self.codec.decode(self.provider, _xml_enum_payload_2)
self.assertEqual(
_xml_enum_payload_2, self.codec.encode(self.provider, entity))
@assert_with_error("'provider' and 'payload_holder' cannot be None", YServiceError)
def test_decode_invalid_1(self):
self.codec.decode(None, _xml_enum_payload_2)
@assert_with_error("'provider' and 'payload_holder' cannot be None", YServiceError)
def test_decode_invalid_2(self):
self.codec.decode(self.provider, None)
@assert_with_error("'provider' and 'payload_holder' cannot be None", YServiceError)
def test_decode_invalid_3(self):
self.codec.decode(None, None)
def test_xml_encode_decode(self):
r_1 = _get_runner_entity()
payload = self.codec.encode(self.provider, r_1)
entity = self.codec.decode(self.provider, payload)
self.assertEqual(r_1, entity)
self.assertEqual(payload, self.codec.encode(self.provider, entity))
def test_xml_encode_decode_dict(self):
r_1 = _get_runner_entity()
r_entity = {'ydktest-sanity': r_1}
payload = self.codec.encode(self.provider, r_entity)
entity = self.codec.decode(self.provider, payload)
for module in entity:
self.assertEqual(r_entity[module], entity[module])
self.assertEqual(payload, self.codec.encode(self.provider, entity))
def test_xml_decode_oc_pattern(self):
obj_a = OcA()
obj_a.a = 'Hello'
entity = self.codec.decode(self.provider, _xml_oc_pattern_payload)
self.assertEqual(obj_a.a, entity.a)
# JSON
def test_json_encode_1(self):
json_provider = CodecServiceProvider(type='json')
entity = _get_runner_entity()
payload = self.codec.encode(json_provider, entity)
self.assertEqual(_json_runner_payload, payload)
def test_json_encode_2(self):
r_1 = Runner.Ytypes.BuiltInT()
r_1.enum_value = YdkEnumTest.local
payload = self.codec.encode(self.json_provider, r_1)
self.assertEqual(_json_enum_payload_1, payload)
def test_json_decode_1(self):
entity = self.codec.decode(self.json_provider, _json_runner_payload)
payload = self.codec.encode(self.json_provider, entity)
self.assertEqual(_json_runner_payload, payload)
def test_json_encode_decode(self):
runner = _get_runner_entity()
payload = self.codec.encode(self.json_provider, runner)
entity = self.codec.decode(self.json_provider, payload)
self.assertEqual(runner, entity)
self.assertEqual(payload, self.codec.encode(self.json_provider, entity))
def test_json_encode_decode_dict(self):
entity = _get_runner_entity()
entity_holder = {'ydktest-sanity': entity}
payload_holder = self.codec.encode(self.json_provider, entity_holder)
entities = self.codec.decode(self.json_provider, payload_holder)
for module in entities:
self.assertEqual(entities[module], entities[module])
self.assertEqual(payload_holder[module],
self.codec.encode(self.json_provider, entities[module]))
@unittest.skip('encodes to "oc-pattern:a": "(!error!)"')
def test_json_encode_oc_pattern(self):
obj_a = OcA()
obj_a.a = 'Hello'
obj_a.b.b = 'Hello'
self.assertEqual(self.codec.encode(self.json_provider, obj_a),
_json_oc_pattern_payload)
@unittest.skip('YCoreError: YCodecError:Unknown element "oc-A".. Path:')
def test_json_decode_oc_pattern(self):
entity = self.codec.decode(self.json_provider, _json_oc_pattern_payload)
self.assertEqual(entity.a.get(), 'Hello')
self.assertEqual(entity.b.b.get(), 'Hello')
def test_xml_subtree(self):
r_1 = _get_runner_entity()
payload = self.codec.encode(self.provider, r_1, subtree=True)
self.assertEqual(_xml_runner_payload[:-1], payload)
r_2 = self.codec.decode(self.provider, payload, subtree=True)
self.assertEqual(r_1, r_2)
def test_embedded_quote_leaflist_value(self):
"""<routing-policy xmlns="http://openconfig.net/yang/routing-policy">
<defined-sets>
<bgp-defined-sets xmlns="http://openconfig.net/yang/bgp-policy">
<community-sets>
<community-set>
<community-set-name>COMMUNITY-SET1</community-set-name>
<config>
<community-set-name>COMMUNITY-SET1</community-set-name>
<community-member>ios-regex '^65172:17...$'</community-member>
<community-member>65172:16001</community-member>
</config>
<state>
<community-set-name>COMMUNITY-SET1</community-set-name>
<community-member>ios-regex '^65172:17...$'</community-member>
<community-member>65172:16001</community-member>
</state>
</community-set>
</community-sets>
</bgp-defined-sets>
</defined-sets>
</routing-policy>
"""
routing_policy = RoutingPolicy()
com = RoutingPolicy.DefinedSets.BgpDefinedSets.CommunitySets.CommunitySet()
com.community_set_name = "COMMUNITY-SET1"
com.config.community_set_name = "COMMUNITY-SET1"
com.config.community_member.append("ios-regex '^65172:17...$'")
com.config.community_member.append("65172:16001")
com.state.community_set_name = "COMMUNITY-SET1"
com.state.community_member.append("ios-regex '^65172:17...$'")
com.state.community_member.append("65172:16001")
routing_policy.defined_sets.bgp_defined_sets.community_sets.community_set.append(com)
xml_provider = CodecServiceProvider(type='xml')
payload = self.codec.encode(xml_provider, routing_policy)
routing_policy_decode = self.codec.decode(xml_provider, payload)
if routing_policy == routing_policy_decode: # TODO failing on travis for --one-module-per-class option
self.assertEqual(routing_policy, routing_policy_decode)
def test_list_no_keys(self):
payload = '''<runner xmlns="http://cisco.com/ns/yang/ydktest-sanity">
<no-key-list>
<test>abc</test>
</no-key-list>
<no-key-list>
<test>xyz</test>
</no-key-list>
</runner>'''
xml_provider = CodecServiceProvider(type='xml')
no_key = self.codec.decode(xml_provider, payload)
no_key_payload = self.codec.encode(xml_provider, no_key, subtree=True)
self.assertEqual(payload, no_key_payload)
def test_anyxml(self):
payload = '<?xml version="1.0"?><runner xmlns="http://cisco.com/ns/yang/ydktest-sanity"/>'
result = self.codec.decode(self.provider, payload)
self.assertIsNotNone(result)
payload = '''<?xml version="1.0"?>
<runner xmlns="http://cisco.com/ns/yang/ydktest-sanity"/>'''
result = self.codec.decode(self.provider, payload)
self.assertIsNotNone(result)
def test_decode_json_subtree(self):
entity = self.codec.decode(self.json_provider, '{"ydktest-sanity:runner": null}', subtree=True)
self.assertEqual(Runner(), entity)
def test_encode_json_subtree_(self):
json = self.codec.encode(self.json_provider, Runner(), pretty=False, subtree=True)
expected = '''{"ydktest-sanity:runner":null}'''
self.assertEqual(expected, json)
def test_encode_decode_typedefs(self):
system_encode = System()
system_encode.mode = TopMode.stand_alone
system_encode.id = 22
payload = self.codec.encode(self.provider, system_encode)
system_decode = self.codec.decode(self.provider, payload)
self.assertEqual(system_encode, system_decode)
def test_encode_decode_list(self):
runner = Runner()
runner.two.number = 2
native = Native()
native.version = '0.1.0'
xml_encode = self.codec.encode(self.provider, [runner, native])
entity_list = self.codec.decode(self.provider, xml_encode)
self.assertEqual(entity_list, [runner, native])
def test_codec_json(self):
runner = Runner()
runner.two.number = 2
native = Native()
native.version = '0.1.0'
json_encode = self.codec.encode(self.json_provider, [runner, native])
entity_list = self.codec.decode(self.json_provider, json_encode)
self.assertEqual(entity_list, [runner, native])
def test_passive_codec(self):
e='''<runner xmlns="http://cisco.com/ns/yang/ydktest-sanity"><passive><name>xyz</name><interfac><test>abc</test></interfac><testc xmlns="http://cisco.com/ns/yang/ydktest-sanity-augm"><xyz><xyz>25</xyz></xyz></testc></passive></runner>'''
runner = Runner()
p = runner.Passive()
p.name = "xyz"
i = runner.Passive.Interfac()
i.test = "abc"
p.interfac.append(i)
p.testc.xyz = runner.Passive.Testc.Xyz()
p.testc.xyz.xyz = 25
runner.passive.append(p)
x = self.codec.encode(self.provider, runner, False)
self.assertEqual(x, e)
def test_augment_subtree(self):
passive = Runner.Passive()
passive.name = "xyz"
ifc = Runner.Passive.Interfac()
ifc.test = "abc"
passive.interfac.append(ifc)
passive.testc.xyz = Runner.Passive.Testc.Xyz()
passive.testc.xyz.parent = passive;
passive.testc.xyz.xyz = 25
xml = self.codec.encode(self.provider, passive, subtree=True);
expected = '''<passive xmlns="http://cisco.com/ns/yang/ydktest-sanity">
<name>xyz</name>
<interfac>
<test>abc</test>
</interfac>
<testc xmlns="http://cisco.com/ns/yang/ydktest-sanity-augm">
<xyz>
<xyz>25</xyz>
</xyz>
</testc>
</passive>'''
self.assertEqual(expected, xml)
def test_bool_lists(self):
r = Runner()
r.ytypes.built_in_t.bool_leaf_list.append(True)
r.ytypes.built_in_t.bool_leaf_list.append(False)
bool_list_elem = Runner.Ytypes.BuiltInT.BoolList()
bool_list_elem.bool_leaf = True
r.ytypes.built_in_t.bool_list.append(bool_list_elem)
xml = self.codec.encode(self.provider, r)
expected = '''<runner xmlns="http://cisco.com/ns/yang/ydktest-sanity">
<ytypes>
<built-in-t>
<bool-leaf-list>true</bool-leaf-list>
<bool-leaf-list>false</bool-leaf-list>
<bool-list>
<bool-leaf>true</bool-leaf>
</bool-list>
</built-in-t>
</ytypes>
</runner>
'''
self.assertEqual(expected, xml)
entity = self.codec.decode(self.provider, xml)
self.assertEqual(entity, r)
def test_bool_lists_json(self):
r = Runner()
r.ytypes.built_in_t.bool_leaf_list.append(True)
r.ytypes.built_in_t.bool_leaf_list.append(False)
bool_list_elem = Runner.Ytypes.BuiltInT.BoolList()
bool_list_elem.bool_leaf = True
r.ytypes.built_in_t.bool_list.append(bool_list_elem)
json = self.codec.encode(self.json_provider, r)
expected = '''{
"ydktest-sanity:runner": {
"ytypes": {
"built-in-t": {
"bool-leaf-list": [
true,
false
],
"bool-list": [
{
"bool-leaf": true
}
]
}
}
}
}
'''
self.assertEqual(expected, json)
entity = self.codec.decode(self.json_provider, json)
self.assertEqual(entity, r)
def test_json_string_empty_value(self):
r = Runner()
r.ytypes.built_in_t.name = ""
json = self.codec.encode(self.json_provider, r, pretty=False)
expected = '''{"ydktest-sanity:runner":{"ytypes":{"built-in-t":{"name":""}}}}'''
self.assertEqual(expected, json)
entity = self.codec.decode(self.json_provider, json)
self.assertEqual(entity, r)
if __name__ == '__main__':
    # Run the sanity suite and propagate pass/fail through the exit status.
    import sys
    loader = unittest.TestLoader()
    result = unittest.TextTestRunner(verbosity=2).run(
        loader.loadTestsFromTestCase(SanityYang))
    sys.exit(0 if result.wasSuccessful() else 1)
|
{
"content_hash": "28bbdb36287c82bb4e3e4be4456417c4",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 245,
"avg_line_length": 34.391465677179966,
"alnum_prop": 0.6171440901979824,
"repo_name": "CiscoDevNet/ydk-gen",
"id": "85f7e5c64ae9fec95665ed5e9b5e5156d31a69c7",
"size": "19549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/python/core/tests/test_sanity_codec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "21945"
},
{
"name": "C",
"bytes": "15875"
},
{
"name": "C++",
"bytes": "3529963"
},
{
"name": "CMake",
"bytes": "120070"
},
{
"name": "CSS",
"bytes": "134"
},
{
"name": "Dockerfile",
"bytes": "770"
},
{
"name": "Go",
"bytes": "566728"
},
{
"name": "Makefile",
"bytes": "960022"
},
{
"name": "Python",
"bytes": "1052712"
},
{
"name": "Ruby",
"bytes": "4023"
},
{
"name": "Shell",
"bytes": "153786"
}
],
"symlink_target": ""
}
|
import attr
import pytest
from platform_alarms import (
get_human_message,
guess_cloudwatch_log_group,
guess_cloudwatch_search_terms,
is_critical_error,
simplify_message,
)
# Minimal stand-in for an alarm notification object: the helpers under
# test only ever read the `name` attribute.
@attr.s
class Alarm:
    name = attr.ib()
@pytest.mark.parametrize(
    "alarm_name, expected_log_group_name",
    [
        ("loris-alb-target-500-errors", "platform/loris"),
        ("loris-alb-not-enough-healthy-hosts", "platform/loris"),
        ("catalogue-api-remus-5xx-alarm", "ecs/catalogue_api_gw-remus"),
        (
            "lambda-ecs_ec2_instance_tagger-errors",
            "/aws/lambda/ecs_ec2_instance_tagger",
        ),
        ("lambda-post_to_slack-errors", "/aws/lambda/post_to_slack"),
        (
            "lambda-reindex_shard_generator_vhs-sourcedata-sierra-errors",
            "/aws/lambda/reindex_shard_generator_vhs-sourcedata-sierra",
        ),
    ],
)
def test_guess_cloudwatch_log_group(alarm_name, expected_log_group_name):
    # Every known alarm-name pattern must resolve to its CloudWatch log group.
    assert guess_cloudwatch_log_group(alarm_name) == expected_log_group_name
@pytest.mark.parametrize(
    "bad_alarm_name",
    ["api_remus_v2-alb-target-500-errors", "winnipeg-not-enough-healthy-hosts"],
)
def test_unrecognised_log_group_name_is_valueerror(bad_alarm_name):
    # Alarm names with no known log-group mapping must raise ValueError
    # rather than guess a log group.
    with pytest.raises(ValueError):
        guess_cloudwatch_log_group(bad_alarm_name)
@pytest.mark.parametrize(
    "alarm_name, expected_search_terms",
    [
        ("loris-alb-target-500-errors", ['"HTTP/1.0 500"']),
        ("loris-alb-not-enough-healthy-hosts", []),
        ("catalogue-api-remus-5xx-alarm", ['"HTTP 500"']),
        (
            "lambda-ecs_ec2_instance_tagger-errors",
            ["Traceback", "Task timed out after"],
        ),
        ("lambda-post_to_slack-errors", ["Traceback", "Task timed out after"]),
    ],
)
def test_guess_cloudwatch_search_terms(alarm_name, expected_search_terms):
    # Each alarm type maps to the log search terms used to locate its errors;
    # an empty list means there is nothing useful to search for.
    assert guess_cloudwatch_search_terms(alarm_name) == expected_search_terms
@pytest.mark.parametrize(
    "alarm_name, expected_is_critical_error",
    [
        ("catalogue-api-romulus-alb-target-400-errors", True),
        ("catalogue-api-remus-alb-target-500-errors", True),
        ("storage-api-5xx-alarm", True),
        ("loris-alb-not-enough-healthy-hosts", True),
        ("id_minter-alb-unhealthy-hosts", True),
        ("ingestor-alb-unhealthy-hosts", True),
        ("transformer-alb-not-enough-healthy-hosts", True),
        ("grafana-alb-target-500-errors", True),
        ("IngestorWorkerService_TerminalFailure", True),
        ("sierra_items_windows_dlq_not_empty", False),
        ("lambda-post_to_slack-errors", False),
        ("unknown-alarm-type", True),
    ],
)
def test_is_critical_error(alarm_name, expected_is_critical_error):
    # Per the cases above, unknown alarm types are treated as critical;
    # DLQ and Slack-lambda alarms are not.
    assert is_critical_error(alarm_name) == expected_is_critical_error
@pytest.mark.parametrize(
    "message, expected",
    [
        # We correctly strip timestamp and thread information from Scala logs
        (
            "13:25:56.965 [ForkJoinPool-1-worker-61] ERROR u.a.w.p.a.f.e.ElasticsearchResponseExceptionMapper - Sending HTTP 500 from ElasticsearchResponseExceptionMapper (Error (com.fasterxml.jackson.core.JsonParseException: Unrecognized token ‘No’: was expecting ‘null’, ‘true’, ‘false’ or NaN",
            "ERROR u.a.w.p.a.f.e.ElasticsearchResponseExceptionMapper - Sending HTTP 500 from ElasticsearchResponseExceptionMapper (Error (com.fasterxml.jackson.core.JsonParseException: Unrecognized token ‘No’: was expecting ‘null’, ‘true’, ‘false’ or NaN",
        ),
        # We strip UWGSI and timestamp prefixes from Loris logs
        (
            "[pid: 88|app: 0|req: 1871/9531] 172.17.0.4 () {46 vars in 937 bytes} [Wed Oct 11 22:42:03 2017] GET //wordpress:2014/05/untitled3.png/full/320,/0/default.jpg (HTTP/1.0 500)",
            "GET //wordpress:2014/05/untitled3.png/full/320,/0/default.jpg (HTTP/1.0 500)",
        ),
        # We strip UWSGI suffixes from Loris logs
        (
            "GET //wordpress:2014/05/untitled2.png/full/320,/0/default.jpg (HTTP/1.0 500) 3 headers in 147 bytes (1 switches on core 0)",
            "GET //wordpress:2014/05/untitled2.png/full/320,/0/default.jpg (HTTP/1.0 500)",
        ),
        # We strip byte count and timings from Loris logs
        (
            "GET //s3:L0009000/L0009709.jpg/full/282,/0/default.jpg => generated 271 bytes in 988 msecs (HTTP/1.0 500)",
            "GET //s3:L0009000/L0009709.jpg/full/282,/0/default.jpg (HTTP/1.0 500)",
        ),
        # We strip the timestamp and Lambda ID from timeout errors
        (
            "2017-10-12T13:18:31.917Z d1fdfca5-af4f-11e7-a100-030f2a39c6f6 Task timed out after 10.01 seconds",
            "Task timed out after 10.01 seconds",
        ),
    ],
)
def test_simplify_message(message, expected):
    # simplify_message() strips log-framework noise (timestamps, thread
    # names, uWSGI bookkeeping) while keeping the actual error text.
    assert simplify_message(message) == expected
@pytest.mark.parametrize(
    "alarm_name, state_reason, expected_message",
    [
        (
            "sierra_items_windows_dlq_not_empty",
            "Threshold Crossed: 1 datapoint [1.0 (01/01/01 12:00:00)] was greater than the threshold (0.0).",
            "There is 1 item on the sierra_items_windows DLQ.",
        ),
        (
            "transformer_dlq_not_empty",
            "Threshold Crossed: 1 datapoint [3.0 (01/01/01 12:00:00)] was greater than the threshold (0.0).",
            "There are 3 items on the transformer DLQ.",
        ),
        (
            "id_minter_dlq_not_empty",
            "Threshold Crossed: 1 datapoint [17.0 (01/01/01 12:00:36)] was greater than the threshold (0.0).",
            "There are 17 items on the id_minter DLQ.",
        ),
        (
            "id_minter-alb-unhealthy-hosts",
            "Threshold Crossed: 1 datapoint [1.0 (01/01/01 12:00:00)] was greater than the threshold (0.0).",
            "There is 1 unhealthy target in the id_minter ALB target group.",
        ),
        (
            "loris-alb-unhealthy-hosts",
            "Threshold Crossed: 1 datapoint [3.0 (01/01/01 12:00:00)] was greater than the threshold (0.0).",
            "There are 3 unhealthy targets in the loris ALB target group.",
        ),
        (
            "api_romulus_v1-alb-not-enough-healthy-hosts",
            "Threshold Crossed: 1 datapoint [0.0 (09/01/18 10:36:00)] was less than the threshold (1.0).",
            "There aren't enough healthy targets in the api_romulus_v1 ALB target group (saw 0, expected at least 1).",
        ),
        (
            "ingestor-alb-not-enough-healthy-hosts",
            "Threshold Crossed: 1 datapoint [2.0 (09/01/18 10:36:00)] was less than the threshold (3.0).",
            "There aren't enough healthy targets in the ingestor ALB target group (saw 2, expected at least 3).",
        ),
        (
            "api_remus_v1-alb-not-enough-healthy-hosts",
            "Threshold Crossed: no datapoints were received for 1 period and 1 missing datapoint was treated as [Breaching].",
            "There are no healthy hosts in the api_remus_v1 ALB target group.",
        ),
        (
            "api_remus_v1-alb-target-500-errors",
            "Threshold Crossed: 1 datapoint [3.0 (11/08/18 10:55:00)] was greater than or equal to the threshold (1.0).",
            "There were multiple 500 errors (3) from the api_remus_v1 ALB target group.",
        ),
        (
            "grafana-alb-target-500-errors",
            "Threshold Crossed: 1 datapoint [1.0 (11/08/18 10:55:00)] was greater than or equal to the threshold (1.0).",
            "There was a 500 error from the grafana ALB target group.",
        ),
        (
            "lambda-update_ecs_service_size-errors",
            "Threshold Crossed: 1 datapoint [1.0 (02/02/18 13:20:00)] was greater than or equal to the threshold (1.0).)",
            "There was an error in the update_ecs_service_size Lambda.",
        ),
        (
            "lambda-post_to_slack-errors",
            "Threshold Crossed: 1 datapoint [4.0 (02/02/18 13:20:00)] was greater than or equal to the threshold (1.0).)",
            "There were 4 errors in the post_to_slack Lambda.",
        ),
        (
            "unrecognised-alarm-name",
            "Threshold Crossed: 1 datapoint [1.0 (01/01/01 12:00:00)] was less than the threshold (0.0).",
            "Threshold Crossed: 1 datapoint [1.0 (01/01/01 12:00:00)] was less than the threshold (0.0).",
        ),
    ],
)
def test_get_human_message(alarm_name, state_reason, expected_message):
    """get_human_message renders an alarm name + CloudWatch state reason as a
    human-readable message (extracting the datapoint count and pluralising
    correctly), and falls back to the raw state reason for unknown alarms."""
    message = get_human_message(alarm_name=alarm_name, state_reason=state_reason)
    assert message == expected_message
|
{
"content_hash": "14fae866d77f31509c5f123a42e8a3dc",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 297,
"avg_line_length": 44.390625,
"alnum_prop": 0.6151589815792561,
"repo_name": "wellcometrust/platform-api",
"id": "923a24607f402cae5d2b1fc817a7e348b28215df",
"size": "8578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/post_to_slack/src/test_platform_alarms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4675"
},
{
"name": "HCL",
"bytes": "137435"
},
{
"name": "HTML",
"bytes": "521"
},
{
"name": "JavaScript",
"bytes": "12557"
},
{
"name": "Makefile",
"bytes": "9394"
},
{
"name": "Python",
"bytes": "134051"
},
{
"name": "Scala",
"bytes": "363435"
},
{
"name": "Shell",
"bytes": "12568"
}
],
"symlink_target": ""
}
|
import os
# try/except added for compatibility with python < 3.8
try:
from unittest import mock
from unittest.mock import AsyncMock # pragma: NO COVER
except ImportError: # pragma: NO COVER
import mock
import math
from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
import grpc
from grpc.experimental import aio
from proto.marshal.rules import wrappers
from proto.marshal.rules.dates import DurationRule, TimestampRule
import pytest
from google.cloud.texttospeech_v1beta1.services.text_to_speech import (
TextToSpeechAsyncClient,
TextToSpeechClient,
transports,
)
from google.cloud.texttospeech_v1beta1.types import cloud_tts
def client_cert_source_callback():
    """Fake client-certificate provider used by the mTLS tests.

    Returns a ``(cert, key)`` pair of placeholder byte strings.
    """
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in default endpoint for *client*.

    If the client's ``DEFAULT_ENDPOINT`` points at localhost, substitute a
    fixed dummy hostname so the mTLS-endpoint derivation logic can still be
    exercised; otherwise return the endpoint unchanged.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mtls.*
    variants, is idempotent on already-mtls hosts, and passes through None
    and non-Google endpoints unchanged."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert TextToSpeechClient._get_default_mtls_endpoint(None) is None
    assert (
        TextToSpeechClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    )
    assert (
        TextToSpeechClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        TextToSpeechClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        TextToSpeechClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert TextToSpeechClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (TextToSpeechClient, "grpc"),
        (TextToSpeechAsyncClient, "grpc_asyncio"),
    ],
)
def test_text_to_speech_client_from_service_account_info(client_class, transport_name):
    """from_service_account_info builds a client whose transport carries the
    credentials produced by the mocked service-account factory and targets
    the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("texttospeech.googleapis.com:443")
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.TextToSpeechGrpcTransport, "grpc"),
        (transports.TextToSpeechGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_text_to_speech_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport calls with_always_use_jwt_access(True) on service-account
    credentials when always_use_jwt_access=True, and not at all when False."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (TextToSpeechClient, "grpc"),
        (TextToSpeechAsyncClient, "grpc_asyncio"),
    ],
)
def test_text_to_speech_client_from_service_account_file(client_class, transport_name):
    """Both from_service_account_file and its from_service_account_json alias
    build a client using the credentials from the mocked factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("texttospeech.googleapis.com:443")
def test_text_to_speech_client_get_transport_class():
    """get_transport_class returns a known transport by default and the gRPC
    transport when asked for by name."""
    transport = TextToSpeechClient.get_transport_class()
    available_transports = [
        transports.TextToSpeechGrpcTransport,
    ]
    assert transport in available_transports
    transport = TextToSpeechClient.get_transport_class("grpc")
    assert transport == transports.TextToSpeechGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc"),
        (
            TextToSpeechAsyncClient,
            transports.TextToSpeechGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    TextToSpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextToSpeechClient)
)
@mock.patch.object(
    TextToSpeechAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TextToSpeechAsyncClient),
)
def test_text_to_speech_client_client_options(
    client_class, transport_class, transport_name
):
    """ClientOptions and GOOGLE_API_USE_MTLS_ENDPOINT drive how the transport
    is constructed: explicit api_endpoint, "never"/"always" mTLS modes,
    invalid env values, quota_project_id, and api_audience."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(TextToSpeechClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(TextToSpeechClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_audience is provided
    options = client_options.ClientOptions(
        api_audience="https://language.googleapis.com"
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience="https://language.googleapis.com",
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc", "true"),
        (
            TextToSpeechAsyncClient,
            transports.TextToSpeechGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc", "false"),
        (
            TextToSpeechAsyncClient,
            transports.TextToSpeechGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    TextToSpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextToSpeechClient)
)
@mock.patch.object(
    TextToSpeechAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TextToSpeechAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_text_to_speech_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the mTLS
    endpoint iff GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a client
    cert (explicit or ADC-discovered) is available."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance built in
                    # the previous case; only class-level endpoint constants are
                    # read, so the expectations are computed before the new
                    # client is constructed below.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                        api_audience=None,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                    api_audience=None,
                )
@pytest.mark.parametrize("client_class", [TextToSpeechClient, TextToSpeechAsyncClient])
@mock.patch.object(
    TextToSpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TextToSpeechClient)
)
@mock.patch.object(
    TextToSpeechAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TextToSpeechAsyncClient),
)
def test_text_to_speech_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert_source) from
    ClientOptions plus the GOOGLE_API_USE_CLIENT_CERTIFICATE and
    GOOGLE_API_USE_MTLS_ENDPOINT environment switches."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TextToSpeechClient, transports.TextToSpeechGrpcTransport, "grpc"),
        (
            TextToSpeechAsyncClient,
            transports.TextToSpeechGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_text_to_speech_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via ClientOptions are forwarded verbatim to the
    transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            TextToSpeechClient,
            transports.TextToSpeechGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            TextToSpeechAsyncClient,
            transports.TextToSpeechGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_text_to_speech_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file set in ClientOptions is forwarded to the transport
    constructor (with credentials left as None)."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
def test_text_to_speech_client_client_options_from_dict():
    """client_options may be supplied as a plain dict; the api_endpoint key is
    forwarded to the transport as the host."""
    with mock.patch(
        "google.cloud.texttospeech_v1beta1.services.text_to_speech.transports.TextToSpeechGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = TextToSpeechClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            TextToSpeechClient,
            transports.TextToSpeechGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            TextToSpeechAsyncClient,
            transports.TextToSpeechGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_text_to_speech_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a credentials_file are the ones actually used
    when the gRPC channel is created (not the ADC credentials)."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "texttospeech.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="texttospeech.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        cloud_tts.ListVoicesRequest,
        dict,
    ],
)
def test_list_voices(request_type, transport: str = "grpc"):
    """list_voices sends a ListVoicesRequest over the mocked stub and returns
    a ListVoicesResponse; both proto and dict request forms are accepted."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tts.ListVoicesResponse()
        response = client.list_voices(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tts.ListVoicesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, cloud_tts.ListVoicesResponse)
def test_list_voices_empty_call():
    """Calling list_voices() with no arguments still issues an (empty)
    ListVoicesRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
        client.list_voices()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tts.ListVoicesRequest()
@pytest.mark.asyncio
async def test_list_voices_async(
    transport: str = "grpc_asyncio", request_type=cloud_tts.ListVoicesRequest
):
    """Async list_voices awaits the stub call and returns a
    ListVoicesResponse."""
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tts.ListVoicesResponse()
        )
        response = await client.list_voices(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tts.ListVoicesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, cloud_tts.ListVoicesResponse)
@pytest.mark.asyncio
async def test_list_voices_async_from_dict():
    """The async ListVoices RPC also accepts a plain-dict request."""
    await test_list_voices_async(request_type=dict)
def test_list_voices_flattened():
    """Flattened keyword arguments (language_code) are copied onto the request
    object sent to the stub."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tts.ListVoicesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_voices(
            language_code="language_code_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].language_code
        mock_val = "language_code_value"
        assert arg == mock_val
def test_list_voices_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_voices(
            cloud_tts.ListVoicesRequest(),
            language_code="language_code_value",
        )
@pytest.mark.asyncio
async def test_list_voices_flattened_async():
    """Flattened keyword arguments (language_code) are copied onto the request
    object sent to the stub on the async client.

    Fix: the original assigned ``call.return_value`` twice — a bare
    ``ListVoicesResponse`` immediately overwritten by the awaitable
    ``FakeUnaryUnaryCall`` wrapper. The dead first assignment is removed;
    only the awaitable wrapper is meaningful for the async code path.
    """
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_voices), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tts.ListVoicesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_voices(
            language_code="language_code_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].language_code
        mock_val = "language_code_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_voices_flattened_error_async():
    """Passing both a request object and flattened fields raises ValueError on
    the async client too."""
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_voices(
            cloud_tts.ListVoicesRequest(),
            language_code="language_code_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        cloud_tts.SynthesizeSpeechRequest,
        dict,
    ],
)
def test_synthesize_speech(request_type, transport: str = "grpc"):
    """synthesize_speech sends a SynthesizeSpeechRequest over the mocked stub
    and surfaces the response's audio_content."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.synthesize_speech), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tts.SynthesizeSpeechResponse(
            audio_content=b"audio_content_blob",
        )
        response = client.synthesize_speech(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tts.SynthesizeSpeechRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, cloud_tts.SynthesizeSpeechResponse)
        assert response.audio_content == b"audio_content_blob"
def test_synthesize_speech_empty_call():
    """Calling synthesize_speech() with no arguments still issues an (empty)
    SynthesizeSpeechRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.synthesize_speech), "__call__"
    ) as call:
        client.synthesize_speech()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tts.SynthesizeSpeechRequest()
@pytest.mark.asyncio
async def test_synthesize_speech_async(
    transport: str = "grpc_asyncio", request_type=cloud_tts.SynthesizeSpeechRequest
):
    """Async synthesize_speech awaits the stub call and surfaces the
    response's audio_content."""
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.synthesize_speech), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tts.SynthesizeSpeechResponse(
                audio_content=b"audio_content_blob",
            )
        )
        response = await client.synthesize_speech(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tts.SynthesizeSpeechRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, cloud_tts.SynthesizeSpeechResponse)
        assert response.audio_content == b"audio_content_blob"
@pytest.mark.asyncio
async def test_synthesize_speech_async_from_dict():
    """The async path must also accept a plain dict in place of a request message."""
    await test_synthesize_speech_async(request_type=dict)
def test_synthesize_speech_flattened():
    """Flattened keyword arguments must be packed into the request message."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(client.transport.synthesize_speech), "__call__"
    ) as stub_call:
        stub_call.return_value = cloud_tts.SynthesizeSpeechResponse()

        # Invoke with a truthy value for every flattened field.
        client.synthesize_speech(
            input=cloud_tts.SynthesisInput(text="text_value"),
            voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
            audio_config=cloud_tts.AudioConfig(
                audio_encoding=cloud_tts.AudioEncoding.LINEAR16
            ),
        )

        # Exactly one RPC, carrying each flattened value in the request object.
        assert len(stub_call.mock_calls) == 1
        _, call_args, _ = stub_call.mock_calls[0]
        request = call_args[0]
        assert request.input == cloud_tts.SynthesisInput(text="text_value")
        assert request.voice == cloud_tts.VoiceSelectionParams(
            language_code="language_code_value"
        )
        assert request.audio_config == cloud_tts.AudioConfig(
            audio_encoding=cloud_tts.AudioEncoding.LINEAR16
        )
def test_synthesize_speech_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing a request message with flattened keyword arguments is ambiguous
    # and must be rejected.
    with pytest.raises(ValueError):
        client.synthesize_speech(
            cloud_tts.SynthesizeSpeechRequest(),
            input=cloud_tts.SynthesisInput(text="text_value"),
            voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
            audio_config=cloud_tts.AudioConfig(
                audio_encoding=cloud_tts.AudioEncoding.LINEAR16
            ),
        )
@pytest.mark.asyncio
async def test_synthesize_speech_flattened_async():
    """Async flattened keyword arguments must be packed into the request message."""
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.synthesize_speech), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A sync-style `call.return_value = cloud_tts.SynthesizeSpeechResponse()`
        # used to precede this line; it was dead code, immediately overwritten
        # by the awaitable below, and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tts.SynthesizeSpeechResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.synthesize_speech(
            input=cloud_tts.SynthesisInput(text="text_value"),
            voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
            audio_config=cloud_tts.AudioConfig(
                audio_encoding=cloud_tts.AudioEncoding.LINEAR16
            ),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].input
        mock_val = cloud_tts.SynthesisInput(text="text_value")
        assert arg == mock_val
        arg = args[0].voice
        mock_val = cloud_tts.VoiceSelectionParams(language_code="language_code_value")
        assert arg == mock_val
        arg = args[0].audio_config
        mock_val = cloud_tts.AudioConfig(
            audio_encoding=cloud_tts.AudioEncoding.LINEAR16
        )
        assert arg == mock_val
@pytest.mark.asyncio
async def test_synthesize_speech_flattened_error_async():
    """Async: a request object combined with flattened fields must raise."""
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing a request message with flattened keyword arguments is ambiguous
    # and must be rejected.
    with pytest.raises(ValueError):
        await client.synthesize_speech(
            cloud_tts.SynthesizeSpeechRequest(),
            input=cloud_tts.SynthesisInput(text="text_value"),
            voice=cloud_tts.VoiceSelectionParams(language_code="language_code_value"),
            audio_config=cloud_tts.AudioConfig(
                audio_encoding=cloud_tts.AudioEncoding.LINEAR16
            ),
        )
def test_credentials_transport_error():
    """Mutually exclusive client options must each be rejected with ValueError."""
    anon = ga_credentials.AnonymousCredentials()

    # credentials + transport instance -> error.
    transport = transports.TextToSpeechGrpcTransport(credentials=anon)
    with pytest.raises(ValueError):
        TextToSpeechClient(credentials=anon, transport=transport)

    # credentials_file + transport instance -> error.
    transport = transports.TextToSpeechGrpcTransport(credentials=anon)
    with pytest.raises(ValueError):
        TextToSpeechClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # api_key + transport instance -> error.
    transport = transports.TextToSpeechGrpcTransport(credentials=anon)
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        TextToSpeechClient(client_options=options, transport=transport)

    # api_key + credentials -> error.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        TextToSpeechClient(client_options=options, credentials=anon)

    # scopes + transport instance -> error.
    transport = transports.TextToSpeechGrpcTransport(credentials=anon)
    with pytest.raises(ValueError):
        TextToSpeechClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A caller-supplied transport instance must be used as-is."""
    custom_transport = transports.TextToSpeechGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = TextToSpeechClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both sync and async transports must expose a usable gRPC channel."""
    for transport_cls in (
        transports.TextToSpeechGrpcTransport,
        transports.TextToSpeechGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TextToSpeechGrpcTransport,
        transports.TextToSpeechGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports constructed without credentials must fall back to ADC."""
    with mock.patch.object(google.auth, "default") as adc_mock:
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc_mock.assert_called_once()
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
    ],
)
def test_transport_kind(transport_name):
    """transport.kind must echo the name the transport was registered under."""
    transport_cls = TextToSpeechClient.get_transport_class(transport_name)
    transport = transport_cls(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert transport.kind == transport_name
def test_transport_grpc_default():
    """Clients must default to the synchronous gRPC transport."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(client.transport, transports.TextToSpeechGrpcTransport)
def test_text_to_speech_base_transport_error():
    """Supplying both credentials and credentials_file must raise."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.TextToSpeechTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_text_to_speech_base_transport():
    """Every abstract method/property on the base transport must raise."""
    # Instantiate the base transport with its __init__ stubbed out.
    with mock.patch(
        "google.cloud.texttospeech_v1beta1.services.text_to_speech.transports.TextToSpeechTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.TextToSpeechTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

        # RPC methods are abstract and must blindly raise NotImplementedError.
        for method_name in ("list_voices", "synthesize_speech"):
            with pytest.raises(NotImplementedError):
                getattr(transport, method_name)(request=object())

        with pytest.raises(NotImplementedError):
            transport.close()

        # Catch-all for the remaining abstract properties.
        for remaining in ("kind",):
            with pytest.raises(NotImplementedError):
                getattr(transport, remaining)()
def test_text_to_speech_base_transport_with_credentials_file():
    """Base transport must load file credentials with the default cloud scope."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.texttospeech_v1beta1.services.text_to_speech.transports.TextToSpeechTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TextToSpeechTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        # The file path, scopes and quota project must be forwarded verbatim.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_text_to_speech_base_transport_with_adc():
    """Base transport must fall back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.texttospeech_v1beta1.services.text_to_speech.transports.TextToSpeechTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TextToSpeechTransport()
        adc.assert_called_once()
def test_text_to_speech_auth_adc():
    """Constructing a client without credentials must resolve them via ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc_mock:
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        TextToSpeechClient()
        # ADC must be queried with the default cloud-platform scope only.
        adc_mock.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TextToSpeechGrpcTransport,
        transports.TextToSpeechGrpcAsyncIOTransport,
    ],
)
def test_text_to_speech_transport_auth_adc(transport_class):
    """Transports built without credentials must pass scopes/quota to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc_mock:
        adc_mock.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # Caller-supplied scopes and quota project must be forwarded verbatim.
        adc_mock.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TextToSpeechGrpcTransport,
        transports.TextToSpeechGrpcAsyncIOTransport,
    ],
)
def test_text_to_speech_transport_auth_gdch_credentials(transport_class):
    """GDC-H credentials must be rescoped to api_audience (or the host)."""
    host = "https://language.com"
    # When api_audience is None, the host itself is used as the audience.
    api_audience_tests = [None, "https://language2.com"]
    api_audience_expect = [host, "https://language2.com"]
    for t, e in zip(api_audience_tests, api_audience_expect):
        with mock.patch.object(google.auth, "default", autospec=True) as adc:
            gdch_mock = mock.MagicMock()
            # with_gdch_audience returns the same mock so the transport keeps
            # using it after rescoping.
            type(gdch_mock).with_gdch_audience = mock.PropertyMock(
                return_value=gdch_mock
            )
            adc.return_value = (gdch_mock, None)
            transport_class(host=host, api_audience=t)
            gdch_mock.with_gdch_audience.assert_called_once_with(e)
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.TextToSpeechGrpcTransport, grpc_helpers),
        (transports.TextToSpeechGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_text_to_speech_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation must forward ADC credentials, scopes and gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel must target the default endpoint with the ADC credentials
        # and unlimited message sizes.
        create_channel.assert_called_with(
            "texttospeech.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="texttospeech.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.TextToSpeechGrpcTransport, transports.TextToSpeechGrpcAsyncIOTransport],
)
def test_text_to_speech_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS channel setup must honor explicit SSL creds or a cert-source callback."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The explicit SSL credentials must be passed straight through.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair produced by the callback must be what gets
            # turned into channel credentials.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_text_to_speech_host_no_port(transport_name):
    """An api_endpoint without a port must default to :443."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="texttospeech.googleapis.com"
        ),
        transport=transport_name,
    )
    assert client.transport._host == "texttospeech.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_text_to_speech_host_with_port(transport_name):
    """An api_endpoint with an explicit port must keep that port."""
    client = TextToSpeechClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="texttospeech.googleapis.com:8000"
        ),
        transport=transport_name,
    )
    assert client.transport._host == "texttospeech.googleapis.com:8000"
def test_text_to_speech_grpc_transport_channel():
    """A caller-supplied channel must be adopted by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.TextToSpeechGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison (`is None`) instead of `== None` (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
def test_text_to_speech_grpc_asyncio_transport_channel():
    """A caller-supplied channel must be adopted by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.TextToSpeechGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison (`is None`) instead of `== None` (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.TextToSpeechGrpcTransport, transports.TextToSpeechGrpcAsyncIOTransport],
)
def test_text_to_speech_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint/client_cert_source args must still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred

            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel

            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()

            # The callback's cert/key must become the SSL channel credentials.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel must target the mTLS endpoint with those credentials.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.TextToSpeechGrpcTransport, transports.TextToSpeechGrpcAsyncIOTransport],
)
def test_text_to_speech_transport_channel_mtls_with_adc(transport_class):
    """api_mtls_endpoint without a cert source must use ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    # Patch google.auth's SslCredentials so its ssl_credentials property yields
    # our mock without touching real ADC mTLS configuration.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()

            # Using the deprecated argument must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            # The channel must target the mTLS endpoint with the ADC-derived
            # SSL credentials.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_model_path():
    """model_path must build the canonical model resource name."""
    expected = "projects/squid/locations/clam/models/whelk"
    assert TextToSpeechClient.model_path("squid", "clam", "whelk") == expected
def test_parse_model_path():
    """parse_model_path must invert model_path."""
    parts = {
        "project": "octopus",
        "location": "oyster",
        "model": "nudibranch",
    }
    path = TextToSpeechClient.model_path(**parts)
    assert TextToSpeechClient.parse_model_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path must build the canonical resource name."""
    actual = TextToSpeechClient.common_billing_account_path("cuttlefish")
    assert actual == "billingAccounts/cuttlefish"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path must invert common_billing_account_path."""
    parts = {"billing_account": "mussel"}
    path = TextToSpeechClient.common_billing_account_path(**parts)
    assert TextToSpeechClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path must build the canonical folder resource name."""
    assert TextToSpeechClient.common_folder_path("winkle") == "folders/winkle"
def test_parse_common_folder_path():
    """parse_common_folder_path must invert common_folder_path."""
    parts = {"folder": "nautilus"}
    path = TextToSpeechClient.common_folder_path(**parts)
    assert TextToSpeechClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path must build the canonical organization name."""
    actual = TextToSpeechClient.common_organization_path("scallop")
    assert actual == "organizations/scallop"
def test_parse_common_organization_path():
    """parse_common_organization_path must invert common_organization_path."""
    parts = {"organization": "abalone"}
    path = TextToSpeechClient.common_organization_path(**parts)
    assert TextToSpeechClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path must build the canonical project resource name."""
    assert TextToSpeechClient.common_project_path("squid") == "projects/squid"
def test_parse_common_project_path():
    """parse_common_project_path must invert common_project_path."""
    parts = {"project": "clam"}
    path = TextToSpeechClient.common_project_path(**parts)
    assert TextToSpeechClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path must build the canonical location resource name."""
    actual = TextToSpeechClient.common_location_path("whelk", "octopus")
    assert actual == "projects/whelk/locations/octopus"
def test_parse_common_location_path():
    """parse_common_location_path must invert common_location_path."""
    parts = {
        "project": "oyster",
        "location": "nudibranch",
    }
    path = TextToSpeechClient.common_location_path(**parts)
    assert TextToSpeechClient.parse_common_location_path(path) == parts
def test_client_with_default_client_info():
    """A caller-supplied client_info must reach _prep_wrapped_messages."""
    info = gapic_v1.client_info.ClientInfo()

    # Via the client constructor.
    with mock.patch.object(
        transports.TextToSpeechTransport, "_prep_wrapped_messages"
    ) as prep_mock:
        TextToSpeechClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=info,
        )
        prep_mock.assert_called_once_with(info)

    # Via direct transport construction.
    with mock.patch.object(
        transports.TextToSpeechTransport, "_prep_wrapped_messages"
    ) as prep_mock:
        transport_cls = TextToSpeechClient.get_transport_class()
        transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=info,
        )
        prep_mock.assert_called_once_with(info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Async client context manager must close the gRPC channel on exit."""
    client = TextToSpeechAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close_mock:
        async with client:
            # Channel stays open while inside the context.
            close_mock.assert_not_called()
        close_mock.assert_called_once()
def test_transport_close():
    """Sync client context manager must close the underlying channel on exit."""
    # Maps transport name -> private channel attribute to watch.
    # (Renamed from `transports`, which shadowed the module-level import.)
    channel_attrs = {
        "grpc": "_grpc_channel",
    }
    for transport_name, channel_attr in channel_attrs.items():
        client = TextToSpeechClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, channel_attr)), "close"
        ) as close_mock:
            with client:
                close_mock.assert_not_called()
            close_mock.assert_called_once()
def test_client_ctx():
    """The client must delegate context-manager exit to transport.close()."""
    for transport_name in ["grpc"]:
        client = TextToSpeechClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close_mock:
            close_mock.assert_not_called()
            with client:
                pass
            close_mock.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (TextToSpeechClient, transports.TextToSpeechGrpcTransport),
        (TextToSpeechAsyncClient, transports.TextToSpeechGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option must be turned into API-key credentials."""
    # `create=True` because get_api_key_credentials may not exist on older
    # google-auth versions.
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        # Stub the transport constructor so only the wiring is exercised.
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The API-key credentials must be handed to the transport unchanged,
            # alongside all the default construction parameters.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
|
{
"content_hash": "9710c75cc1666064e322f94bc1395bf1",
"timestamp": "",
"source": "github",
"line_count": 1732,
"max_line_length": 123,
"avg_line_length": 36.67147806004619,
"alnum_prop": 0.6435330236951902,
"repo_name": "googleapis/python-texttospeech",
"id": "96807af675bf4f6180e4dfbf8dcc24a8ab5f2c54",
"size": "64115",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/gapic/texttospeech_v1beta1/test_text_to_speech.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "368339"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
}
|
import os, sys
from math import floor

from osgeo import ogr
from osgeo import osr
class ShapeDataError(Exception):
    """Raised when a shapefile cannot be read or has the wrong geometry/layout."""
    pass
def load_points(shapefile):
    """
    Returns a list of coordinates from points in an input shapefile.

    Required Argument(s):
    - shapefile: The path to a point-geometry shapefile

    Optional Argument(s):
    - None

    Returns:
    - points: A list of (x, y) tuples, one per point feature

    Raises:
    - ShapeDataError: if the shapefile is unreadable, has more than one
      layer, or contains non-point geometry.
    """
    # Open shapeData
    shapeData = ogr.Open(validateShapePath(shapefile))
    # Validate shapeData
    validateShapeData(shapeData)
    # Get the first layer
    layer = shapeData.GetLayer()
    points = []
    # `range` instead of the Python-2-only `xrange`: equivalent here and keeps
    # the function working under both Python 2 and Python 3.
    for index in range(layer.GetFeatureCount()):
        feature = layer.GetFeature(index)
        geometry = feature.GetGeometryRef()
        # Make sure that it is a point
        if geometry.GetGeometryType() != ogr.wkbPoint:
            raise ShapeDataError('This function only accepts point geometry.')
        points.append((geometry.GetX(), geometry.GetY()))
        # Release the OGR feature explicitly (OGR objects hold C resources).
        feature.Destroy()
    # Cleanup
    shapeData.Destroy()
    return points
def get_ref_from_shapefile(shapefile):
    """
    Gets a spatial reference from an input shapefile.

    Required Argument(s):
    - shapefile: The path to a shapefile

    Optional Argument(s):
    - None

    Returns:
    - spatialref: The layer's spatial reference as an OGR SpatialReference
      object. NOTE(review): despite the original comment below, this is NOT a
      proj4 string; callers needing proj4 must call ExportToProj4() on it.
    """
    # Open shapeData
    shapeData = ogr.Open(validateShapePath(shapefile))
    # Validate shapeData
    validateShapeData(shapeData)
    # Get the first layer
    layer = shapeData.GetLayer()
    # Get the layer's spatial reference object (not converted to proj4 here).
    spatialref = layer.GetSpatialRef()
    return spatialref
def getSpatialReferenceFromProj4(proj4):
    """Return a GDAL/OSR spatial reference object from a proj4 string.

    BUGFIX: SpatialReference lives in the osr module, not ogr; the previous
    `ogr.SpatialReference()` call raises AttributeError on GDAL's Python
    bindings.
    """
    spatialReference = osr.SpatialReference()
    spatialReference.ImportFromProj4(proj4)
    return spatialReference
# Validate
def validateShapePath(shapePath):
    """Coerce *shapePath* to the same base path with a '.shp' extension."""
    root, _ = os.path.splitext(str(shapePath))
    return root + '.shp'
def validateShapeData(shapeData):
    """Raise ShapeDataError unless *shapeData* is an open, single-layer dataset."""
    # A falsy dataset means ogr.Open() failed.
    if not shapeData:
        raise ShapeDataError('The shapefile is invalid')
    # Exactly one layer is required by the helpers in this module.
    layer_count = shapeData.GetLayerCount()
    if layer_count != 1:
        raise ShapeDataError('The shapefile must have exactly one layer')
def check_spatial_refs(srs1, srs2):
    """Return True when the two spatial references differ, False when equal."""
    return srs1 != srs2
def get_px_coords_from_points(raster, shapefile):
    """
    Takes geographic coordinates from a shapefile and finds the corresponding
    pixel coordinates on a raster.

    Required Argument(s):
    - raster: path to the raster image
    - shapefile: path to a point-geometry shapefile

    Returns:
    - pxcoords: list of (x, y) integer pixel coordinates, one per input point
    """
    pointcoords = load_points(shapefile)
    ref1 = get_ref_from_shapefile(shapefile)
    # NOTE(review): `gdalObject` is not defined or imported anywhere in this
    # module, so this line raises NameError as written. It is presumably a
    # raster-wrapper class from elsewhere in the project -- confirm and import.
    image = gdalObject
    image.open(raster)
    image.close()
    referror = check_spatial_refs(ref1, image.projection)
    if referror:
        # Parenthesized so it is valid as a Py2 print statement and a Py3 call.
        print("WARNING: Spatial Reference of raster does not match points shapefile. Output may not be as expected. For best resutls ensure reference systems are identical.")
        #TODO Change to use warnings module
    # Raster edge coords derived from the GDAL geotransform.
    left = image.geotransform[0]
    top = image.geotransform[3]
    right = image.cols * image.geotransform[1] + image.geotransform[0]
    # BUGFIX: the vertical extent spans image.rows pixels, not image.cols
    # (geotransform[5] is the per-row pixel height).
    bottom = image.rows * image.geotransform[5] + image.geotransform[3]
    pxcoords = []
    for coords in pointcoords:
        x = int(floor(image.cols * (coords[0] - left) / (right - left)))
        y = int(floor(image.rows * (coords[1] - top) / (bottom - top)))
        pxcoords.append((x, y))
    return pxcoords
if __name__ == '__main__':
    # Library-only module: running it as a script simply exits (status 0).
    sys.exit()
|
{
"content_hash": "de710a99fd95e5d8441239c93e00c5a5",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 173,
"avg_line_length": 29.32450331125828,
"alnum_prop": 0.6797651309846432,
"repo_name": "jkeifer/pyHytemporal",
"id": "631ef694344a55cf80bed642b2db9d86c0413153",
"size": "4428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_TO_MIGRATE/get_px_coords_from_point.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254268"
}
],
"symlink_target": ""
}
|
"""
Revision ID: 0382_nhs_letter_branding_id
Revises: 0381_letter_branding_to_org
Create Date: 2022-11-15 07:57:49.060820
"""
import os
from alembic import op
revision = '0382_nhs_letter_branding_id'
down_revision = '0381_letter_branding_to_org'
environment = os.environ["NOTIFY_ENVIRONMENT"]
def upgrade():
    """Recreate the 'NHS' letter branding row under a fixed, well-known id.

    Runs only outside live/production: every reference to the existing 'NHS'
    letter_branding row is removed first so the row can be deleted and then
    re-inserted with the hard-coded id.
    """
    if environment not in ["live", "production"]:
        # Drop service-level links to the old NHS branding row.
        op.execute(
            """
            DELETE FROM service_letter_branding
            WHERE letter_branding_id in (
                SELECT id
                FROM letter_branding
                WHERE name = 'NHS'
            )
            """
        )
        # Drop organisation branding-pool links to the old NHS branding row.
        op.execute(
            """
            DELETE FROM letter_branding_to_organisation
            WHERE letter_branding_id in (
                SELECT id
                FROM letter_branding
                WHERE name = 'NHS'
            )
            """
        )
        # Clear organisations' default letter branding where it pointed at NHS.
        op.execute(
            """
            UPDATE organisation SET letter_branding_id = null
            WHERE letter_branding_id in(
                SELECT id
                FROM letter_branding
                WHERE name = 'NHS'
            )
            """
        )
        # The row is now unreferenced and can be deleted...
        op.execute(
            """
            DELETE FROM letter_branding WHERE name = 'NHS'
            """
        )
        # ...and re-created with the fixed id.
        op.execute(
            """
            INSERT INTO letter_branding (
                id, name, filename
            )
            VALUES (
                '2cd354bb-6b85-eda3-c0ad-6b613150459f',
                'NHS',
                'nhs'
            )
            """
        )
def downgrade():
    """Deliberate no-op.

    The upgrade is not fully reversible, and this migration will not be run
    in production.
    """
|
{
"content_hash": "5babc29a2fbd647079a3f405f173cfe5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 89,
"avg_line_length": 22.32894736842105,
"alnum_prop": 0.47200942840306426,
"repo_name": "alphagov/notifications-api",
"id": "3d58109a57fb60e147f04a5499d42c0655fdf76b",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/0382_nhs_letter_branding_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the corvette search/destroy storage-drums tangible template."""
    tangible = Tangible()
    tangible.template = "object/tangible/dungeon/corellian_corvette/shared_corvette_search_rebel_destroy_03.iff"
    tangible.attribute_template_id = -1
    tangible.stfName("frn_n","frn_storage_drums")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return tangible
|
{
"content_hash": "d95f0aae65bb99a876626277ee1158dd",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 107,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.7126099706744868,
"repo_name": "obi-two/Rebelion",
"id": "16d325ba08cce99ba07cd4ed6c446e9adbe0d94d",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/dungeon/corellian_corvette/shared_corvette_search_rebel_destroy_03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Attack on CountSketch and median estimator."""
import math
import numpy as np
from numpy.random import default_rng
class CountsketchMedianAttack:
  """Attack on the classic CountSketch."""
  def __init__(self,
               num_of_rounds=100,
               nof_repetitions=10,
               nof_sim_keys=11,
               tail_size=1000,
               b=30,
               l=100,
               seed=None):
    # num_of_rounds: attack rounds per repetition.
    # nof_repetitions: independent sketch draws (one CSV file per repetition).
    # nof_sim_keys: simulated keys; keys 0 and 1 are the two tracked keys.
    # tail_size: number of random tail items added per query.
    # b: buckets per sketch row; l: number of rows; seed: PRNG seed.
    self.num_of_rounds = num_of_rounds
    self.nof_repetitions = nof_repetitions
    self.nof_sim_keys = nof_sim_keys
    self.tail_size = tail_size
    self.l = l
    self.b = b
    self.seed = seed
    self.rng = default_rng(seed)
    # NOTE(review): reshaped to (num_of_rounds, 3, nof_repetitions) inside
    # simulate_median_attack before it is actually filled.
    self.results = np.zeros((self.num_of_rounds, self.nof_sim_keys),
                            dtype=float)
  def __repr__(self):
    # Human-readable summary of the test parameters.
    return ("test parameters:\n number of rounds = {0}; number of repetitions ="
            " {6}\n num of keys simulated = {1};\n attack tail size = {2};\n l "
            "= {3};\n b = {4}; seed = {5};").format(self.num_of_rounds,
                                                    self.nof_sim_keys,
                                                    self.tail_size, self.l,
                                                    self.b, self.seed,
                                                    self.nof_repetitions)
  def draw_sketch(self):
    """Draws fresh per-row bucket indices and signs for the simulated keys."""
    self.sim_keys_hash = self.rng.choice(
        self.b, size=(self.l, self.nof_sim_keys))
    self.sim_keys_sign = self.rng.choice(
        2, size=(self.l, self.nof_sim_keys)) * 2 - 1
  def generate_v(self, mk_factor=10, lk_factor=20):
    """Returns the frequency vector v: keys 0-1 get int(sd * mk_factor),
    the remaining keys int(sd * lk_factor), where sd = sqrt(tail_size / b)."""
    sd = math.sqrt(self.tail_size / self.b)
    lk_weight = int(sd * lk_factor)
    mk_weight = int(sd * mk_factor)
    v = np.ones(self.nof_sim_keys, dtype=int)
    v[0:2] = mk_weight
    v[2:] = lk_weight
    return v
  def encode_v(self, v):
    """Encodes v into an (l, b) CountSketch counter array."""
    counters_v = np.zeros((self.l, self.b), dtype=int)
    for line in range(self.l):
      for key in range(self.nof_sim_keys):
        counters_v[line,
                   self.sim_keys_hash[line,
                                      key]] += v[key] * self.sim_keys_sign[line,
                                                                           key]
    return counters_v
  def decode_v(self, counters):
    """Returns the per-key median estimates decoded from counters."""
    weak_estimates = np.zeros((self.l, self.nof_sim_keys), dtype=int)
    for line in range(self.l):
      for key in range(self.nof_sim_keys):
        weak_estimates[line, key] = counters[line, self.sim_keys_hash[
            line, key]] * self.sim_keys_sign[line, key]
    # for even length axis: returns average of the two medians
    estimates = np.median(weak_estimates, axis=0)
    return estimates
  def check_parameters(self, nof_checks=100):
    """Returns True iff every fresh sketch decodes v exactly (no tail noise)."""
    # pass if all sketch draws return exact estimates for v
    v = self.generate_v()
    results = np.zeros(nof_checks, dtype=int)
    for i in range(nof_checks):
      self.draw_sketch()
      estimates_v = self.decode_v(self.encode_v(v))
      results[i] = np.absolute(estimates_v - v).sum()
    # NOTE(review): this is printed unconditionally, even when the check
    # below ends up returning False.
    print("parameters OK")
    return results.sum() == 0
  def gencode_z(self):
    """Generates a fresh random tail and returns its encoded (l, b) counters."""
    counters_z = np.zeros((self.l, self.b), dtype=int)
    for line in range(self.l):
      tail_hash = self.rng.choice(self.b, size=self.tail_size)
      tail_sign = self.rng.choice(2, size=self.tail_size) * 2 - 1
      for key in range(self.tail_size):
        counters_z[line, tail_hash[key]] += tail_sign[key]
    return counters_z
  def simulate_median_attack(self, files_pref="sim_1", new_seed=None):
    """Runs the attack and writes the results into CSV files."""
    if new_seed is not None:
      self.seed = new_seed
      self.rng = default_rng(new_seed)
    # results[r, k, rep]: scaled estimates of keys 0..2 after round r.
    self.results = np.zeros((self.num_of_rounds, 3, self.nof_repetitions),
                            dtype=float)
    # here we add looping over 10 repetitions
    for rep in range(self.nof_repetitions):
      self.draw_sketch()
      nof_collected = 0
      v = self.generate_v()
      counters_a = np.zeros((self.l, self.b), dtype=int)
      for r in range(self.num_of_rounds):
        # query
        counters_v = self.encode_v(v)
        counters_z = self.gencode_z()
        estimates_q = self.decode_v(counters_v + counters_z)
        # a key is "reported" if its estimate is at least the second smallest
        key_0_reported = estimates_q[0] >= sorted(estimates_q)[1]
        key_1_reported = estimates_q[1] >= sorted(estimates_q)[1]
        # collecting decision
        if key_0_reported and (not key_1_reported):
          counters_a = counters_a + counters_z
          nof_collected += 1
        if key_1_reported and (not key_0_reported):
          counters_a = counters_a - counters_z
          nof_collected += 1
        # saving the keys signal
        factor = 1
        if nof_collected > 0:
          factor = math.sqrt(nof_collected * self.tail_size / self.b)
        estimates_a = self.decode_v(counters_a)
        self.results[r, :, rep] = estimates_a[0:3] / factor
      # here we output a file
      print("saving file for rep = {0}".format(rep))
      file_name = "./results/{0}_rep_{1}.csv".format(files_pref, rep)
      np.savetxt(file_name, self.results[:, :, rep], delimiter=",")
    return self.results
# Test 1: 10 repetitions of a 10000-round attack on sketches with
# l=100 rows and b=30 buckets per row.
rounds = 10000
test_1 = CountsketchMedianAttack(num_of_rounds=rounds)
print(test_1)
params_ok = test_1.check_parameters()
if params_ok:
  test_1.simulate_median_attack(files_pref="sim_1_run")
|
{
"content_hash": "9a0c4795b337881b976f5830d28a9be4",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 80,
"avg_line_length": 37.90780141843972,
"alnum_prop": 0.5698783910196445,
"repo_name": "google-research/google-research",
"id": "fc88839d8445cc24828084f086e0de9d681ce89a",
"size": "5953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robust_count_sketch/sim_1_code.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
'''Entry point: start the web application with settings from its config.'''
# start app
from app import app
# port and debug flag come from the application's own config object
PORT = app.config['PORT']
DEBUG = app.config['DEBUG']
# host='0.0.0.0' binds all interfaces so the server is reachable externally
app.run(host='0.0.0.0', port=PORT, debug=DEBUG)
|
{
"content_hash": "a5d5b106937c5211ef131b443cbf4070",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 16.555555555555557,
"alnum_prop": 0.6174496644295302,
"repo_name": "greenglobal/face-recognition-demo",
"id": "d674efb349a1c81c5779494ec9c9ebf1124269af",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "340"
},
{
"name": "HTML",
"bytes": "8178"
},
{
"name": "JavaScript",
"bytes": "2113"
},
{
"name": "Python",
"bytes": "3467"
}
],
"symlink_target": ""
}
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the related model classes and publish them in module globals.

    Called right before the names are needed (see ``openapi_types``) so the
    module does not have to import them at load time.
    """
    from plaid.model.external_payment_initiation_consent_options import ExternalPaymentInitiationConsentOptions
    from plaid.model.payment_initiation_consent_constraints import PaymentInitiationConsentConstraints
    from plaid.model.payment_initiation_consent_scope import PaymentInitiationConsentScope
    for model_cls in (ExternalPaymentInitiationConsentOptions,
                      PaymentInitiationConsentConstraints,
                      PaymentInitiationConsentScope):
        globals()[model_cls.__name__] = model_cls
class PaymentInitiationConsentCreateRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained attributes on this model.
    allowed_values = {
    }
    # Value constraints checked by the model machinery on assignment.
    validations = {
        ('recipient_id',): {
            'min_length': 1,
        },
        ('reference',): {
            'max_length': 18,
            'min_length': 1,
        },
        ('scopes',): {
            'min_items': 1,
        },
    }
    # Arbitrary extra properties are not accepted.
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'recipient_id': (str,),  # noqa: E501
            'reference': (str,),  # noqa: E501
            'scopes': ([PaymentInitiationConsentScope],),  # noqa: E501
            'constraints': (PaymentInitiationConsentConstraints,),  # noqa: E501
            'client_id': (str,),  # noqa: E501
            'secret': (str,),  # noqa: E501
            'options': (ExternalPaymentInitiationConsentOptions,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This model is not polymorphic, hence no discriminator.
        return None
    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'recipient_id': 'recipient_id',  # noqa: E501
        'reference': 'reference',  # noqa: E501
        'scopes': 'scopes',  # noqa: E501
        'constraints': 'constraints',  # noqa: E501
        'client_id': 'client_id',  # noqa: E501
        'secret': 'secret',  # noqa: E501
        'options': 'options',  # noqa: E501
    }
    _composed_schemas = {}
    # Internal bookkeeping attributes that are always settable on instances.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, recipient_id, reference, scopes, constraints, *args, **kwargs):  # noqa: E501
        """PaymentInitiationConsentCreateRequest - a model defined in OpenAPI
        Args:
            recipient_id (str): The ID of the recipient the payment consent is for. The created consent can be used to transfer funds to this recipient only.
            reference (str): A reference for the payment consent. This must be an alphanumeric string with at most 18 characters and must not contain any special characters.
            scopes ([PaymentInitiationConsentScope]): An array of payment consent scopes.
            constraints (PaymentInitiationConsentConstraints):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body.. [optional]  # noqa: E501
            secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body.. [optional]  # noqa: E501
            options (ExternalPaymentInitiationConsentOptions): [optional]  # noqa: E501
        """
        # Pop the framework-control kwargs before treating the rest as fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.recipient_id = recipient_id
        self.reference = reference
        self.scopes = scopes
        self.constraints = constraints
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
{
"content_hash": "bc1785aaef413023725801b87f2ffb48",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 199,
"avg_line_length": 43.888888888888886,
"alnum_prop": 0.5866813428728673,
"repo_name": "plaid/plaid-python",
"id": "5d0cf964373e9b44dd8c046408457d44fe226ffa",
"size": "9085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/payment_initiation_consent_create_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
}
|
import webob.exc
from nova.api.openstack import api_version_request
from nova.api.openstack.compute.schemas import services
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
from nova import utils
ALIAS = "os-services"
authorize = extensions.os_compute_authorizer(ALIAS)
class ServiceController(wsgi.Controller):
    """os-services API: list, enable/disable, force-down and delete services."""
    def __init__(self):
        self.host_api = compute.HostAPI()
        self.servicegroup_api = servicegroup.API()
        # Action names accepted via PUT (see update()).
        self.actions = {"enable": self._enable,
                        "disable": self._disable,
                        "disable-log-reason": self._disable_log_reason}
    def _get_services(self, req):
        """Return service records, excluding the API services and applying
        the optional ?host= and ?binary= query filters."""
        api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
        context = req.environ['nova.context']
        authorize(context)
        _services = [
            s
            for s in self.host_api.service_get_all(context, set_zones=True)
            if s['binary'] not in api_services
        ]
        host = ''
        if 'host' in req.GET:
            host = req.GET['host']
        binary = ''
        if 'binary' in req.GET:
            binary = req.GET['binary']
        if host:
            _services = [s for s in _services if s['host'] == host]
        if binary:
            _services = [s for s in _services if s['binary'] == binary]
        return _services
    def _get_service_detail(self, svc, additional_fields):
        """Build the serializable detail dict for one service record."""
        alive = self.servicegroup_api.service_is_up(svc)
        state = (alive and "up") or "down"
        active = 'enabled'
        if svc['disabled']:
            active = 'disabled'
        service_detail = {'binary': svc['binary'],
                          'host': svc['host'],
                          'id': svc['id'],
                          'zone': svc['availability_zone'],
                          'status': active,
                          'state': state,
                          'updated_at': svc['updated_at'],
                          'disabled_reason': svc['disabled_reason']}
        for field in additional_fields:
            service_detail[field] = svc[field]
        return service_detail
    def _get_services_list(self, req, additional_fields=()):
        """Return detail dicts for every service matching the request."""
        _services = self._get_services(req)
        return [self._get_service_detail(svc, additional_fields)
                for svc in _services]
    def _enable(self, body, context):
        """Enable scheduling for a service."""
        return self._enable_disable(body, context, "enabled",
                                    {'disabled': False,
                                     'disabled_reason': None})
    def _disable(self, body, context, reason=None):
        """Disable scheduling for a service with optional log."""
        return self._enable_disable(body, context, "disabled",
                                    {'disabled': True,
                                     'disabled_reason': reason})
    def _disable_log_reason(self, body, context):
        """Disable scheduling for a service with a log."""
        try:
            reason = body['disabled_reason']
        except KeyError:
            msg = _('Missing disabled reason field')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return self._disable(body, context, reason)
    def _enable_disable(self, body, context, status, params_to_update):
        """Enable/Disable scheduling for a service."""
        reason = params_to_update.get('disabled_reason')
        ret_value = {
            'service': {
                'host': body['host'],
                'binary': body['binary'],
                'status': status
            },
        }
        # Only echo the reason back when one was supplied.
        if reason:
            ret_value['service']['disabled_reason'] = reason
        self._update(context, body['host'], body['binary'], params_to_update)
        return ret_value
    def _forced_down(self, body, context):
        """Set or unset forced_down flag for the service"""
        try:
            forced_down = body["forced_down"]
        except KeyError:
            msg = _('Missing forced_down field')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        host = body['host']
        binary = body['binary']
        ret_value = {'service': {'host': host,
                                 'binary': binary,
                                 'forced_down': forced_down}}
        self._update(context, host, binary, {"forced_down": forced_down})
        return ret_value
    def _update(self, context, host, binary, payload):
        """Do the actual PUT/update"""
        try:
            self.host_api.service_update(context, host, binary, payload)
        except exception.HostBinaryNotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())
    def _perform_action(self, req, id, body, actions):
        """Calculate action dictionary dependent on provided fields"""
        context = req.environ['nova.context']
        authorize(context)
        try:
            action = actions[id]
        except KeyError:
            msg = _("Unknown action")
            raise webob.exc.HTTPNotFound(explanation=msg)
        return action(body, context)
    @wsgi.response(204)
    @extensions.expected_errors((400, 404))
    def delete(self, req, id):
        """Deletes the specified service."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            utils.validate_integer(id, 'id')
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        try:
            self.host_api.service_delete(context, id)
        except exception.ServiceNotFound:
            explanation = _("Service %s not found.") % id
            raise webob.exc.HTTPNotFound(explanation=explanation)
    @extensions.expected_errors(())
    def index(self, req):
        """Return a list of all running services. Filter by host & service
        name
        """
        # forced_down is only exposed from microversion 2.11 onwards.
        if api_version_request.is_supported(req, min_version='2.11'):
            _services = self._get_services_list(req, ['forced_down'])
        else:
            _services = self._get_services_list(req)
        return {'services': _services}
    @extensions.expected_errors((400, 404))
    @validation.schema(services.service_update, '2.0', '2.10')
    @validation.schema(services.service_update_v211, '2.11')
    def update(self, req, id, body):
        """Perform service update"""
        # "force-down" is an additional action from microversion 2.11.
        if api_version_request.is_supported(req, min_version='2.11'):
            actions = self.actions.copy()
            actions["force-down"] = self._forced_down
        else:
            actions = self.actions
        return self._perform_action(req, id, body, actions)
class Services(extensions.V21APIExtensionBase):
    """Services support."""
    name = "Services"
    alias = ALIAS
    version = 1
    def get_resources(self):
        """Register the os-services resource backed by ServiceController."""
        controller = ServiceController()
        return [extensions.ResourceExtension(ALIAS, controller)]
    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
|
{
"content_hash": "e1a0ace367bb72bdcf0c4f79cb596e15",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 77,
"avg_line_length": 34.345971563981045,
"alnum_prop": 0.5631295708569063,
"repo_name": "HybridF5/nova",
"id": "0c21442dd1c0031e73ef832b7777d68b7e1764c0",
"size": "7849",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Set verbose names on three models and add the Circuito.imagen field."""
    dependencies = [
        ('competicion', '0003_auto_20150825_1042'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='clasificacion',
            options={'verbose_name': 'Clasificacion', 'verbose_name_plural': 'Clasificaciones'},
        ),
        migrations.AlterModelOptions(
            name='granpremio',
            options={'verbose_name': 'Gran Premio', 'verbose_name_plural': 'Grandes Premios'},
        ),
        migrations.AlterModelOptions(
            name='piloto',
            options={'verbose_name': 'Piloto', 'verbose_name_plural': 'Pilotos'},
        ),
        migrations.AddField(
            model_name='circuito',
            name='imagen',
            # default=' ' backfills existing rows; preserve_default=False
            # keeps the default out of the model afterwards.
            field=models.TextField(default=' '),
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "cf7703b11e44ae192db6b5200a95b02b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 96,
"avg_line_length": 29.967741935483872,
"alnum_prop": 0.5758880516684607,
"repo_name": "lendoly/djangoF1",
"id": "f48f9d463570dd4a3b7ad4dcc872e0cefda94b3e",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SC/competicion/migrations/0004_auto_20150826_1109.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2780"
},
{
"name": "Python",
"bytes": "20899"
}
],
"symlink_target": ""
}
|
from urllib.parse import urlparse
from django.conf import settings
from django.utils.encoding import force_str
from wagtail.core.models import Page, Site
from wagtail.core.utils import resolve_model_string
class BadRequestError(Exception):
    """Raised when an API request is malformed."""
    pass
def get_base_url(request=None):
    """Return '<scheme>://<netloc>' for building absolute API URLs, or None.

    Prefers settings.WAGTAILAPI_BASE_URL; when that is unset, falls back to
    the root URL of the Site matched from *request*.
    """
    configured = getattr(settings, 'WAGTAILAPI_BASE_URL', None)
    if configured is None and request:
        matched_site = Site.find_for_request(request)
        if matched_site:
            configured = matched_site.root_url
    if not configured:
        return None
    parsed = urlparse(force_str(configured))
    # We only want the scheme and netloc
    return '%s://%s' % (parsed.scheme, parsed.netloc)
def get_full_url(request, path):
    """Prefix *path* with the API base URL (empty prefix when none is set)."""
    prefix = get_base_url(request) or ''
    return prefix + path
def get_object_detail_url(router, request, model, pk):
    """Return the full detail URL for *model*/*pk*, or None if unroutable."""
    path = router.get_object_detail_urlpath(model, pk)
    if not path:
        return None
    return get_full_url(request, path)
def page_models_from_string(string):
    """Resolve a comma-separated list of model specs into a tuple of Page
    subclasses; raises ValueError for any model that is not a Page."""
    def _resolve(spec):
        model = resolve_model_string(spec)
        if not issubclass(model, Page):
            raise ValueError("Model is not a page")
        return model
    return tuple(_resolve(part) for part in string.split(','))
class FieldsParameterParseError(ValueError):
    """Raised on any syntax error in the ?fields= parameter."""
    pass
def parse_fields_parameter(fields_str):
    """
    Parses the ?fields= GET parameter. As this parameter is supposed to be used
    by developers, the syntax is quite tight (eg, not allowing any whitespace).
    Having a strict syntax allows us to extend the it at a later date with less
    chance of breaking anyone's code.
    This function takes a string and returns a list of tuples representing each
    top-level field. Each tuple contains three items:
     - The name of the field (string)
     - Whether the field has been negated (boolean)
     - A list of nested fields if there are any, None otherwise
    Some examples of how this function works:
    >>> parse_fields_parameter("foo")
    [
        ('foo', False, None),
    ]
    >>> parse_fields_parameter("foo,bar")
    [
        ('foo', False, None),
        ('bar', False, None),
    ]
    >>> parse_fields_parameter("-foo")
    [
        ('foo', True, None),
    ]
    >>> parse_fields_parameter("foo(bar,baz)")
    [
        ('foo', False, [
            ('bar', False, None),
            ('baz', False, None),
        ]),
    ]
    It raises a FieldsParameterParseError (subclass of ValueError) if it
    encounters a syntax error
    """
    def get_position(current_str):
        # Offset into the original input, for error messages. Works because
        # the parsers always consume from the front of the string.
        return len(fields_str) - len(current_str)
    def parse_field_identifier(fields_str):
        """Consume one identifier (optionally '-'-negated, or '*'/'_');
        returns (ident, negated, remaining_input)."""
        first_char = True
        negated = False
        ident = ""
        while fields_str:
            char = fields_str[0]
            if char in ['(', ')', ',']:
                # Delimiter: the identifier (which must be non-empty) ends here.
                if not ident:
                    raise FieldsParameterParseError("unexpected char '%s' at position %d" % (char, get_position(fields_str)))
                if ident in ['*', '_'] and char == '(':
                    # * and _ cannot have nested fields
                    raise FieldsParameterParseError("unexpected char '%s' at position %d" % (char, get_position(fields_str)))
                return ident, negated, fields_str
            elif char == '-':
                # '-' is only valid as the very first character (negation).
                if not first_char:
                    raise FieldsParameterParseError("unexpected char '%s' at position %d" % (char, get_position(fields_str)))
                negated = True
            elif char in ['*', '_']:
                if ident and char == '*':
                    raise FieldsParameterParseError("unexpected char '%s' at position %d" % (char, get_position(fields_str)))
                ident += char
            elif char.isalnum() or char == '_':
                if ident == '*':
                    # * can only be on its own
                    raise FieldsParameterParseError("unexpected char '%s' at position %d" % (char, get_position(fields_str)))
                ident += char
            elif char.isspace():
                raise FieldsParameterParseError("unexpected whitespace at position %d" % get_position(fields_str))
            else:
                raise FieldsParameterParseError("unexpected char '%s' at position %d" % (char, get_position(fields_str)))
            first_char = False
            fields_str = fields_str[1:]
        return ident, negated, fields_str
    def parse_fields(fields_str, expect_close_bracket=False):
        """Consume a comma-separated field list (recursing into '(...)');
        returns (fields, remaining_input)."""
        first_ident = None
        is_first = True
        fields = []
        while fields_str:
            sub_fields = None
            ident, negated, fields_str = parse_field_identifier(fields_str)
            # Some checks specific to '*' and '_'
            if ident in ['*', '_']:
                if not is_first:
                    raise FieldsParameterParseError("'%s' must be in the first position" % ident)
                if negated:
                    raise FieldsParameterParseError("'%s' cannot be negated" % ident)
            if fields_str and fields_str[0] == '(':
                if negated:
                    # Negated fields cannot contain subfields
                    raise FieldsParameterParseError("unexpected char '(' at position %d" % get_position(fields_str))
                sub_fields, fields_str = parse_fields(fields_str[1:], expect_close_bracket=True)
            if is_first:
                first_ident = ident
            else:
                # Negated fields can't be used with '_'
                if first_ident == '_' and negated:
                    # _,foo is allowed but _,-foo is not
                    raise FieldsParameterParseError("negated fields with '_' doesn't make sense")
                # Additional fields without sub fields can't be used with '*'
                if first_ident == '*' and not negated and not sub_fields:
                    # *,foo(bar) and *,-foo are allowed but *,foo is not
                    raise FieldsParameterParseError("additional fields with '*' doesn't make sense")
            fields.append((ident, negated, sub_fields))
            if fields_str and fields_str[0] == ')':
                if not expect_close_bracket:
                    raise FieldsParameterParseError("unexpected char ')' at position %d" % get_position(fields_str))
                return fields, fields_str[1:]
            if fields_str and fields_str[0] == ',':
                fields_str = fields_str[1:]
                # A comma can not exist immediately before another comma or the end of the string
                if not fields_str or fields_str[0] == ',':
                    raise FieldsParameterParseError("unexpected char ',' at position %d" % get_position(fields_str))
            is_first = False
        if expect_close_bracket:
            # This parser should've exited with a close bracket but instead we
            # hit the end of the input. Raise an error
            raise FieldsParameterParseError("unexpected end of input (did you miss out a close bracket?)")
        return fields, fields_str
    fields, _ = parse_fields(fields_str)
    return fields
def parse_boolean(value):
    """
    Parses strings into booleans using the following mapping (case-sensitive):
        'true'  => True
        'false' => False
        '1'     => True
        '0'     => False
    Any other value raises a ValueError.
    """
    if value in ('true', '1'):
        return True
    if value in ('false', '0'):
        return False
    raise ValueError("expected 'true' or 'false', got '%s'" % value)
|
{
"content_hash": "014b353a0b25497acce6608623a728c8",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 125,
"avg_line_length": 32.668103448275865,
"alnum_prop": 0.5739543475392532,
"repo_name": "gasman/wagtail",
"id": "92a1f739ef6672b49e7642122a25e3e9150b0f61",
"size": "7579",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "wagtail/api/v2/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3390"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "512773"
},
{
"name": "JavaScript",
"bytes": "431502"
},
{
"name": "Makefile",
"bytes": "984"
},
{
"name": "Python",
"bytes": "4980441"
},
{
"name": "SCSS",
"bytes": "267482"
},
{
"name": "Shell",
"bytes": "6692"
},
{
"name": "TypeScript",
"bytes": "233366"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
from unittest import TestCase
from django import template
from django.utils.translation import ugettext_lazy as _
from pymorphy.py3k import PY3
from .templatetags.pymorphy_tags import inflect, plural, inflect_marked
class PymorphyDjangoTestCase(TestCase):
    """Shared base: builds assertion messages that are printable on Python 2."""
    def _msg(self, fmt, w1, w2):
        # On Python 3 the default unittest failure message is sufficient.
        if PY3:
            return None
        # console fix for python 2: encode to utf8 so Cyrillic text prints
        return fmt.encode('utf8') % (w1.encode('utf8'), w2.encode('utf8'))
class InflectMarkedTagTest(PymorphyDjangoTestCase):
    """inflect_marked: only the [[...]]-marked parts of the phrase are inflected."""
    def assertInflected(self, phrase, form, result):
        inflected_word = inflect_marked(phrase, form)
        err_msg = self._msg("%s != %s" , inflected_word, result)
        self.assertEqual(inflected_word, result, err_msg)
    def test_basic_no_inflect(self):
        self.assertInflected('[[лошадь]] Пржевальского', 'дт', 'лошади Пржевальского')
        self.assertInflected('Москва', 'пр', 'Москва')
        self.assertInflected('[[Москва]]', 'пр', 'Москве')
        self.assertInflected('[[Москва]]-сити', 'пр', 'Москве-сити')
    def test_two_words_no_inflect(self):
        self.assertInflected('[[лошадь]] Пржевальского и [[красный конь]] Кузьмы Петрова-Водкина',
                             'дт',
                             'лошади Пржевальского и красному коню Кузьмы Петрова-Водкина')
class InflectTagTest(PymorphyDjangoTestCase):
    """inflect: the whole phrase is inflected; [[...]] marks parts to keep as-is."""
    def assertInflected(self, phrase, form, result):
        inflected_word = inflect(phrase, form)
        err_msg = self._msg("%s != %s" , inflected_word, result)
        self.assertEqual(inflected_word, result, err_msg)
    def test_word_case(self):
        # letter case of the input must be preserved
        self.assertInflected('Котопес', '', 'Котопес')
        self.assertInflected('ВАСЯ', '', 'ВАСЯ')
        self.assertInflected('котопес', '', 'котопес')
    def test_one_word(self):
        self.assertInflected('Москва', 'пр', 'Москве')
        self.assertInflected('бутявка', 'мн,тв', 'бутявками')
        self.assertInflected('Петрович', 'дт,отч', 'Петровичу')
    def test_susliki(self):
        self.assertInflected('сусликов', 'тв', 'сусликами')
    def test_complex_phrase(self):
        self.assertInflected('тридцать восемь попугаев и Удав', 'дт',
                             'тридцати восьми попугаям и Удаву')
        self.assertInflected('Пятьдесят девять сусликов', 'тв', 'Пятьюдесятью девятью сусликами')
    def test_name(self):
        self.assertInflected('Геннадий Петрович', 'вн', 'Геннадия Петровича')
        self.assertInflected('Геннадий Петрович', 'дт', 'Геннадию Петровичу')
        self.assertInflected('Геннадий Петрович', 'тв', 'Геннадием Петровичем')
        self.assertInflected('Геннадий Петрович', 'пр', 'Геннадии Петровиче')
    def test_hyphen(self):
        self.assertInflected('Ростов-на-Дону', 'пр', 'Ростове-на-Дону')
    # tests for non-inflectable fragments
    def test_basic_no_inflect(self):
        self.assertInflected('лошадь [[Пржевальского]]', 'дт', 'лошади Пржевальского')
        self.assertInflected('[[Москва]]', 'пр', 'Москва')
        self.assertInflected('Москва', 'пр', 'Москве')
        self.assertInflected('Москва[[-сити]]', 'пр', 'Москве-сити')
    def test_two_words_no_inflect(self):
        self.assertInflected('лошадь [[Пржевальского]] и красный конь [[Кузьмы Петрова-Водкина]]',
                             'дт',
                             'лошади Пржевальского и красному коню Кузьмы Петрова-Водкина')
class PluralTagTest(PymorphyDjangoTestCase):
    """plural: agree a word or phrase with the given amount."""
    def assertPlural(self, phrase, amount, result):
        morphed = plural(phrase, amount)
        err_msg = self._msg("%s != %s" , morphed, result)
        self.assertEqual(morphed, result, err_msg)
    def test_pluralize(self):
        self.assertPlural('бутявка', 1, 'бутявка')
        self.assertPlural('бутявка', 2, 'бутявки')
        self.assertPlural('бутявка', 5, 'бутявок')
        self.assertPlural('Бутявка', 1, 'Бутявка')
    def test_phrase(self):
        self.assertPlural('Геннадий Петрович', 8, 'Геннадиев Петровичей')
    def test_mixed(self):
        # Russian plural agreement depends on the last digit(s) of the amount.
        self.assertPlural('активный пользователь', 1, 'активный пользователь')
        self.assertPlural('активный пользователь', 2, 'активных пользователя')
        self.assertPlural('активный пользователь', 3, 'активных пользователя')
        self.assertPlural('активный пользователь', 4, 'активных пользователя')
        self.assertPlural('активный пользователь', 5, 'активных пользователей')
        self.assertPlural('активный пользователь', 10, 'активных пользователей')
        self.assertPlural('активный пользователь', 21, 'активный пользователь')
class LazyStringTest(PymorphyDjangoTestCase):
    """Ensure template filters work with safe and lazy (i18n) strings."""

    def test_safe_string(self):
        # Inline literal inside the template goes through the inflect filter.
        tpl = template.Template("{% load pymorphy_tags %}{{ 'конь'|inflect:'дт' }}")
        rendered, expected = tpl.render(template.Context()), 'коню'
        err_msg = self._msg("%s != %s" , rendered, expected)
        self.assertEqual(rendered, expected, err_msg)

    def test_i18n_string(self):
        # `_` is presumably a lazy gettext proxy (import not visible here —
        # TODO confirm); the filter must coerce it before inflecting.
        horse = _('конь')
        tpl = template.Template("{% load pymorphy_tags %}{{ horses|inflect:'дт' }}")
        rendered, expected = tpl.render(template.Context({'horses': horse})), 'коню'
        err_msg = self._msg("%s != %s" , rendered, expected)
        self.assertEqual(rendered, expected, err_msg)
|
{
"content_hash": "08685b74975598d902aeff09c14b331b",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 98,
"avg_line_length": 42.142857142857146,
"alnum_prop": 0.6467043314500942,
"repo_name": "kmike/pymorphy",
"id": "6b2e43497c2c13ca841db0bb71e653a1209a19a7",
"size": "6524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymorphy/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "670662"
},
{
"name": "Python",
"bytes": "229420"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import mock
from kombu import Exchange, Queue
from st2common.transport import consumers
from st2common.util.greenpooldispatch import BufferedDispatcher
from st2tests.base import DbTestCase
from tests.unit.base import FakeModelDB
# Throwaway exchange/queue definitions; nothing is actually routed in tests.
FAKE_XCHG = Exchange("st2.tests", type="topic")
FAKE_WORK_Q = Queue("st2.tests.unit", FAKE_XCHG)
class FakeMessageHandler(consumers.MessageHandler):
    """Minimal concrete MessageHandler used to exercise the queue consumer."""

    # Only FakeModelDB payloads are considered valid for this handler.
    message_type = FakeModelDB

    def process(self, payload):
        """No-op; tests patch this method to assert whether it was called."""
        pass
def get_handler():
    """Return a FakeMessageHandler wired to a mock connection."""
    return FakeMessageHandler(mock.MagicMock(), [FAKE_WORK_Q])
class QueueConsumerTest(DbTestCase):
    """Behavioral tests for the plain queue consumer."""

    @mock.patch.object(FakeMessageHandler, "process", mock.MagicMock())
    def test_process_message(self):
        # A payload of the declared message_type must reach process().
        payload = FakeModelDB()
        handler = get_handler()
        handler._queue_consumer._process_message(payload)
        FakeMessageHandler.process.assert_called_once_with(payload)

    @mock.patch.object(FakeMessageHandler, "process", mock.MagicMock())
    def test_process_message_wrong_payload_type(self):
        # A payload of the wrong type is acked but never processed.
        payload = 100
        handler = get_handler()
        mock_message = mock.MagicMock()
        handler._queue_consumer.process(payload, mock_message)
        self.assertTrue(mock_message.ack.called)
        self.assertFalse(FakeMessageHandler.process.called)
class FakeStagedMessageHandler(consumers.StagedMessageHandler):
    """Staged handler stub whose pre-ack step passes the message through."""

    message_type = FakeModelDB

    def pre_ack_process(self, message):
        # Identity transform; runs before the message is acknowledged.
        return message

    def process(self, payload):
        """No-op; tests patch this to verify dispatch behavior."""
        pass
def get_staged_handler():
    """Return a FakeStagedMessageHandler wired to a mock connection."""
    return FakeStagedMessageHandler(mock.MagicMock(), [FAKE_WORK_Q])
class StagedQueueConsumerTest(DbTestCase):
    """Behavioral tests for the staged (ack-then-dispatch) queue consumer."""

    @mock.patch.object(FakeStagedMessageHandler, "pre_ack_process", mock.MagicMock())
    def test_process_message_pre_ack(self):
        # pre_ack_process must run with the payload, then the message is acked.
        payload = FakeModelDB()
        handler = get_staged_handler()
        mock_message = mock.MagicMock()
        handler._queue_consumer.process(payload, mock_message)
        FakeStagedMessageHandler.pre_ack_process.assert_called_once_with(payload)
        self.assertTrue(mock_message.ack.called)

    @mock.patch.object(BufferedDispatcher, "dispatch", mock.MagicMock())
    @mock.patch.object(FakeStagedMessageHandler, "process", mock.MagicMock())
    def test_process_message(self):
        payload = FakeModelDB()
        handler = get_staged_handler()
        mock_message = mock.MagicMock()
        handler._queue_consumer.process(payload, mock_message)
        # Processing is handed off to the dispatcher after the ack...
        BufferedDispatcher.dispatch.assert_called_once_with(
            handler._queue_consumer._process_message, payload
        )
        # ...so invoke the dispatched callable manually to verify it.
        handler._queue_consumer._process_message(payload)
        FakeStagedMessageHandler.process.assert_called_once_with(payload)
        self.assertTrue(mock_message.ack.called)

    def test_process_message_wrong_payload_type(self):
        # Wrong payload type: still acked, silently dropped.
        payload = 100
        handler = get_staged_handler()
        mock_message = mock.MagicMock()
        handler._queue_consumer.process(payload, mock_message)
        self.assertTrue(mock_message.ack.called)
class FakeVariableMessageHandler(consumers.VariableMessageHandler):
    """Handler that routes each message to a callback keyed by payload type."""

    def __init__(self, connection, queues):
        super(FakeVariableMessageHandler, self).__init__(connection, queues)
        # Dispatch table: payload type -> bound handler method.
        self.message_types = {FakeModelDB: self.handle_fake_model}

    def process(self, message):
        # Look up the handler for this concrete type and invoke it.
        handler_function = self.message_types.get(type(message))
        handler_function(message)

    def handle_fake_model(self, fk_db):
        """No-op; tests patch this to assert routing."""
        pass
def get_variable_messages_handler():
    """Return a FakeVariableMessageHandler wired to a mock connection."""
    return FakeVariableMessageHandler(mock.MagicMock(), [FAKE_WORK_Q])
class VariableMessageQueueConsumerTest(DbTestCase):
    """Behavioral tests for the variable-message queue consumer."""

    @mock.patch.object(
        FakeVariableMessageHandler, "handle_fake_model", mock.MagicMock()
    )
    def test_process_message(self):
        # A FakeModelDB payload must be routed to handle_fake_model.
        payload = FakeModelDB()
        handler = get_variable_messages_handler()
        handler._queue_consumer._process_message(payload)
        FakeVariableMessageHandler.handle_fake_model.assert_called_once_with(payload)

    @mock.patch.object(FakeVariableMessageHandler, "process", mock.MagicMock())
    def test_process_message_wrong_payload_type(self):
        # An unregistered payload type is acked but never processed.
        payload = 100
        handler = get_variable_messages_handler()
        mock_message = mock.MagicMock()
        handler._queue_consumer.process(payload, mock_message)
        self.assertTrue(mock_message.ack.called)
        self.assertFalse(FakeVariableMessageHandler.process.called)
|
{
"content_hash": "19f869b8e1e9af2bd19754f6641c1778",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 85,
"avg_line_length": 35.568,
"alnum_prop": 0.7091767881241565,
"repo_name": "Plexxi/st2",
"id": "463eb0def4b3aa5d97d8a537d116d58ef52d7fcf",
"size": "5074",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/tests/unit/test_queue_consumer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
from usergrid import Usergrid
__author__ = 'ApigeeCorporation'
def main():
    """Demo: recreate pet 'max' and owner 'jeff', connect then disconnect them.

    NOTE: this is a Python 2 script (print statements).
    """
    Usergrid.init(org_id='jwest1',
                  app_id='sandbox')

    # Start from a clean slate: remove leftovers from previous runs.
    response = Usergrid.DELETE('pets', 'max')

    if not response.ok:
        print 'Failed to delete max: %s' % response
        exit()

    response = Usergrid.DELETE('owners', 'jeff')

    if not response.ok:
        print 'Failed to delete Jeff: %s' % response
        exit()

    response = Usergrid.POST('pets', {'name': 'max'})

    if response.ok:
        pet = response.first()
        print pet

        response = Usergrid.POST('owners', {'name': 'jeff'})

        if response.ok:
            owner = response.first()
            print owner

            # Create the connection pet --ownedBy--> owner ...
            response = pet.connect('ownedBy', owner)

            if response.ok:
                print 'Connected!'

                # ... and immediately tear it down again.
                response = pet.disconnect('ownedBy', owner)

                if response.ok:
                    print 'all done!'
                else:
                    print response
            else:
                print 'failed to connect: %s' % response
        else:
            print 'Failed to create Jeff: %s' % response
    else:
        print response

# Runs on import as well as on direct execution.
main()
|
{
"content_hash": "1e953061b6da6247e0e778cf17aa6454",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 60,
"avg_line_length": 23.470588235294116,
"alnum_prop": 0.5129490392648287,
"repo_name": "jwest-apigee/usergrid-python",
"id": "a829736f9e3c8024db4d2084b4d0378b51784a3f",
"size": "2029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample_app.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31327"
}
],
"symlink_target": ""
}
|
from swgpy.object import *


def create(kernel):
    """Build the shared jewelry-setting crafting component template."""
    tangible = Tangible()
    tangible.template = "object/tangible/component/clothing/shared_jewelry_setting.iff"
    tangible.attribute_template_id = -1
    tangible.stfName("craft_clothing_ingredients_n","jewelry_setting")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return tangible
|
{
"content_hash": "9585eb44ccb72be603f096a2f5303014",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.7181008902077152,
"repo_name": "anhstudios/swganh",
"id": "6c4b973e8b26127187e80e483c64d46538f5f4a5",
"size": "482",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/component/clothing/shared_jewelry_setting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
WSGI config for pasteit project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Point Django at the project settings before the application is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pasteit.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# NOTE(review): the sys.path additions further below run AFTER this import;
# they presumably only work because the WSGI server sets the path separately
# — verify the deployment configuration.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
import sys

# Make both the project root and the package directory importable.
for path in ('/var/www/qrt/htdocs/p', '/var/www/qrt/htdocs/p/pasteit'):
    if path not in sys.path:
        sys.path.append(path)
|
{
"content_hash": "ed7d3d39592503b27e18d22c9ecf9df9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 35.67567567567568,
"alnum_prop": 0.7833333333333333,
"repo_name": "spezifanta/Paste-It",
"id": "241ad558ee90abc659b63f98117f4deceef593b3",
"size": "1320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pasteit/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18770"
}
],
"symlink_target": ""
}
|
"""Offer numeric state listening automation rules."""
import logging
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import (
CONF_ABOVE,
CONF_ATTRIBUTE,
CONF_BELOW,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import CALLBACK_TYPE, HassJob, callback
from homeassistant.helpers import condition, config_validation as cv, template
from homeassistant.helpers.event import (
async_track_same_state,
async_track_state_change_event,
)
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
def validate_above_below(value):
    """Validate that above and below can co-exist."""
    lower = value.get(CONF_ABOVE)
    upper = value.get(CONF_BELOW)

    # Only a combined above+below trigger can be contradictory.
    if lower is not None and upper is not None and lower > upper:
        raise vol.Invalid(
            f"A value can never be above {lower} and below {upper} at the same time. You probably want two different triggers.",
        )

    return value
# Trigger configuration schema: requires at least one of below/above and
# rejects impossible above > below combinations via validate_above_below.
TRIGGER_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Required(CONF_PLATFORM): "numeric_state",
            vol.Required(CONF_ENTITY_ID): cv.entity_ids,
            vol.Optional(CONF_BELOW): vol.Coerce(float),
            vol.Optional(CONF_ABOVE): vol.Coerce(float),
            vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_FOR): cv.positive_time_period_template,
            vol.Optional(CONF_ATTRIBUTE): cv.match_all,
        }
    ),
    cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
    validate_above_below,
)

_LOGGER = logging.getLogger(__name__)
async def async_attach_trigger(
    hass, config, action, automation_info, *, platform_type="numeric_state"
) -> CALLBACK_TYPE:
    """Listen for state changes based on configuration.

    Returns an unsubscribe callback that removes the state listener and any
    pending "for:" trackers.
    """
    entity_id = config.get(CONF_ENTITY_ID)
    below = config.get(CONF_BELOW)
    above = config.get(CONF_ABOVE)
    time_delta = config.get(CONF_FOR)
    template.attach(hass, time_delta)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    # Per-entity unsubscribe callbacks for pending "for:" timers.
    unsub_track_same = {}
    # Entities currently matching the criteria (so we fire only on entry).
    entities_triggered = set()
    # Per-entity rendered "for:" durations.
    period: dict = {}
    attribute = config.get(CONF_ATTRIBUTE)
    job = HassJob(action)

    if value_template is not None:
        value_template.hass = hass

    @callback
    def check_numeric_state(entity, from_s, to_s):
        """Return True if criteria are now met."""
        if to_s is None:
            return False

        variables = {
            "trigger": {
                "platform": "numeric_state",
                "entity_id": entity,
                "below": below,
                "above": above,
                "attribute": attribute,
            }
        }
        return condition.async_numeric_state(
            hass, to_s, below, above, value_template, variables, attribute
        )

    @callback
    def state_automation_listener(event):
        """Listen for state changes and calls action."""
        entity = event.data.get("entity_id")
        from_s = event.data.get("old_state")
        to_s = event.data.get("new_state")

        @callback
        def call_action():
            """Call action with right context."""
            hass.async_run_hass_job(
                job,
                {
                    "trigger": {
                        "platform": platform_type,
                        "entity_id": entity,
                        "below": below,
                        "above": above,
                        "from_state": from_s,
                        "to_state": to_s,
                        # With a "for:" the rendered period is reported instead.
                        "for": time_delta if not time_delta else period[entity],
                        "description": f"numeric state of {entity}",
                    }
                },
                to_s.context,
            )

        matching = check_numeric_state(entity, from_s, to_s)

        if not matching:
            # Left the range: allow the trigger to fire again on re-entry.
            entities_triggered.discard(entity)
        elif entity not in entities_triggered:
            entities_triggered.add(entity)

            if time_delta:
                variables = {
                    "trigger": {
                        "platform": "numeric_state",
                        "entity_id": entity,
                        "below": below,
                        "above": above,
                    }
                }

                try:
                    # "for:" may itself be a template; render it per entity.
                    period[entity] = cv.positive_time_period(
                        template.render_complex(time_delta, variables)
                    )
                except (exceptions.TemplateError, vol.Invalid) as ex:
                    _LOGGER.error(
                        "Error rendering '%s' for template: %s",
                        automation_info["name"],
                        ex,
                    )
                    entities_triggered.discard(entity)
                    return

                # Fire only if the state keeps matching for the whole period.
                unsub_track_same[entity] = async_track_same_state(
                    hass,
                    period[entity],
                    call_action,
                    entity_ids=entity,
                    async_check_same_func=check_numeric_state,
                )
            else:
                call_action()

    unsub = async_track_state_change_event(hass, entity_id, state_automation_listener)

    @callback
    def async_remove():
        """Remove state listeners async."""
        unsub()
        for async_remove in unsub_track_same.values():
            async_remove()
        unsub_track_same.clear()

    return async_remove
|
{
"content_hash": "23763ac2f88a6440f9ad0846eb146bd4",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 128,
"avg_line_length": 31.412429378531073,
"alnum_prop": 0.5339928057553956,
"repo_name": "balloob/home-assistant",
"id": "a6e3b33ae97298bb1d58eb3a3e738e93ba3d56c2",
"size": "5560",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homeassistant/triggers/numeric_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "12903869"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
class WebPayError(Exception):
    """Root of the webpay exception hierarchy.

    Every error exposes the HTTP status code and the raw error payload
    returned by the API in addition to the human-readable message.
    """

    def __init__(self, message, status, error_info):
        super(WebPayError, self).__init__(message)
        self.status = status
        self.error_info = error_info
class ApiConnectionError(WebPayError):
    """Raised when the connection to the WebPay API itself fails."""

    def __init__(self, message, status, error_info, cause):
        super(ApiConnectionError, self).__init__(message, status, error_info)
        # Keep the underlying transport exception for debugging.
        self.cause = cause
class ApiError(WebPayError):
    """Raised when the WebPay API answers with an error status (500, 503, etc)."""

    def __init__(self, status, error_info):
        # The displayed message comes straight from the API payload.
        super(ApiError, self).__init__(error_info['message'], status, error_info)
        self.type = error_info['type']
class AuthenticationError(WebPayError):
    """Raised when authentication fails.

    In most cases the API key is invalid.
    """

    def __init__(self, status, error_info):
        super(AuthenticationError, self).__init__(
            error_info['message'], status, error_info)
class CardError(WebPayError):
    """Raised when the given card information is invalid.

    A system should make its end user check the input information.
    """

    def __init__(self, status, error_info):
        super(CardError, self).__init__(error_info['message'], status, error_info)
        self.type = error_info['type']
        self.code = error_info['code']
        # 'param' is optional in the error payload.
        self.param = error_info.get('param')
class InvalidRequestError(WebPayError):
    """Raised when the request parameters contain an invalid field."""

    def __init__(self, status, error_info):
        super(InvalidRequestError, self).__init__(
            error_info['message'], status, error_info)
        self.type = error_info['type']
        # 'param' is optional in the error payload.
        self.param = error_info.get('param')

    @staticmethod
    def empty_id_error():
        """Factory for the error reported when a required id is empty."""
        return InvalidRequestError(None, {
            'message': 'id must not be empty',
            'type': 'invalid_request_error',
            'param': 'id'
        })
|
{
"content_hash": "b8f9e7bd65b512b2b63e416836b6596d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 28.535211267605632,
"alnum_prop": 0.6253701875616979,
"repo_name": "yamaneko1212/webpay-python",
"id": "79ee961d46dbf3e9bf0417355341abac5d33a3cd",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webpay/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "253628"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from models import ModelGroup
from forms import ModelGroupForm
class ModelGroupAdmin(admin.ModelAdmin):
    """Admin for ModelGroup using the autocomplete-enabled form."""

    form = ModelGroupForm

admin.site.register(ModelGroup, ModelGroupAdmin)
|
{
"content_hash": "a7c67947b8afa62763371f82cb7d58cc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.827906976744186,
"repo_name": "spookylukey/django-autocomplete-light",
"id": "d8a8274206f08aa2c53d0e5f15fd35c3785fdf3b",
"size": "215",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test_project/generic_m2m_autocomplete/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13929"
},
{
"name": "JavaScript",
"bytes": "106201"
},
{
"name": "Python",
"bytes": "180473"
},
{
"name": "Shell",
"bytes": "5144"
}
],
"symlink_target": ""
}
|
""" Sahana Eden Guided Tour Model
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3GuidedTourModel",
"tour_rheader",
"tour_builder",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3GuidedTourModel(S3Model):
    """ Details about which guided tours this Person has completed """

    names = ["tour_config",
             "tour_details",
             "tour_user",
             ]

    def model(self):
        """Define the tour_config, tour_details and tour_user tables."""

        T = current.T
        db = current.db
        NONE = current.messages["NONE"]
        s3 = current.response.s3

        add_component = self.add_component
        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table
        person_id = self.pr_person_id

        # ---------------------------------------------------------------------
        # Guided tours that are available
        #
        tablename = "tour_config"
        table = define_table(tablename,
                             Field("name",
                                   represent=lambda v: v or NONE,
                                   label=T("Display name")),
                             Field("code",
                                   length=255,
                                   notnull=True,
                                   unique=True,
                                   represent=lambda v: v or NONE,
                                   label=T("Unique code")),
                             Field("controller",
                                   represent=lambda v: v or NONE,
                                   label=T("Controller tour is activated")),
                             Field("function",
                                   represent=lambda v: v or NONE,
                                   label=T("Function tour is activated")),
                             Field("autostart", "boolean",
                                   default=False,
                                   represent=lambda v: \
                                       T("Yes") if v else T("No"),
                                   label=T("Auto start")),
                             Field("role", "string",
                                   represent=lambda v: v or NONE,
                                   label=T("User's role")),
                             * s3_meta_fields()
                             )

        # CRUD strings
        ADD_TOUR = T("Add Tour")
        crud_strings[tablename] = Storage(
            title_create = ADD_TOUR,
            title_display = T("Tour Configuration"),
            title_list = T("Tours"),
            title_update = T("Edit Tour"),
            title_search = T("Search Tours"),
            subtitle_create = T("Add New Tour"),
            label_list_button = T("List Tours"),
            label_create_button = ADD_TOUR,
            label_delete_button = T("Delete Tour"),
            msg_record_created = T("Tour added"),
            msg_record_modified = T("Tour updated"),
            msg_record_deleted = T("Tour deleted"),
            msg_list_empty = T("No Tours currently registered"))

        # Reusable foreign key for the component tables below
        represent = S3Represent(lookup=tablename, translate=True)
        tour_config_id = S3ReusableField("tour_config_id", table,
                                         requires = IS_NULL_OR(
                                                        IS_ONE_OF(db, "tour_config.id",
                                                                  represent,
                                                                  sort=True)),
                                         represent=represent,
                                         label=T("Tour Name"),
                                         ondelete="SET NULL")

        # Details as component of Tour Configs
        add_component("tour_details", tour_config="tour_config_id")

        # Users as component of Tour Configs
        add_component("tour_user", tour_config="tour_config_id")

        # ---------------------------------------------------------------------
        # Details of the tour.
        #
        tablename = "tour_details"
        table = define_table(tablename,
                             tour_config_id(),
                             Field("posn", "integer",
                                   default=0,
                                   label=T("Position in tour")),
                             Field("controller",
                                   represent=lambda v: v or NONE,
                                   label=T("Controller name")),
                             Field("function",
                                   represent=lambda v: v or NONE,
                                   label=T("Function name")),
                             Field("args",
                                   represent=lambda v: v or NONE,
                                   label=T("Arguments")),
                             Field("tip_title",
                                   represent=lambda v: v or NONE,
                                   label=T("Title")),
                             Field("tip_details",
                                   represent=lambda v: v or NONE,
                                   label=T("Details")),
                             Field("html_id",
                                   represent=lambda v: v or NONE,
                                   label=T("HTML ID")),
                             Field("html_class",
                                   represent=lambda v: v or NONE,
                                   label=T("HTML class")),
                             Field("button",
                                   represent=lambda v: v or NONE,
                                   label=T("Button name")),
                             Field("tip_location",
                                   represent=lambda v: v or NONE,
                                   # Fixed label typo: was "Loctaion of tip"
                                   label=T("Location of tip")),
                             Field("datatable_id",
                                   represent=lambda v: v or NONE,
                                   label=T("DataTable ID")),
                             Field("datatable_row",
                                   represent=lambda v: v or NONE,
                                   label=T("DataTable row")),
                             Field("redirect",
                                   represent=lambda v: v or NONE,
                                   label=T("Redirect URL")),
                             )

        # CRUD strings
        ADD_DETAILS = T("Add Details")
        crud_strings[tablename] = Storage(
            title_create = ADD_DETAILS,
            title_display = T("Tour Details"),
            title_list = T("Details"),
            title_update = T("Edit Details"),
            title_search = T("Search Details"),
            subtitle_create = T("Add New Detail"),
            label_list_button = T("List Details"),
            label_create_button = ADD_DETAILS,
            label_delete_button = T("Delete Detail"),
            msg_record_created = T("Detail added"),
            msg_record_modified = T("Detail updated"),
            msg_record_deleted = T("Detail deleted"),
            msg_list_empty = T("No Details currently registered"))

        configure(tablename,
                  orderby=table.tour_config_id | table.posn
                  )

        # ---------------------------------------------------------------------
        # Details of the tours that the user has taken.
        #
        tablename = "tour_user"
        table = define_table(tablename,
                             person_id(label = T("Person"),
                                       ondelete="CASCADE"),
                             tour_config_id(),
                             Field("place",
                                   represent=lambda v: v or NONE,
                                   label=T("Where reached")),
                             Field("resume",
                                   represent=lambda v: v or NONE,
                                   label=T("URL to resume tour")),
                             Field("completed", "boolean",
                                   default=False,
                                   represent=lambda v: \
                                       T("Yes") if v else T("No"),
                                   label=T("Completed tour?")),
                             Field("trip_counter", "integer",
                                   default=0,
                                   label=T("Times Completed")),
                             )

        # CRUD strings
        ADD_USER = T("Add User")
        crud_strings[tablename] = Storage(
            title_create = ADD_USER,
            title_display = T("Tour User"),
            title_list = T("Users"),
            title_update = T("Edit User"),
            title_search = T("Search Users"),
            subtitle_create = T("Add New User"),
            label_list_button = T("List Users"),
            label_create_button = ADD_USER,
            label_delete_button = T("Delete User"),
            msg_record_created = T("User added"),
            msg_record_modified = T("User updated"),
            msg_record_deleted = T("User deleted"),
            msg_list_empty = T("No users have taken a tour"))

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return Storage(
            tour_config_id = tour_config_id,
        )
# =============================================================================
def tour_rheader(r):
    """ Resource Header for Guided Tour """

    if r.representation != "html":
        return None

    tour = r.record
    if not tour:
        return None

    T = current.T
    tabs = [(T("Edit Details"), None),
            (T("Details"), "details"),
            (T("People"), "user"),
            ]
    rheader_tabs = s3_rheader_tabs(r, tabs)

    table = r.table
    summary = TABLE(TR(TH("%s: " % table.name.label),
                       tour.name,
                       ),
                    TR(TH("%s: " % table.code.label),
                       tour.code,
                       ),
                    )
    return DIV(summary, rheader_tabs)
# =============================================================================
def tour_builder(output):
    """
    Helper function to attach a guided tour (if required) to the output

    Reads the requested tour (``?tour=<id>``) from the request, collects the
    matching tour_details rows for the current controller/function, builds a
    hidden joyride <ol> plus hidden JSON config inputs, records progress in
    tour_user, and appends the required scripts/stylesheets.
    """
    request = current.request
    auth = current.auth
    s3db = current.s3db
    db = current.db
    s3 = current.response.s3
    T = current.T

    req_vars = request.vars
    tour_id = req_vars.tour
    # Now see if the details are on the database for this user
    tour = None
    user_id = None
    if auth.is_logged_in():
        user_id = auth.s3_logged_in_person()
        # Find out if the user has done this tour before
        utable = s3db.tour_user
        uquery = (utable.person_id == user_id) & \
                 (utable.tour_config_id == tour_id)
        tour = db(uquery).select(utable.id,
                                 utable.completed,
                                 utable.place,
                                 utable.resume,
                                 limitby=(0, 1)).first()
        # If the tour has just been started (from the menu) then
        # it may be necessary to redirect to a different controller
        # @todo: does place need to be changed to controller and function?
        if not req_vars.tour_running:
            if (tour and not tour.completed and tour.place != request.controller):
                redirect("%s?tour=%s" %(tour.resume, tour_id))
    # get the details from the database
    dtable = s3db.tour_details
    dquery = (dtable.tour_config_id == tour_id) &\
             (dtable.controller == request.controller) &\
             (dtable.function == request.function)
    details = db(dquery).select(dtable.args,
                                dtable.tip_title,
                                dtable.tip_details,
                                dtable.button,
                                dtable.tip_location,
                                dtable.html_id,
                                dtable.html_class,
                                dtable.datatable_id,
                                dtable.datatable_row,
                                dtable.redirect,
                                orderby=(dtable.posn)
                                )
#    tour_filename = os.path.join(request.folder,
#                                 "private",
#                                 "tour",
#                                 tour_name)
#    tour_file = open (tour_filename, "rb")
#    # now open the details of the guided_tour into a dictionary
#    import csv
#    tour_details = csv.DictReader(tour_file, skipinitialspace=True)
    # load the list of tour items in the html
    joyride_OL = OL(_id="joyrideID_1")
    # Config payloads later serialized into hidden inputs for the JS side.
    pre_step_data = []
    post_step_data = []
    post_ride_data = []
    last_row = None
    last_used = None
    req_args = request.args
    cnt = -1
    for row in details:
        if row.args:
            args = row.args.split(",")
        else:
            args = []
        # if the page has a nested login form then "login" will be added to
        # the req_args list so it needs to be added to the args list as well
        if "login" in req_args:
            if "login" not in args:
                args.append("login")
        # The following will capture the actual id used for the req_arg
        # Example org/organisation/10, where 10 is the id from the database
        posn = 0
        for arg in args:
            if arg == "dt_id":
                args[posn] = req_args[posn]
            posn += 1
        # Now check that the tour url matches the current url
        if (args == req_args):
            cnt += 1 # number of records used in this part of the tour
            if row.datatable_id:
                dt_id = row.datatable_id
#                cols = []
#                if "DataTable_columns" in row:
#                    cols = row["DataTable_columns"].split(",")
                row_num = 0
                if row.datatable_row:
                    row_num = row.datatable_row
                # Now set this up for the pre-processor hook in joyride
                pre_step_data.append([cnt, dt_id, row_num])
            if row.redirect:
                # redirect format: "controller[,function[,arg...]]"
                redirect_row = row.redirect.split(",")
                if len(redirect_row) >= 3:
                    url = URL(c=redirect_row[0],
                              f=redirect_row[1],
                              args=redirect_row[2:],
                              vars={"tour_running":True,
                                    "tour":tour_id}
                              )
                    # NOTE(review): dt_id/row_num may be unbound here if no
                    # datatable step preceded this one — verify data always
                    # pairs "dt_id" redirects with a datatable_id step.
                    if "dt_id" in redirect_row[2]:
                        post_step_data.append([cnt, url, dt_id, row_num])
                elif len(redirect_row) == 2:
                    url = URL(c=redirect_row[0],
                              f=redirect_row[1],
                              vars={"tour_running":True,
                                    "tour":tour_id}
                              )
                    post_step_data.append([cnt, url])
                else:
                    url = URL(c=redirect_row[0],vars={"tour_running":True,
                                                      "tour":tour_id})
                    post_step_data.append([cnt, url])
            # data-* attributes consumed by the joyride plugin
            extra = {}
            if row.html_id:
                extra["_data-id"] = row.html_id
            elif row.html_class:
                extra["_data-class"] = row.html_class
            if row.button:
                extra["_data-button"] = row.button
            else:
                extra["_data-button"] = "Next"
            if row.tip_location:
                extra["_data-options"] = "tipLocation:%s" % row.tip_location.lower()
            else:
                extra["_data-options"] = "tipLocation:right"
            joyride_OL.append(LI(H2(T(row.tip_title)),
                                 P(T(row.tip_details)),
                                 **extra
                                 )
                              )
            last_used = row
        last_row = row
    # The following redirect will be triggered if the user has moved away
    # from the tour, such as by clicking on a tab. However if a tab
    # is part of the tour we are unable to determine if they have moved
    # away or just visiting as part of the tour and so it will continue.
    if len(joyride_OL) == 0:
        del request.vars.tour
        redirect(URL(args=req_args,
                     vars=request.vars))
    if (user_id != None) and (last_row == last_used):
        # set up an AJAX call to record that the tour has been completed
        post_ride_data = [cnt, tour_id]
    joyride_div = DIV(joyride_OL,
                      _class="hidden")
    # add the javascript configuration data
    from gluon.serializers import json as jsons
    if pre_step_data:
        joyride_div.append(INPUT(_type="hidden",
                                 _id="prestep_data",
                                 _name="prestep_data",
                                 _value=jsons(pre_step_data))
                           )
    if post_step_data:
        joyride_div.append(INPUT(_type="hidden",
                                 _id="poststep_data",
                                 _name="poststep_data",
                                 _value=jsons(post_step_data))
                           )
    if post_ride_data:
        joyride_div.append(INPUT(_type="hidden",
                                 _id="postride_data",
                                 _name="postride_data",
                                 _value=jsons(post_ride_data))
                           )
    # Now add the details to the tour_user table
    if user_id != None:
        if tour == None:
            # this user has never done this tour before so create a new record
            utable.insert(person_id = user_id,
                          tour_config_id = tour_id,
                          place = request.controller,
                          resume = request.url)
        else:
            # the user has done some of this tour so update the record
            db(uquery).update(place = request.controller,
                              resume = request.url,
                              completed = False)
    output["joyride_div"] = joyride_div
    # Unminified assets in debug mode, minified otherwise.
    if s3.debug:
        appname = request.application
        s3.scripts.append("/%s/static/scripts/jquery.joyride.js" % appname)
        s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.js" % appname)
        s3.stylesheets.append("plugins/guidedtour.min.css")
    else:
        s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.min.js" % request.application)
        s3.stylesheets.append("plugins/joyride.css")
    return output
# END =========================================================================
|
{
"content_hash": "a29061a782919331486c0d8a741fec82",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 93,
"avg_line_length": 45.15483870967742,
"alnum_prop": 0.42553698147354385,
"repo_name": "sahildua2305/eden",
"id": "0ac0e5726104061361d009b1ab71c45691f663b8",
"size": "21022",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modules/s3db/tour.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import qpid
import sys
import os
from qpid.util import connect
from qpid.connection import Connection
from qpid.datatypes import Message, RangedSet, uuid4
from qpid.queue import Empty
from math import fabs
def getApplicationHeaders(msg):
    """Return the application_headers of the first header frame that
    carries them, or None if no frame does."""
    for frame in msg.headers:
        # EAFP: equivalent to hasattr()/getattr(), returns whatever value
        # the attribute holds (including None) as soon as it exists.
        try:
            return frame.application_headers
        except AttributeError:
            continue
    return None
# Set parameters for login.
# Defaults target a local broker; host/port may be overridden on the
# command line: header_test.py [host] [port]
host="127.0.0.1"
port=5672
user="guest"
password="guest"
if len(sys.argv) > 1 :
    host=sys.argv[1]
if len(sys.argv) > 2 :
    port=int(sys.argv[2])
# Create a connection to the broker and open a session with a fresh id.
socket = connect(host, port)
connection = Connection (sock=socket)
connection.start()
session = connection.session(str(uuid4()))
# Subscribe to the interop test queue and pull one message from it.
q = "header_interop_test_queue"
session.queue_declare(queue=q)
session.message_subscribe(queue=q, destination="received")
queue = session.incoming("received")
queue.start()
msg = queue.get(timeout=10)
# Expected header values (the sender side of the interop test puts these in).
pi = 3.14159265
e = 2.71828
headers = getApplicationHeaders(msg)
pi_ = headers["pi"]
e_ = headers["e"]
session.close(timeout=10)
# Compare received headers to expectations.  pi is compared exactly,
# while e is compared with a tolerance — presumably because e travels in
# a lower-precision (float) encoding; TODO confirm against the sender.
failed = False
if pi != pi_:
    print "got incorrect value for pi: ", pi_, " expected:", pi
    failed = True
if fabs(e - e_) > 0.0001:
    print "got incorrect value for e: ", e_, " expected:", e
    failed = True
# Exit status signals pass/fail to the test harness.
if failed:
    sys.exit(1)
else:
    print "Correct header values received."
    sys.exit(0)
|
{
"content_hash": "463a2b6713eb541f3d02e7cc95566fa7",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 86,
"avg_line_length": 20.742424242424242,
"alnum_prop": 0.6888239590942293,
"repo_name": "gregerts/debian-qpid-cpp",
"id": "d5a2c16c01ec0dc7cef5b3df453b80857b3a95a4",
"size": "2183",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/tests/header_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from functools import wraps
import json
import base64
from urllib import unquote_plus
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import AccessTokenRefreshError
from oauth2client.django_orm import Storage
from oauth2client import xsrfutil
from django.http import HttpResponseRedirect
from django.http import HttpResponseBadRequest
from django.conf import settings
import httplib2
import apiclient.discovery
from models import CredentialsModel
class GApi(object):
    """Helper wrapping Google's OAuth2 web-server flow for Django views.

    Provides two view decorators — ``oauth2_required`` to start/refresh
    the flow and ``oauth2_redirect`` to consume the callback — plus a
    class method to build an authorized Google API service object.
    Credentials are persisted per-user via ``CredentialsModel``.
    """

    def __init__(self, client_id='', client_secret='', scope='', redirect_uri=None):
        # access_type='offline' + approval_prompt='force' ensure a refresh
        # token is issued so credentials can be renewed without the user.
        self.flow = OAuth2WebServerFlow(client_id,
                                        client_secret,
                                        scope,
                                        redirect_uri=redirect_uri,
                                        access_type='offline',
                                        approval_prompt='force')

    def oauth2_required(self, view_function):
        """
        Decorator function that will initiate OAUTH2 WEB flow with google services
        :param view_function: the Django view to protect
        :return: wrapped view that redirects to Google auth when needed
        """
        @wraps(view_function)
        def wrapper(request, *args, **kwargs):
            def oauth2_step1():
                # Build the state blob carried through the auth redirect.
                state = {
                    # token to check on redirect (XSRF protection)
                    'token': xsrfutil.generate_token(settings.SECRET_KEY, request.user)
                }
                # extra params that need to be kept over the auth process
                if 'oauth2_state' in kwargs:
                    state['oauth2_state'] = kwargs['oauth2_state']
                # encode the whole stuff (urlsafe so it survives the URL round trip)
                base64_state = base64.urlsafe_b64encode(str(json.dumps(state)))
                # set the oauth2 state param
                self.flow.params['state'] = base64_state
                authorize_url = self.flow.step1_get_authorize_url()
                return HttpResponseRedirect(authorize_url)

            storage = Storage(CredentialsModel, 'id', request.user, 'credential')
            credential = storage.get()
            if credential is None or credential.invalid is True:
                # No usable credential stored — start the flow from scratch.
                return oauth2_step1()
            else:
                # refresh credential if needed
                if credential.access_token_expired:
                    try:
                        credential.refresh(httplib2.Http())
                    except AccessTokenRefreshError:
                        # Refresh token rejected — re-run the full flow.
                        return oauth2_step1()
                # remove existing oauth2_state params (only meaningful to the flow)
                if 'oauth2_state' in kwargs:
                    del kwargs['oauth2_state']
                return view_function(request, *args, **kwargs)
        return wrapper

    def oauth2_redirect(self, view_function):
        """
        Decorator function to handle the redirect after the OAUTH2 WEB process
        :param view_function: the Django view handling the callback
        :return: wrapped view that validates state and stores the credential
        """
        @wraps(view_function)
        def wrapper(request, *args, **kwargs):
            # decode the oauth2 state param
            state_str = str(request.REQUEST['state'])
            # fix here state might be urlencoded twice along the way and sucks if that happens
            while '%' in state_str:
                state_str = unquote_plus(state_str)
            state = json.loads(base64.urlsafe_b64decode(state_str))
            # validate token (reject forged callbacks)
            if not 'token' in state or not xsrfutil.validate_token(settings.SECRET_KEY, str(state['token']),
                                                                   request.user):
                return HttpResponseBadRequest()
            # save oauth2 credential in db
            credential = self.flow.step2_exchange(request.REQUEST)
            storage = Storage(CredentialsModel, 'id', request.user, 'credential')
            storage.put(credential)
            # put oauth2_state params in kwargs so the view gets them back
            if 'oauth2_state' in state:
                kwargs['oauth2_state'] = state['oauth2_state']
            return view_function(request, *args, **kwargs)
        return wrapper

    @classmethod
    def get_gservice(cls, request, api_name, version):
        """
        Get a google api service
        :param request: the request to check oauth credential
        :param api_name: Google api name ex 'drive'
        :param version: Google api version name ex 'v2'
        :return: the service object
        """
        storage = Storage(CredentialsModel, 'id', request.user, 'credential')
        credential = storage.get()
        http = httplib2.Http()
        http = credential.authorize(http)
        return apiclient.discovery.build(api_name, version, http=http)
|
{
"content_hash": "1f589d639b72afbe9361896852f5c1a0",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 108,
"avg_line_length": 40.93859649122807,
"alnum_prop": 0.5731733447610885,
"repo_name": "Fl0r14n/django_googleapi",
"id": "94c8204f40635327148fae42b0c8ab2605319386",
"size": "4667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdrive/gapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12442"
}
],
"symlink_target": ""
}
|
def run_command(command, project):
    """Placeholder command hook; intentionally does nothing yet.

    :param command: command to execute (currently unused)
    :param project: project context for the command (currently unused)
    """
    return None
|
{
"content_hash": "ccff35d10e229dd568e98c3bd5d45246",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 34,
"avg_line_length": 21.5,
"alnum_prop": 0.7209302325581395,
"repo_name": "pebbie/BIBINT",
"id": "856f5abea922a4a5790edfae6fae0884a3986704",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/engine/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48936"
}
],
"symlink_target": ""
}
|
import sys
from ssssg.config import options
from ssssg.commands import run_ssssg, build_index
def help_text():
    """Return the command-line usage message for ssssg.py."""
    return """usage:
    To index the site:
    python ssssg.py index /path/to/site
    To run the site:
    python ssssg.py run site"""
if __name__ == "__main__":
    # CLI dispatch: ssssg.py (run|index) <site> [extra options...]
    args = sys.argv[1:]
    if len(args) < 2:
        print(help_text())
    elif args[0] == 'run':
        # BUG FIX: the original forwarded *sys.argv[2:], which is the same
        # slice as args[1:], so the site argument (args[1] == sys.argv[2])
        # was passed to run_ssssg/build_index twice — once positionally and
        # again as the first extra argument.  Forward only args[2:].
        run_ssssg(args[1], *args[2:])
    elif args[0] == 'index':
        build_index(args[1], *args[2:])
    else:
        print(help_text())
|
{
"content_hash": "a384fc9797c38122ac5fd7b116ca6533",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 49,
"avg_line_length": 20.884615384615383,
"alnum_prop": 0.56353591160221,
"repo_name": "emehrkay/ssssg",
"id": "754afc7db53b15f8848c8993d9fcc12b807de79e",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ssssg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "752"
},
{
"name": "Python",
"bytes": "12235"
}
],
"symlink_target": ""
}
|
from splinter import Browser
from easygui import *
from sys import exit
from time import sleep
from re import sub
from os import path, makedirs
# Recursive function that does the actual scrapping
# @type flag: bool
# @rtype: None
# Recursive function that does the actual scrapping.
# Relies on module-level globals: `chrome` (splinter Browser) and
# `file_handler` (open TSV output file).
# @type flag: bool  -- True once at least one result page has been visited;
#                      used to decide whether an inactive "next" button means
#                      "done" rather than "no data".
# @rtype: None
def scrapper_recursion(flag):
    # The scrollbox div only exists when the results list rendered.
    if chrome.is_element_present_by_xpath(
            "//div[contains(@class,'cards-categorical-list-scrollbox jfk-scrollbar jfk-scrollbar-borderless')]"):
        # One div per listed place; clicking opens its detail card.
        for each_div in chrome.find_by_xpath(
                "//div[contains(@jsaction, "
                "'mousemove:categorical.hoverListItem;mouseout:categorical.unhoverListItem;"
                "focus:categorical.hoverListItem;blur:categorical.unhoverListItem')]"):
            # noinspection PyBroadException
            try:
                each_div.click()
                sleep(2)  # wait for the detail card to render
                name = chrome.find_by_xpath(
                    "//h1[contains(@class, 'cards-entity-title cards-strong cards-text-truncate-and-wrap')]").text
                print name
                # Address spans several lines in the card; flatten to one line.
                address = " ".join(
                    chrome.find_by_xpath("//div[contains(@class, 'cards-entity-address cards-strong')]").text.split(
                        "\n"))
                phone_no = chrome.find_by_xpath("//div[contains(@class, 'cards-entity-phone')]").text
                category = chrome.find_by_xpath("//span[contains(@class, 'cards-social-termlink')]").last.text
                # Lat/lng are embedded in the map URL after the '@' marker.
                geocode_url = chrome.url.split("@")
                geocode_arr = geocode_url[1].split(",")
                lat = geocode_arr[0]
                lng = geocode_arr[1]
                # One tab-separated row per place, matching the header row
                # written by the caller.
                file_handler.write(name.strip().encode("UTF-8") + "\t" + category.strip().encode(
                    "UTF-8") + "\t" + address.strip().encode("UTF-8") + "\t" + str(lat) + "\t" + str(
                    lng) + "\t" + phone_no.strip().encode("UTF-8") + "\n")
                # Close the detail card and return to the results list.
                chrome.find_by_xpath("//div[contains(@class, 'cards-categorical-list-context-card')]").click()
                sleep(2)
            except:
                # Best-effort scraping: skip any place whose card failed to
                # load or whose fields are missing.
                continue
        # An inactive pagination button after at least one page (flag) means
        # we have walked every page of results.
        if chrome.is_element_present_by_xpath(
                '//a[@class="cards-categorical-pagination-button cards-categorical-pagination-button-inactive"]') \
                and flag:
            print "Successfully Scrapped All Available Data!"
            file_handler.close()
        else:
            # noinspection PyBroadException
            try:
                # Advance to the next page of results.
                chrome.find_by_xpath("//span[contains(@class,'cards-categorical-pagination-button-right')]").click()
                sleep(5)
                # Zoom in fully so the next page shows localized results;
                # the zoom-in button becomes disabled at maximum zoom.
                while 1:
                    if chrome.is_element_present_by_xpath(
                            "//button[contains(@class, "
                            "'widget-zoom-button widget-zoom-in widget-zoom-button-disabled')]"):
                        break
                    chrome.find_by_xpath("//button[contains(@class, 'widget-zoom-button widget-zoom-in')]").click()
                chrome.find_by_id("searchboxinput").mouse_over()
                sleep(2)
                # Recurse with flag=True: subsequent pages have been visited.
                scrapper_recursion(True)
            except:
                # Pagination failed — treat as end of results.
                print "Successfully Scrapped All Available Data!"
                file_handler.close()
    else:
        print "No data Available"
        file_handler.close()
# GUI creation: prompt the operator for city, indicator, and places.
msg = "Enter required * information"
title = "BI GMaps Scrapper"
fieldNames = ["Folder Name * (Enter City Name)",
              "Lifestyle Indicator Name *",
              "Places * (Separate each place with a ('$') dollar)"]
fieldValues = multenterbox(msg, title, fieldNames)
# Re-prompt until every field is filled or the dialog is cancelled (None).
while 1:  # make sure that none of the fields was left blank
    if fieldValues is None:
        break
    errmsg = ""
    for i in range(len(fieldNames)):
        if fieldValues[i].strip() == "":
            errmsg += '"%s" is a required field.\n\n' % fieldNames[i]
    if errmsg == "":  # if no problems found
        break
    fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
if fieldValues is None:
    exit(0)
# Processing input and sending it to the scrapper function.
# Output goes under XLS\<city>\<indicator>\ (Windows-style path separator).
folder_name = "XLS\\" + fieldValues[0].lower()
# Spaces become '+' so the values can be embedded in the search URL.
lifestyle_indicator = sub(r"\s+", '+', fieldValues[1].lower())
places = fieldValues[2]
print "Connecting To Google Maps..."
places_arr = places.split("$")
chrome = Browser("chrome")
for place in places_arr:
    place = sub(r"\s+", '+', place)
    # Search "<indicator> in <place>"; the @lat,lng,zoom suffix sets the
    # initial viewport (coordinates here are in Chennai, India).
    chrome.visit("https://www.google.co.in/maps/search/" + lifestyle_indicator.strip() + "+in+" + place.strip() +
                 "/@13.0318799,80.1985061,21z")
    sleep(5)  # let the results page load
    if not path.exists(folder_name):
        makedirs(folder_name)
    if not path.exists(folder_name + "/" + lifestyle_indicator):
        makedirs(folder_name + "/" + lifestyle_indicator)
    # One tab-separated .xls file per place; scrapper_recursion appends rows
    # and closes the handle when done.
    file_handler = open(folder_name + "/" + lifestyle_indicator + "/" + place.strip() + "_" +
                        lifestyle_indicator.strip() + ".xls", "w")
    file_handler.write("Name\tCategory\tAddress\tLatitude\tLongitude\tContact\n")
    scrapper_recursion(False)
print "Disconnecting Google Maps..."
chrome.quit()
|
{
"content_hash": "627b7b3d0deaeb95e7d49c675eea308b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 116,
"avg_line_length": 44.3716814159292,
"alnum_prop": 0.5765855604307938,
"repo_name": "harish0507/GMapsScrapper",
"id": "ff410d44985c210fab5bccfc5a21ea75551aca0d",
"size": "5160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GMaps_Scrapper_V2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "455517"
}
],
"symlink_target": ""
}
|
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1  # exit status: assume failure until the handshake succeeds
keepalive = 10
# MQTT v3.1.1 (proto_ver=4) CONNECT with a zero-length client id — the
# broker must accept it and reply CONNACK rc=0 under the 3.1.1 spec.
connect_packet = mosq_test.gen_connect("", keepalive=keepalive, proto_ver=4)
connack_packet = mosq_test.gen_connack(rc=0)
# Launch the broker under test; stderr is captured for failure diagnostics.
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)
try:
    time.sleep(0.5)  # give the broker time to start listening
    # Performs the CONNECT/CONNACK exchange and raises on mismatch.
    sock = mosq_test.do_client_connect(connect_packet, connack_packet)
    sock.close()
    rc = 0
finally:
    # Always shut the broker down; dump its stderr if the test failed.
    broker.terminate()
    broker.wait()
    if rc:
        (stdo, stde) = broker.communicate()
        print(stde)
exit(rc)
|
{
"content_hash": "2924c795b3be4c4e3a331eb081a3d6ca",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 129,
"avg_line_length": 26.393939393939394,
"alnum_prop": 0.6934557979334098,
"repo_name": "dehuinet/mosquitto",
"id": "1ace1c3d95bb4adcb0e025e662264b3dbccf7ab8",
"size": "988",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "test/broker/01-connect-invalid-id-0-311.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "765156"
},
{
"name": "C++",
"bytes": "37722"
},
{
"name": "Groovy",
"bytes": "138"
},
{
"name": "JavaScript",
"bytes": "8597"
},
{
"name": "Perl",
"bytes": "10173"
},
{
"name": "Python",
"bytes": "261812"
},
{
"name": "Shell",
"bytes": "24885"
},
{
"name": "XSLT",
"bytes": "1189"
}
],
"symlink_target": ""
}
|
import sys
import os
import path_utils
def writecontents(filename, contents):
    """Overwrite *filename* with *contents* (text mode)."""
    handle = open(filename, "w")
    try:
        handle.write(contents)
    finally:
        handle.close()
def readcontents(filename):
    """Return the entire text contents of *filename*."""
    with open(filename, "r") as handle:
        return handle.read()
def prjrename_validate(target_dir, project_name, project_new_name):
    """Validate a project rename: the target dir and the source project
    must exist, and the destination name must be free.

    Prints a diagnostic and returns False at the first failed check;
    returns True when the rename may proceed.
    """
    if not os.path.exists(target_dir):
        print("%s does not exist. Specify another target directory." % target_dir)
        return False
    src = path_utils.concat_path(target_dir, project_name)
    if not os.path.exists(src):
        print("%s does not exist. Specify another original project." % src)
        return False
    dst = path_utils.concat_path(target_dir, project_new_name)
    if os.path.exists(dst):
        print("%s already exists. Pick another new name." % dst)
        return False
    return True
def remove_ext(path):
    """Return *path* truncated at its first dot ("proj.sln" -> "proj").

    BUG FIX: the original unpacked ``path.split(".")`` into exactly two
    names, which raised ValueError for any name containing more than one
    dot (e.g. "proj.vcxproj.filters").  ``str.partition`` keeps the
    original semantics (everything before the first dot, the whole string
    when there is no dot) for any number of dots.
    """
    return path.partition(".")[0]
def codelite_rename(base_prj_codelite_fn, new_project_name):
    """Rename a CodeLite .project file on disk and patch the project name
    embedded in its contents.

    base_prj_codelite_fn: path to the existing <old>.project file.
    new_project_name: new project name (any extension is stripped).
    """
    # Old/new project names, derived from the basenames without extension.
    opn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(base_prj_codelite_fn)))
    npn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(new_project_name)))
    # Rename the file first, then rewrite its contents in place.
    npn_full = path_utils.concat_path(path_utils.dirname_filtered(base_prj_codelite_fn), "%s.project" % new_project_name)
    os.rename(base_prj_codelite_fn, npn_full)
    contents = readcontents(npn_full)
    # Replace the project-name attribute in the CodeLite XML.
    str_cur = "CodeLite_Project Name=\"%s\"" % opn
    str_new = "CodeLite_Project Name=\"%s\"" % npn
    contents = contents.replace(str_cur, str_new)
    writecontents(npn_full, contents)
def msvc15sln_rename(base_prj_msvc15_sln, new_project_name):
    """Rename an MSVC15 .sln file and patch the project/vcxproj reference
    inside it to the new project name."""
    # Old/new names derived from basenames without extension.
    opn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(base_prj_msvc15_sln)))
    npn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(new_project_name)))
    # Rename on disk, then rewrite the solution's project reference line.
    npn_full = path_utils.concat_path(path_utils.dirname_filtered(base_prj_msvc15_sln), "%s.sln" % new_project_name)
    os.rename(base_prj_msvc15_sln, npn_full)
    contents = readcontents(npn_full)
    str_cur = "\"%s\", \"%s.vcxproj\"" % (opn, opn)
    str_new = "\"%s\", \"%s.vcxproj\"" % (npn, npn)
    contents = contents.replace(str_cur, str_new)
    writecontents(npn_full, contents)
def msvc15vcxproj_rename(base_prj_msvc15_fn, new_project_name):
    """Rename an MSVC15 .vcxproj file and patch its <RootNamespace>
    element to the new project name."""
    opn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(base_prj_msvc15_fn)))
    npn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(new_project_name)))
    # Rename on disk, then rewrite the RootNamespace in the project XML.
    npn_full = path_utils.concat_path(path_utils.dirname_filtered(base_prj_msvc15_fn), "%s.vcxproj" % new_project_name)
    os.rename(base_prj_msvc15_fn, npn_full)
    contents = readcontents(npn_full)
    str_cur = "<RootNamespace>%s</RootNamespace>" % opn
    str_new = "<RootNamespace>%s</RootNamespace>" % npn
    contents = contents.replace(str_cur, str_new)
    writecontents(npn_full, contents)
def msvc17sln_rename(base_prj_msvc17_sln, new_project_name):
    """Rename an MSVC17 .sln file and patch the project/vcxproj reference
    inside it (same procedure as msvc15sln_rename)."""
    opn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(base_prj_msvc17_sln)))
    npn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(new_project_name)))
    npn_full = path_utils.concat_path(path_utils.dirname_filtered(base_prj_msvc17_sln), "%s.sln" % new_project_name)
    os.rename(base_prj_msvc17_sln, npn_full)
    contents = readcontents(npn_full)
    str_cur = "\"%s\", \"%s.vcxproj\"" % (opn, opn)
    str_new = "\"%s\", \"%s.vcxproj\"" % (npn, npn)
    contents = contents.replace(str_cur, str_new)
    writecontents(npn_full, contents)
def msvc17vcxproj_rename(base_prj_msvc17_fn, new_project_name):
    """Rename an MSVC17 .vcxproj file and patch its <RootNamespace>
    element (same procedure as msvc15vcxproj_rename)."""
    opn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(base_prj_msvc17_fn)))
    npn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(new_project_name)))
    npn_full = path_utils.concat_path(path_utils.dirname_filtered(base_prj_msvc17_fn), "%s.vcxproj" % new_project_name)
    os.rename(base_prj_msvc17_fn, npn_full)
    contents = readcontents(npn_full)
    str_cur = "<RootNamespace>%s</RootNamespace>" % opn
    str_new = "<RootNamespace>%s</RootNamespace>" % npn
    contents = contents.replace(str_cur, str_new)
    writecontents(npn_full, contents)
def msvc17vcxprojfilters_rename(base_prj_msvc17_c_vcxproj_filters_fn, new_project_name):
    """Rename an MSVC17 .vcxproj.filters file to match the new project
    name (the file's contents need no patching)."""
    target_dir = path_utils.dirname_filtered(base_prj_msvc17_c_vcxproj_filters_fn)
    new_filename = "%s.vcxproj.filters" % new_project_name
    os.rename(base_prj_msvc17_c_vcxproj_filters_fn,
              path_utils.concat_path(target_dir, new_filename))
def makefile_rename(base_prj_makefile_fn, current_project_name, new_project_name):
    """Patch the APPNAME variable inside a project Makefile.

    Unlike the IDE-project helpers above, the file itself keeps its name
    ("Makefile"); only its contents are rewritten — no os.rename here.
    """
    # NOTE(review): opn is taken from current_project_name directly (no
    # basename/remove_ext), unlike the sibling helpers — presumably the
    # caller passes a bare project name here; confirm against prjrename().
    opn = path_utils.filter_remove_trailing_sep(current_project_name)
    npn = remove_ext(path_utils.filter_remove_trailing_sep(path_utils.basename_filtered(new_project_name)))
    npn_full = path_utils.concat_path(path_utils.dirname_filtered(base_prj_makefile_fn), "Makefile")
    contents = readcontents(npn_full)
    str_cur = "APPNAME=%s" % opn
    str_new = "APPNAME=%s" % npn
    contents = contents.replace(str_cur, str_new)
    writecontents(npn_full, contents)
def gitignore_rename(base_gitignore_fn, current_project_name, new_project_name):
    """Rewrite a .gitignore in place, replacing every occurrence of the
    old project name with the new one."""
    with open(base_gitignore_fn, "r") as handle:
        text = handle.read()
    with open(base_gitignore_fn, "w") as handle:
        handle.write(text.replace(current_project_name, new_project_name))
def prjrename(target_dir, original_project_name, new_project_name):
    """Rename a generated project directory and adapt every project file
    found inside it (Makefiles, CodeLite, MSVC15/17, .gitignore).

    Each per-format helper is invoked only when its file exists, so any
    subset of the known layouts is handled.  Exits the process (status 1)
    when validation fails.  The directory itself is renamed last.
    """
    original_project_name = path_utils.filter_remove_trailing_sep(original_project_name)
    new_project_name = path_utils.filter_remove_trailing_sep(new_project_name)
    full_original = path_utils.concat_path(target_dir, original_project_name)
    full_new = path_utils.concat_path(target_dir, new_project_name)
    if not prjrename_validate(target_dir, original_project_name, new_project_name):
        sys.exit(1)
    # All per-format project files live under <project>/proj/<format>/.
    prj_fullname_base = path_utils.concat_path(target_dir, original_project_name)
    base_prj = path_utils.concat_path(prj_fullname_base, "proj")
    # makefile_c
    base_prj_makefile_c = path_utils.concat_path(base_prj, "makefile_c")
    base_prj_makefile_c_fn = path_utils.concat_path(base_prj_makefile_c, "Makefile")
    if os.path.isfile(base_prj_makefile_c_fn):
        makefile_rename(base_prj_makefile_c_fn, original_project_name, new_project_name)
        print("Adapted [%s]" % base_prj_makefile_c_fn)
    # makefile_cpp
    base_prj_makefile_cpp = path_utils.concat_path(base_prj, "makefile_cpp")
    base_prj_makefile_cpp_fn = path_utils.concat_path(base_prj_makefile_cpp, "Makefile")
    if os.path.isfile(base_prj_makefile_cpp_fn):
        makefile_rename(base_prj_makefile_cpp_fn, original_project_name, new_project_name)
        print("Adapted [%s]" % base_prj_makefile_cpp_fn)
    # codelite15_c
    base_prj_codelite15_c = path_utils.concat_path(base_prj, "codelite15_c")
    base_prj_codelite15_c_fn = path_utils.concat_path(base_prj_codelite15_c, "%s.project" % original_project_name)
    if os.path.isfile(base_prj_codelite15_c_fn):
        codelite_rename(base_prj_codelite15_c_fn, new_project_name)
        print("Adapted [%s]" % base_prj_codelite15_c_fn)
    # codelite13_cpp
    base_prj_codelite13_cpp = path_utils.concat_path(base_prj, "codelite13_cpp")
    base_prj_codelite13_cpp_fn = path_utils.concat_path(base_prj_codelite13_cpp, "%s.project" % original_project_name)
    if os.path.isfile(base_prj_codelite13_cpp_fn):
        codelite_rename(base_prj_codelite13_cpp_fn, new_project_name)
        print("Adapted [%s]" % base_prj_codelite13_cpp_fn)
    # msvc15_c
    base_prj_msvc15_c = path_utils.concat_path(base_prj, "msvc15_c")
    base_prj_msvc15_c_sln_fn = path_utils.concat_path(base_prj_msvc15_c, "%s.sln" % original_project_name)
    base_prj_msvc15_c_vcxproj_fn = path_utils.concat_path(base_prj_msvc15_c, "%s.vcxproj" % original_project_name)
    if os.path.isfile(base_prj_msvc15_c_sln_fn) and os.path.isfile(base_prj_msvc15_c_vcxproj_fn):
        msvc15sln_rename(base_prj_msvc15_c_sln_fn, new_project_name)
        msvc15vcxproj_rename(base_prj_msvc15_c_vcxproj_fn, new_project_name)
        print("Adapted [%s] and [%s]" % (base_prj_msvc15_c_sln_fn, base_prj_msvc15_c_vcxproj_fn))
    # msvc15_cpp
    base_prj_msvc15_cpp = path_utils.concat_path(base_prj, "msvc15_cpp")
    base_prj_msvc15_cpp_sln_fn = path_utils.concat_path(base_prj_msvc15_cpp, "%s.sln" % original_project_name)
    base_prj_msvc15_cpp_vcxproj_fn = path_utils.concat_path(base_prj_msvc15_cpp, "%s.vcxproj" % original_project_name)
    if os.path.isfile(base_prj_msvc15_cpp_sln_fn) and os.path.isfile(base_prj_msvc15_cpp_vcxproj_fn):
        msvc15sln_rename(base_prj_msvc15_cpp_sln_fn, new_project_name)
        msvc15vcxproj_rename(base_prj_msvc15_cpp_vcxproj_fn, new_project_name)
        print("Adapted [%s] and [%s]" % (base_prj_msvc15_cpp_sln_fn, base_prj_msvc15_cpp_vcxproj_fn))
    # msvc17_c (also has a .vcxproj.filters file to rename)
    base_prj_msvc17_c = path_utils.concat_path(base_prj, "msvc17_c")
    base_prj_msvc17_c_sln_fn = path_utils.concat_path(base_prj_msvc17_c, "%s.sln" % original_project_name)
    base_prj_msvc17_c_vcxproj_fn = path_utils.concat_path(base_prj_msvc17_c, "%s.vcxproj" % original_project_name)
    base_prj_msvc17_c_vcxproj_filters_fn = path_utils.concat_path(base_prj_msvc17_c, "%s.vcxproj.filters" % original_project_name)
    if os.path.isfile(base_prj_msvc17_c_sln_fn) and os.path.isfile(base_prj_msvc17_c_vcxproj_fn):
        msvc17sln_rename(base_prj_msvc17_c_sln_fn, new_project_name)
        msvc17vcxproj_rename(base_prj_msvc17_c_vcxproj_fn, new_project_name)
        msvc17vcxprojfilters_rename(base_prj_msvc17_c_vcxproj_filters_fn, new_project_name)
        print("Adapted [%s], [%s] and [%s]" % (base_prj_msvc17_c_sln_fn, base_prj_msvc17_c_vcxproj_fn, base_prj_msvc17_c_vcxproj_filters_fn))
    # gitignore
    gitignore_filename = path_utils.concat_path(prj_fullname_base, ".gitignore")
    if os.path.isfile(gitignore_filename):
        gitignore_rename(gitignore_filename, original_project_name, new_project_name)
        print("Adapted [%s]" % gitignore_filename)
    # Finally, rename the project directory itself.
    os.rename(full_original, full_new)
def puaq():
    """Print usage and quit ("print usage and quit")."""
    script_name = path_utils.basename_filtered(__file__)
    print("Usage: %s proj-name new-proj-name [target-dir]" % script_name)
    sys.exit(1)
if __name__ == "__main__":
    # CLI: prjrenamer.py <proj-name> <new-proj-name> [target-dir]
    if len(sys.argv) < 3:
        puaq()
    # Optional third argument overrides the target directory (default: cwd).
    target = sys.argv[3] if len(sys.argv) > 3 else os.getcwd()
    prjrename(target, sys.argv[1], sys.argv[2])
|
{
"content_hash": "84ba91d7eda1e69cb5a7969bad76474d",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 145,
"avg_line_length": 47.36283185840708,
"alnum_prop": 0.695440956651719,
"repo_name": "mvendra/mvtools",
"id": "1e2dd23a505218118cbe91192061f3c6936a8b7f",
"size": "10728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codegen/prjrenamer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "10468"
},
{
"name": "Python",
"bytes": "2654549"
},
{
"name": "Shell",
"bytes": "27094"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import time
import re
import xgboost as xgb
from xgboost.callback import _get_callback_context
def leaf_cnts(bst):
    """Return the number of leaves in each tree of booster *bst*.

    Counts occurrences of the substring 'leaf' in every tree of the
    text dump produced by get_dump().
    """
    return [tree_dump.count('leaf') for tree_dump in bst.get_dump()]
def get_leaf_values(tree_str):
    """Extract every leaf value from one dumped tree as a float ndarray.

    *tree_str* is a single tree from Booster.get_dump(), where leaves
    appear as 'leaf=<value>\\n'.
    """
    # Lookbehind keeps only the value following 'leaf='; one match per line.
    matches = re.findall(r"(?<=leaf\=)(.+)\n", tree_str)
    return np.array([float(value) for value in matches])
def get_all_leaves(bst):
    """Return one array of leaf values per tree in booster *bst*."""
    return [get_leaf_values(tree_dump) for tree_dump in bst.get_dump()]
def reset_parameters(param_name, param_values):
    """Build an XGBoost callback that resets *param_name* every round.

    Parameters
    ----------
    param_name: str
        Name of the booster parameter to reset (e.g. 'eta').
    param_values: list or function
        List of parameter values for each boosting round
        or a customized function that calculates the value in terms of
        current number of round and the total number of boosting round
        (e.g. yields learning rate decay)
        - list l: value = l[boosting round]
        - function f: value = f(boosting round, num_boost_round)

    Returns
    -------
    callback : function
        The requested callback function (runs before each iteration).
    """
    def get_param_value(i, n, param_values):
        """Return the value for round *i* of *n* from a list or callable."""
        if isinstance(param_values, (list, np.ndarray)):
            if len(param_values) != n:
                raise ValueError("Length of list 'param_values' has to equal 'num_boost_round'.")
            new_param_value = param_values[i]
        else:
            new_param_value = param_values(i, n)
        return new_param_value

    def callback(env):
        """internal function: applies the per-round value to the booster(s)."""
        context = _get_callback_context(env)
        if context == 'train':
            # Single booster during plain training.
            bst, i, n = env.model, env.iteration, env.end_iteration
            bst.set_param(param_name, get_param_value(i, n, param_values))
        elif context == 'cv':
            # One booster per CV fold; update each of them.
            i, n = env.iteration, env.end_iteration
            for cvpack in env.cvfolds:
                bst = cvpack.bst
                bst.set_param(param_name, get_param_value(i, n, param_values))

    # Run before each boosting iteration so the new value takes effect
    # for that round.
    callback.before_iteration = True
    return callback
def experiment_xgb(X_train, y_train, X_valid, y_valid,
                   params_xgb_lst, model_str_lst,
                   n_rounds=10,
                   fname_header=None, fname_footer=None, n_skip=10):
    """Train one XGBoost model per parameter set and compare them.

    For each (params, label) pair in params_xgb_lst/model_str_lst this
    trains for n_rounds, then tabulates per-round train/valid scores,
    leaf counts, L1/L2 sums of leaf weights, and gain-based feature
    importances.  Tables are printed (every n_skip rows) and optionally
    written to CSV files under log/ when fname_header is given.

    Returns a dict with keys 'time', 'score', 'leaf_cnts', 'w_L1', 'w_L2'.
    """
    t000 = time.time()  # NOTE(review): captured but never used below
    df_score_train = pd.DataFrame(index=range(n_rounds))
    df_score_valid = pd.DataFrame(index=range(n_rounds))
    feature_names = ['f%d' % i for i in range(X_train.shape[1])]
    feat_imps_dict = {}
    leaf_cnts_dict = {}
    w_L1_dict = {}
    w_L2_dict = {}
    time_sec_lst = []

    # XGBoost: all models are assumed to share the first params' eval_metric.
    metric = params_xgb_lst[0]['eval_metric']
    xgmat_train = xgb.DMatrix(X_train, label=y_train)
    xgmat_valid = xgb.DMatrix(X_valid, label=y_valid)
    watchlist = [(xgmat_train,'train'), (xgmat_valid, 'valid')]
    print("training XGBoost")
    for params_xgb, model_str in zip(params_xgb_lst, model_str_lst):
        evals_result = {}
        t0 = time.time()
        bst = xgb.train(params_xgb, xgmat_train, n_rounds, watchlist,
                        evals_result=evals_result, verbose_eval=False)
        time_sec_lst.append(time.time() - t0)
        print("%s: %s seconds" % (model_str, str(time_sec_lst[-1])))
        df_score_train[model_str] = evals_result['train'][metric]
        df_score_valid[model_str] = evals_result['valid'][metric]
        feat_imps_dict[model_str] = pd.Series(bst.get_score(importance_type='gain'), index=feature_names)
        # Per-tree leaf statistics: count, L1 and L2 norms of leaf weights.
        leaves_lst = get_all_leaves(bst)
        leaf_cnts_dict[model_str] = [len(leaves) for leaves in leaves_lst]
        w_L1_dict[model_str] = [np.sum(np.abs(leaves)) for leaves in leaves_lst]
        w_L2_dict[model_str] = [np.sqrt(np.sum(leaves ** 2)) for leaves in leaves_lst]

    # Print the comparison tables, sampling every n_skip-th round.
    print('\n%s train' % metric)
    print(df_score_train.iloc[::n_skip,])
    print('\n%s valid' % metric)
    print(df_score_valid.iloc[::n_skip,])
    columns = model_str_lst
    print('\nLeaf counts')
    df_leaf_cnts = pd.DataFrame(leaf_cnts_dict, columns=columns)
    print(df_leaf_cnts.iloc[::n_skip,])
    print('\nw L1 sum')
    df_w_L1 = pd.DataFrame(w_L1_dict, columns=columns)
    print(df_w_L1.iloc[::n_skip,])
    print('\nw L2 sum')
    df_w_L2 = pd.DataFrame(w_L2_dict, columns=columns)
    print(df_w_L2.iloc[::n_skip,])
    # Normalized gain importances, sorted by the first model's ranking.
    df_feat_imps = pd.DataFrame(feat_imps_dict,
                                index=feature_names,
                                columns=columns).fillna(0)
    df_feat_imps /= df_feat_imps.sum(0)
    df_feat_imps = df_feat_imps.sort_values(model_str_lst[0], ascending=False)
    print('\nFeature importance(gain) sorted by ' + model_str_lst[0])
    print(df_feat_imps.head(5))
    # Optional CSV dump of every table under log/.
    if fname_header is not None:
        df_score_train.to_csv('log/' + fname_header + 'Score_Train_' + fname_footer)
        df_score_valid.to_csv('log/' + fname_header + 'Score_Valid_' + fname_footer)
        df_leaf_cnts.to_csv('log/' + fname_header + 'Leaf_cnts_' + fname_footer)
        df_w_L1.to_csv('log/' + fname_header + 'w_L1__' + fname_footer)
        df_w_L2.to_csv('log/' + fname_header + 'w_L2__' + fname_footer)
        df_feat_imps.to_csv('log/' + fname_header + 'Feat_imps_' + fname_footer)
    return{'time' : time_sec_lst,
           'score' : df_score_valid.tail(1).values[0].tolist(),
           'leaf_cnts': df_leaf_cnts.sum(0),
           'w_L1' : df_w_L1.sum(0),
           'w_L2' : df_w_L2.sum(0)}
def experiment(X_train, y_train, X_valid, y_valid,
               n_rounds, params_xgb,
               param_name=None, params_values=None):
    """Train one XGBoost model with optional per-round parameter schedule
    and plot valid loss / leaf counts / leaf-weight L2 norms per round.

    param_name/params_values: when given, a reset_parameters callback
    updates *param_name* each round from *params_values* (list or
    f(i, n) callable).  Uses early stopping (30 rounds) on 'valid'.
    """
    if param_name is None:
        callbacks = None
    else:
        # BUG FIX: the original referenced the undefined name
        # 'param_values' here, raising NameError whenever a parameter
        # schedule was supplied; the parameter is called 'params_values'.
        callbacks = [reset_parameters(param_name, params_values)]
    xgmat_train = xgb.DMatrix(X_train, label=y_train)
    xgmat_valid = xgb.DMatrix(X_valid, label=y_valid)
    watchlist = [(xgmat_valid, 'valid')]
    evals_result = {}
    t0 = time.time()
    bst = xgb.train(params_xgb, xgmat_train, n_rounds, watchlist,
                    callbacks=callbacks,
                    early_stopping_rounds=30,
                    evals_result=evals_result, verbose_eval=False)
    # Number of rounds actually run (early stopping may cut n_rounds short).
    ntree = len(evals_result['valid']['logloss'])
    df_scores = pd.DataFrame({'valid_loss':evals_result['valid']['logloss']},
                             index=pd.Index(range(1, ntree+1), name='Boosting iteration'))
    leaves_lst = get_all_leaves(bst)[:ntree]
    df_leaf_cnts = pd.DataFrame({'leaf_cnts':[len(leaves) for leaves in leaves_lst]},
                                index=pd.Index(range(1, ntree+1), name='Boosting iteration'))
    df_w_L2 = pd.DataFrame({'w_L2':[np.sqrt(np.sum(leaves**2)) for leaves in leaves_lst]},
                           index=pd.Index(range(1, ntree+1), name='Boosting iteration'))
    print("valid_loss:%.4f, ntree:%d, %.1fs" % \
          (evals_result['valid']['logloss'][bst.best_iteration],
           bst.best_ntree_limit,
           (time.time() - t0)))
    # NOTE(review): 'plt' is never imported in this module, so the plotting
    # below raises NameError unless the caller's environment provides
    # matplotlib.pyplot as plt (e.g. a notebook with %pylab).
    fig, ax = plt.subplots(3, sharex=True, figsize=(13,9))
    df_scores.plot(ax=ax[0])
    df_leaf_cnts.plot(ax=ax[1])
    df_w_L2.plot(ax=ax[2])
|
{
"content_hash": "69266725355157c7428f40cf7821a338",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 105,
"avg_line_length": 41.115606936416185,
"alnum_prop": 0.590186981583017,
"repo_name": "tks0123456789/XGB_experiments",
"id": "ffd5b72b20afd0595a57ae75bab7d55bdf6faa7e",
"size": "7113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "615993"
},
{
"name": "Python",
"bytes": "72979"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.template import RequestContext
from esperancanordeste.context_processors import enterprise_proc
from esperancanordeste.newsletter.forms import SubscribeForm
def subscribe(request):
    """Newsletter subscription view: GET shows an empty form, POST saves
    a valid submission and flags success in the template context."""
    context = {}
    if request.method == 'POST':
        form = SubscribeForm(request.POST)
        if form.is_valid():
            form.save()
            context['newsletter_success'] = True
        # On invalid POST the bound form (with errors) is re-rendered.
    else:
        form = SubscribeForm()
    context['newsletter_form'] = form
    # context_instance/RequestContext with an explicit processor list is
    # the pre-Django-1.8 calling convention; enterprise_proc injects
    # site-wide enterprise data into the template.
    return render(request, 'newsletter_subscribe.html', context,
                  context_instance=RequestContext(request,
                                                  processors=[enterprise_proc]
                                                  ))
|
{
"content_hash": "01e852acf4df53681e20c897f6f95620",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 30.6,
"alnum_prop": 0.6,
"repo_name": "klebercode/esperancanordeste",
"id": "95d18409eb9e4a7f80362483691d768592d3d015",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esperancanordeste/newsletter/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "274091"
},
{
"name": "HTML",
"bytes": "161726"
},
{
"name": "JavaScript",
"bytes": "161079"
},
{
"name": "Python",
"bytes": "135147"
}
],
"symlink_target": ""
}
|
"""Visual Studio Code-related test steps."""
# vim: set fileencoding=utf-8
#
# (C) Copyright 2019 Pavel Tisnovsky
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Pavel Tisnovsky
#
from time import sleep
from behave import then, when
from src.gui import perform_click_on_the_region, perform_find_the_region, perform_type
from src.gui import perform_move_mouse_cursor
SLEEP_AMOUNT = 2
@when('I click on the File menu')
def click_on_the_file_menu(context):
    """Locate the File menu header on the screen and click on it."""
    assert context is not None
    perform_find_the_region(context, "file menu header")
    perform_click_on_the_region(context)
@when('I move mouse cursor to the top left corner')
def move_mouse_cursor(context):
    """Move the mouse cursor near the top left corner of the screen."""
    assert context is not None
    # don't use [0, 0] as it is special area for the PyAutoGUI library
    perform_move_mouse_cursor(context, 10, 10)
@when('I click on the Exit menu entry')
def click_on_the_exit_menu_entry(context):
    """Locate the Exit entry in the (already opened) File menu and click it."""
    assert context is not None
    perform_find_the_region(context, "file menu exit")
    perform_click_on_the_region(context)
@then('I should find the activity bar')
def look_for_activity_bar(context):
    """Try to find the activity bar (either of its two known variants)."""
    assert context is not None
    perform_find_the_region(context, "activity bar", "activity bar 2")
@then('I should find the extension icon on the activity bar')
def look_for_extension_icon_on_activity_bar(context):
    """Try to find the extension icon on the activity bar."""
    assert context is not None
    perform_find_the_region(context, "extensions icon")
@then('I should find the Search extension in Marketplace input box')
def look_for_search_extension_in_marketplace(context):
    """Try to find the Marketplace search input box."""
    assert context is not None
    perform_find_the_region(context, "search extension in marketplace")
@then('I should find the Dependency Analytics info region')
def look_for_dependency_analytics_info_region(context):
    """Try to find the Dependency Analytics info region (both variants)."""
    assert context is not None
    perform_find_the_region(context, "dependency analytics info region")
    perform_find_the_region(context, "dependency analytics info region 0 12")
@then('I should find the OpenShift logo')
def look_for_openshift_logo(context):
    """Try to find the OpenShift logo (either of its two known variants)."""
    assert context is not None
    perform_find_the_region(context, "openshift logo", "openshift logo 2")
@then('I should find Analytics page with {header} header')
def look_for_analytics_page_with_specified_header(context, header):
    """Try to find the given header on the Analytics page.

    The header name is lower-cased to build the region identifier.
    """
    assert context is not None
    assert header is not None
    region = "analysis_security_{}_header".format(header.lower())
    perform_find_the_region(context, region)
@then('I should find the Dependency Analytics header')
def look_for_dependency_analytics_header(context):
    """Try to find the Dependency Analytics header."""
    assert context is not None
    perform_find_the_region(context, "dependency analytics header")
@then('I should find the Dependency Analytics title')
def look_for_dependency_analytics_title(context):
    """Try to find the Dependency Analytics title."""
    assert context is not None
    perform_find_the_region(context, "dependency analytics title")
@then('I should find the Plugin install button')
def look_for_plugin_install_button(context):
    """Try to find the Install button for a selected plugin."""
    assert context is not None
    perform_find_the_region(context, "plugin install button", "plugin install button 2")
@then(u'I should find the Plugin uninstall button')
def look_for_plugin_uninstall_button(context):
    """Try to find the Uninstall button for a selected plugin."""
    assert context is not None
    perform_find_the_region(context, "plugin uninstall button", "plugin uninstall button 2")
@then('I should find the Uninstalled label')
def look_for_uninstalled_label(context):
    """Try to find the Uninstalled label."""
    assert context is not None
    perform_find_the_region(context, "uninstalled label")
@then('I should find the Reload and Uninstall buttons')
def look_for_plugin_reload_and_uninstall_button(context):
    """Try to find the Reload and Uninstall buttons for a selected plugin."""
    assert context is not None
    perform_find_the_region(context, "reload uninstall buttons")
@then('I should find the Reload button and Gear icon')
def look_for_plugin_reload_button_and_gear_icon(context):
    """Try to find the Reload button with Gear icon for a selected plugin."""
    assert context is not None
    perform_find_the_region(context, "reload gear")
@then('I should find the Installed icon and Gear button')
def look_for_plugin_installed_icon_and_gear_button(context):
    """Try to find the Installed icon and a Gear icon for a selected plugin."""
    assert context is not None
    perform_find_the_region(context, "installed icon and gear button")
@then('I should find the Installed icon and Uninstall button')
def look_for_plugin_install_icon_and_uninstall_button(context):
    """Try to find the Installed icon and an Uninstall button for a selected plugin."""
    assert context is not None
    perform_find_the_region(context, "installed icon and uninstall button")
@then('I should find the Dependency Analysis Report menu entry in context menu')
def look_for_dependency_analysis_report_menu_entry_context_menu(context):
    """Try to find the Dependency Analysis Report entry in the context menu."""
    assert context is not None
    perform_find_the_region(context, "context menu dependency analytics entry")
@then('I should find the empty window or Welcome tab')
def look_for_empty_window_or_welcome_tab(context):
    """Try to find the Welcome tab (or empty window) displayed after all editor tabs are closed."""
    assert context is not None
    perform_find_the_region(context, "welcome tab", "empty window")
@then('I should find the icon with info about zero problems in the status bar')
def look_for_icon_with_info_about_zero_problems(context):
    """Try to find the status-bar icon that reports zero problems."""
    assert context is not None
    perform_find_the_region(context, "zero problems")
@then('I should find the icon with info about one problem found in the status bar')
def look_for_icon_with_info_about_one_problem(context):
    """Try to find the status-bar icon that reports exactly one problem."""
    assert context is not None
    perform_find_the_region(context, "one problem")
@then('I should find the icon with info about {number} problems found in the status bar')
def look_for_icon_with_info_about_more_problems(context, number):
    """Try to find the status-bar icon that reports the given number of problems."""
    assert context is not None
    # `number` arrives as text from the step; the region name embeds it verbatim
    perform_find_the_region(context, number + " problems")
@when('I type in {what}')
def type_in_text(context, what):
    """Type the given text onto the screen via the GUI driver."""
    assert context is not None
    perform_type(context, what)
@when('I click on the extension icon on the activity bar')
def click_on_the_extension_icon_on_the_activity_bar(context):
    """Try to click on the extension icon on the activity bar.

    Locates the activity bar, then the extension icon on it, clicks the
    icon, and waits for the extensions side pane to render.
    """
    assert context is not None
    look_for_activity_bar(context)
    look_for_extension_icon_on_activity_bar(context)
    perform_click_on_the_region(context)
    # (removed a stray bare reference to this function that was a no-op)
    sleep(SLEEP_AMOUNT)
@when('I close the Visual Studio Code')
def close_visual_studio_code(context):
    """Try to close the Visual Studio Code via File > Exit."""
    assert context is not None
    click_on_the_file_menu(context)
    click_on_the_exit_menu_entry(context)
    sleep(SLEEP_AMOUNT)
@when('I search for {plugin} plugin')
def search_for_plugin(context, plugin):
    """Search the Marketplace for the given plugin."""
    assert context is not None
    look_for_search_extension_in_marketplace(context)
    type_in_text(context, plugin)
    # give the Marketplace time to display the search results
    sleep(SLEEP_AMOUNT)
    look_for_dependency_analytics_info_region(context)
@when('I select the plugin')
def select_plugin(context):
    """Select the plugin to install."""
    assert context is not None
    perform_click_on_the_region(context)
    # time to find the plugin
    sleep(SLEEP_AMOUNT)
    look_for_plugin_install_button(context)
@when('I start the installation by clicking on the Plugin install button')
def start_extension_installation(context):
    """Start the VS Code extension installation.

    Moves the cursor away afterwards so it does not obscure the button
    region during subsequent screen checks.
    """
    assert context is not None
    perform_click_on_the_region(context)
    perform_move_mouse_cursor(context, 10, 10)
    sleep(SLEEP_AMOUNT)
|
{
"content_hash": "1d3c3066400d1b50cec71e955877a6d1",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 92,
"avg_line_length": 36.33877551020408,
"alnum_prop": 0.7266090081994834,
"repo_name": "tisnik/fabric8-analytics-common",
"id": "cd0d5a0596665831881be5b870bf2566781826a2",
"size": "8903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vscode-visual-tests/features/steps/vscode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2827"
},
{
"name": "Dockerfile",
"bytes": "833"
},
{
"name": "Gherkin",
"bytes": "571433"
},
{
"name": "HTML",
"bytes": "65002"
},
{
"name": "Python",
"bytes": "797262"
},
{
"name": "Shell",
"bytes": "30956"
}
],
"symlink_target": ""
}
|
from absl import app
from iree.tf.support import tf_test_utils
from iree.tf.support import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
# Default network dimensions; INPUT_DIM is a flattened 28x28 image.
HIDDEN_1_DIM = 256
HIDDEN_2_DIM = 256
INPUT_DIM = 784  # 28 * 28 (was 728, which contradicted this comment and the class default below)
CLASSES = 10
class DynamicMlpReluModule(tf.Module):
  """Two-hidden-layer MLP with ReLU activations and a dynamic batch dim."""

  def __init__(self,
               hidden_1_dim=256,
               hidden_2_dim=256,
               input_dim=28 * 28,
               classes=10):
    super().__init__()
    # Seed TF's RNG so the randomly initialized weights are reproducible.
    tf_utils.set_random_seed()
    self.hidden_1_dim = hidden_1_dim
    self.hidden_2_dim = hidden_2_dim
    self.input_dim = input_dim
    self.classes = classes
    self.h1_weights = tf.Variable(tf.random.normal([input_dim, hidden_1_dim]))
    self.h2_weights = tf.Variable(tf.random.normal([hidden_1_dim,
                                                    hidden_2_dim]))
    self.out_weights = tf.Variable(tf.random.normal([hidden_2_dim, classes]))
    self.h1_bias = tf.Variable(tf.random.normal([hidden_1_dim]))
    self.h2_bias = tf.Variable(tf.random.normal([hidden_2_dim]))
    self.out_bias = tf.Variable(tf.random.normal([classes]))
    # Compile with dynamic batch dim: the leading None in the TensorSpec
    # leaves the batch dimension unconstrained.
    self.predict = tf.function(
        input_signature=[tf.TensorSpec([None, self.input_dim])])(self.predict)

  def mlp(self, x):
    # Both hidden layers and the output use ReLU (note: ReLU on logits too).
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, self.h1_weights), self.h1_bias))
    layer_2 = tf.nn.relu(
        tf.add(tf.matmul(layer_1, self.h2_weights), self.h2_bias))
    return tf.nn.relu(
        tf.add(tf.matmul(layer_2, self.out_weights), self.out_bias))

  def predict(self, x):
    # Softmax over the class dimension; x is [batch, input_dim].
    return tf.nn.softmax(self.mlp(x))
class DynamicMlpReluTest(tf_test_utils.TracedModuleTestCase):
  """Compares the compiled MLP module against its TF reference backend."""

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._modules = tf_test_utils.compile_tf_module(DynamicMlpReluModule,
                                                    exported_names=["predict"])

  def test_dynamic_batch(self):
    def dynamic_batch(module):
      # Small input values keep the softmax numerically tame across backends.
      batch = tf_utils.uniform([3, 28 * 28]) * 1e-3
      module.predict(batch)
    self.compare_backends(dynamic_batch, self._modules)
def main(argv):
  """Entry point: enable TF2 behavior when available, then run the tests."""
  del argv  # Unused
  enable_v2 = getattr(tf, 'enable_v2_behavior', None)
  if enable_v2 is not None:
    enable_v2()
  tf.test.main()
# Let absl handle flag parsing before dispatching to main().
if __name__ == '__main__':
  app.run(main)
|
{
"content_hash": "b7541037e3628bb775415a82fed341eb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 30.78082191780822,
"alnum_prop": 0.616822429906542,
"repo_name": "google/iree",
"id": "e049c9e47c2fba927fec741e43ac63a859879d91",
"size": "2653",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "integrations/tensorflow/test/python/iree_tf_tests/uncategorized/dynamic_mlp_relu_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
}
|
import csv
import fcntl
import json
from os.path import exists, join
from pathlib import Path
from sys import argv
# CLI contract: run.py INPUT_FOLDER OUTPUT_FOLDER
input_folder, output_folder = argv[1:]

# Load the variables submitted for this run.
# Use `with` so the handle is closed deterministically (the original
# json.load(open(...)) relied on refcounting to close the file).
with open(join(input_folder, 'variables.dictionary'), 'rt') as f:
    variables = json.load(f)

datasets_folder = Path('datasets')
datasets_folder.mkdir(exist_ok=True)
table_path = datasets_folder / 'entries.csv'
is_table_new = True
is_entry_new = True
if exists(table_path):
    # An entry is "new" unless a row with the same repository_url exists.
    key = 'repository_url'
    repository_url = variables[key].strip()
    with open(table_path, 'rt') as f:
        csv_reader = csv.reader(f)
        columns = next(csv_reader)
        column_index = columns.index(key)
        for row in csv_reader:
            if row[column_index] == repository_url:
                is_entry_new = False
    is_table_new = False
# Append under an exclusive lock so concurrent runs do not interleave rows.
with open(table_path, 'at') as f:
    fcntl.flock(f, fcntl.LOCK_EX)
    csv_writer = csv.writer(f)
    if is_table_new:
        csv_writer.writerow(variables.keys())
    csv_writer.writerow(variables.values())
if is_entry_new:
    response_text = 'Thanks for submitting a new entry!'
else:
    response_text = 'This entry already exists. Thanks for updating it!'
# Write the response with `with` so it is flushed before the process exits
# (json.dump(..., open(...)) gave no such guarantee off-CPython).
with open(join(output_folder, 'variables.dictionary'), 'wt') as f:
    json.dump({
        'response_text': response_text,
    }, f)
|
{
"content_hash": "99b68380eb475e422b2749601b34eb60",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 26.78723404255319,
"alnum_prop": 0.6679904686258936,
"repo_name": "crosscompute/crosscompute-examples",
"id": "c7715d97bd364a7e14293119c17447de931f8b46",
"size": "1259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms/gather-packages/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1747"
},
{
"name": "Dockerfile",
"bytes": "402"
},
{
"name": "Jinja",
"bytes": "621"
},
{
"name": "Jupyter Notebook",
"bytes": "37263"
},
{
"name": "Python",
"bytes": "3437"
},
{
"name": "Shell",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values  # position level, kept 2-D for sklearn
y = dataset.iloc[:, 2].values    # salary
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting Random Forest Regression to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor.fit(X, y)
# Predicting a new result
# predict() expects a 2-D array of shape (n_samples, n_features);
# passing the bare scalar 6.5 raises a ValueError on modern scikit-learn.
y_pred = regressor.predict([[6.5]])
# Visualising the Random Forest Regression results (higher resolution)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
{
"content_hash": "9bad97ee97b79131b82f0f9749291678",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 95,
"avg_line_length": 33.23684210526316,
"alnum_prop": 0.7347585114806018,
"repo_name": "leofdecarvalho/MachineLearning",
"id": "fce58b1fe283a6d96886e7ebc71c1c40effe525f",
"size": "1317",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "2. Modeling/1. Regression/7. Random-Forest-Regression/random_forest_regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1895"
},
{
"name": "R",
"bytes": "1603"
}
],
"symlink_target": ""
}
|
# coding=utf-8
from plugins.utils import get_used_plugins_by
def get_widget_data():
    """Return the network device objects for this widget.

    (Per the original Russian docstring: a dictionary of network device
    objects, sourced from the 'plugins.system_ip_online' package.)
    """
    return get_used_plugins_by(package='plugins.system_ip_online')
|
{
"content_hash": "699ecb8353f4ea751b0c5f4ce5459dd7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 72,
"avg_line_length": 28.5,
"alnum_prop": 0.7280701754385965,
"repo_name": "sug4rok/Servus",
"id": "618d394105eeeffdbdaad6f8322585c90af5d71b",
"size": "282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Servus/plugins/system_ip_online/widget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "13156"
},
{
"name": "CSS",
"bytes": "82380"
},
{
"name": "HTML",
"bytes": "34128"
},
{
"name": "JavaScript",
"bytes": "97398"
},
{
"name": "Python",
"bytes": "159966"
}
],
"symlink_target": ""
}
|
import heapq
import sys
from collections import deque
class DataStructures:
    """Maintain a stack, a FIFO queue, and a max-priority-queue in parallel.

    The priority queue reuses heapq's min-heap by storing negated values,
    so the largest inserted value is always popped first.
    """

    def __init__(self):
        self.clean()

    def add_to_stack(self, value):
        self.stack.append(value)

    def remove_from_stack(self):
        return self.stack.pop()

    def add_to_queue(self, value):
        self.queue.append(value)

    def remove_from_queue(self):
        return self.queue.popleft()

    def add_to_priority_queue(self, value):
        # Negate so the smallest heap entry corresponds to the largest value.
        heapq.heappush(self.priority_queue, -value)

    def remove_from_priority_queue(self):
        return -heapq.heappop(self.priority_queue)

    def add(self, value):
        """Insert the value into all three containers."""
        self.add_to_stack(value)
        self.add_to_queue(value)
        self.add_to_priority_queue(value)

    def remove(self):
        """Pop one value from each container and report them by name."""
        return {
            'stack': self.remove_from_stack(),
            'queue': self.remove_from_queue(),
            'priority queue': self.remove_from_priority_queue(),
        }

    def clean(self):
        """Reset all three containers to empty."""
        self.stack = []
        self.queue = deque()
        self.priority_queue = []
# Guess which data structure (stack / queue / priority queue) is consistent
# with an observed sequence of insert (1 x) and remove (2 x) operations, by
# simulating all three in parallel and eliminating the ones that disagree.
data_structure_bag = DataStructures()
for test_case_size in sys.stdin:
    test_case_size = int(test_case_size)
    possible_data_structures = ['stack', 'queue', 'priority queue']
    for _ in range(test_case_size):
        # command 1 = insert x, anything else = remove and expect x
        command, x = map(int, input().split())
        if command == 1:
            data_structure_bag.add(x)
        else:
            removals = data_structure_bag.remove()
            # Eliminate every structure whose popped value differs from x.
            if x != removals['stack'] and 'stack' in possible_data_structures:
                possible_data_structures.remove('stack')
            if x != removals['queue'] and 'queue' in possible_data_structures:
                possible_data_structures.remove('queue')
            if (
                x != removals['priority queue']
                and 'priority queue' in possible_data_structures
            ):
                possible_data_structures.remove('priority queue')
    # Exactly one survivor: identified; several: ambiguous; none: impossible.
    if len(possible_data_structures) == 1:
        print(possible_data_structures[0])
    elif len(possible_data_structures) > 1:
        print('not sure')
    else:
        print('impossible')
    data_structure_bag.clean()
|
{
"content_hash": "0a5b5c236ce932699b281bee6404b3ba",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 27.512820512820515,
"alnum_prop": 0.5852749301025163,
"repo_name": "deniscostadsc/playground",
"id": "e09853c3e2de58241aa27c6c604d0e3ef584e3bd",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "solutions/beecrowd/1340/1340.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19932"
},
{
"name": "C#",
"bytes": "4974"
},
{
"name": "C++",
"bytes": "270707"
},
{
"name": "Clojure",
"bytes": "9520"
},
{
"name": "Dart",
"bytes": "3707"
},
{
"name": "Dockerfile",
"bytes": "11466"
},
{
"name": "Go",
"bytes": "2132"
},
{
"name": "Haskell",
"bytes": "1521"
},
{
"name": "Java",
"bytes": "5111"
},
{
"name": "JavaScript",
"bytes": "7232"
},
{
"name": "Kotlin",
"bytes": "2261"
},
{
"name": "Lua",
"bytes": "1381"
},
{
"name": "Makefile",
"bytes": "3505"
},
{
"name": "OCaml",
"bytes": "894"
},
{
"name": "PHP",
"bytes": "1551"
},
{
"name": "Pascal",
"bytes": "1643"
},
{
"name": "Python",
"bytes": "60545"
},
{
"name": "R",
"bytes": "1664"
},
{
"name": "Ruby",
"bytes": "880"
},
{
"name": "Rust",
"bytes": "3980"
},
{
"name": "Scala",
"bytes": "2061"
},
{
"name": "Shell",
"bytes": "35358"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
	"""Build the shared noble human male (variant 03) creature template."""
	creature = Creature()
	creature.template = "object/mobile/shared_dressed_noble_human_male_03.iff"
	creature.attribute_template_id = 9
	creature.stfName("npc_name","human_base_male")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return creature
|
{
"content_hash": "b3bfccbce45618045b516a097fc8db76",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6938110749185668,
"repo_name": "anhstudios/swganh",
"id": "f933e59505b84c0873e2f80db0c34e570e9dd61b",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_noble_human_male_03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from matplotlib.collections import PatchCollection,LineCollection
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
import numpy as np
from .. import utils
def plot_linestring(ls,**kwargs):
ax=kwargs.pop('ax',plt.gca())
c = np.array(ls.coords)
return ax.plot( c[:,0],c[:,1],**kwargs)[0]
def plot_multilinestring(mls,**kwargs):
ax=kwargs.pop('ax',plt.gca())
if mls.type == 'MultiLineString':
segs = [np.array(ls.coords) for ls in mls.geoms]
coll = LineCollection(segs,**kwargs)
ax.add_collection(coll)
return coll
else:
return plot_linestring(mls,**kwargs)
########
# New, non-hacked way to plot polygons with holes
# From: http://sgillies.net/blog/1013/painting-punctured-polygons-with-matplotlib/
def ring_coding(ob):
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(ob.coords)
codes = np.ones(n, dtype=Path.code_type) * Path.LINETO
codes[0] = Path.MOVETO
# unsure of difference between CLOSEPOLY and leaving as is.
# codes[-1] = Path.CLOSEPOLY # doesn't seem to make a difference
return codes
def pathify(polygon):
# Convert coordinates to path vertices. Objects produced by Shapely's
# analytic methods have the proper coordinate order, no need to sort.
# 20170707: matplotlib pickier about ordering of internal rings, may have
# reverse interiors.
# 20170719: shapely doesn't guarantee one order or the other
def ensure_orientation(a,ccw=True):
"""
take an array-like [N,2] set of points defining a polygon,
return an array which is ordered ccw (or cw is ccw=False)
"""
a=np.asarray(a) # pre-shapely 2
area=utils.signed_area(a)
if ccw == (area<0):
a=a[::-1]
return a
vertices = np.concatenate(
[ ensure_orientation(polygon.exterior.coords,ccw=True)]
+ [ ensure_orientation(r.coords,ccw=False) for r in polygon.interiors])
codes = np.concatenate(
[ring_coding(polygon.exterior)]
+ [ring_coding(r) for r in polygon.interiors])
return Path(vertices, codes)
def poly_to_patch(polygon,**kwargs):
return PathPatch(pathify(polygon), **kwargs)
def multipoly_to_patches(multipoly,*args,**kwargs):
patches = [poly_to_patch(p) for p in multipoly.geoms]
return PatchCollection(patches,*args,**kwargs)
def plot_polygon(p,*args,**kwargs):
if 'holes' in kwargs:
print("dropping obsolete holes keyword argument")
del kwargs['holes']
ax = kwargs.pop('ax',plt.gca())
patch = poly_to_patch(p,*args,**kwargs)
ax.add_patch(patch)
return patch
def plot_multipolygon(mp,*args,**kwargs):
if 'holes' in kwargs:
print("dropping obsolete holes keyword argument")
del kwargs['holes']
ax = kwargs.pop('ax',plt.gca())
coll = multipoly_to_patches(mp,*args,**kwargs)
ax.add_collection( coll )
return coll
def plot_wkb(g,*args,**kwargs):
if g.type == 'MultiPolygon':
return plot_multipolygon(g,*args,**kwargs)
elif g.type=='Polygon':
return plot_polygon(g,*args,**kwargs)
elif g.type == 'MultiLineString':
return plot_multilinestring(g,*args,**kwargs)
elif g.type =='LineString':
return plot_linestring(g,*args,**kwargs)
else:
raise Exception("no match to type")
|
{
"content_hash": "9588abf29923b32a4b093cb6fa9786e0",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 82,
"avg_line_length": 34.019417475728154,
"alnum_prop": 0.6543949771689498,
"repo_name": "rustychris/stompy",
"id": "08f9e678c3ea4786a666a01c75fdeef2f3013c54",
"size": "3504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stompy/plot/plot_wkb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "89942"
},
{
"name": "Makefile",
"bytes": "62"
},
{
"name": "Python",
"bytes": "4305617"
},
{
"name": "Shell",
"bytes": "241"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a `coder` ForeignKey (to the project user model) on both
    # CommentCodeInstance and SubmissionCodeInstance.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('coding', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='commentcodeinstance',
            name='coder',
            # NOTE(review): default=None here vs default=1 below looks
            # inconsistent; None cannot populate existing rows of a
            # non-nullable FK — presumably the table was empty. Confirm.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='submissioncodeinstance',
            name='coder',
            # default=1 only backfills existing rows during the migration;
            # preserve_default=False drops it from the final field.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "11519ec387fd65502da81d982c4d7bbb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 124,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6468609865470852,
"repo_name": "geosoco/reddit_coding",
"id": "2c986deb5559d1b8089d49db00fe2cb227045bf4",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coding/migrations/0002_auto_20160506_0424.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18804"
},
{
"name": "HTML",
"bytes": "20973"
},
{
"name": "JavaScript",
"bytes": "46619"
},
{
"name": "Python",
"bytes": "68758"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
	"""Build the zucca boar DNA-template crafting component."""
	component = Tangible()
	component.template = "object/tangible/component/dna/shared_dna_template_zucca_boar.iff"
	component.attribute_template_id = -1
	component.stfName("craft_dna_components_n","dna_template_zucca_boar")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return component
|
{
"content_hash": "18bd705853e36f1d91c6bfdb38f16189",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.7105263157894737,
"repo_name": "anhstudios/swganh",
"id": "6e6baed3a11b9cd6147272d92316f35a4f409691",
"size": "487",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/component/dna/shared_dna_template_zucca_boar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import Pyro4
import Pyro4.constants
class CustomDaemon(Pyro4.Daemon):
    """Daemon subclass that logs client disconnects and their tracked resources."""
    def clientDisconnect(self, conn):
        # If required, you *can* override this to do custom resource freeing.
        # But this is not needed if your resource objects have a proper 'close' method;
        # this method is called by Pyro itself once the client connection gets closed.
        # In this example this override is only used to print out some info.
        print("client disconnects:", conn.sock.getpeername())
        print("  resources: ", [r.name for r in conn.tracked_resources])
class Resource(object):
    """A fictional allocatable resource that must be freed after use.

    Pyro calls close() on a tracked resource once the client's connection
    is closed (unless the resource was already garbage collected normally).
    """

    def __init__(self, name, collection):
        self.name = name
        self.collection = collection

    def close(self):
        print("Resource: closing", self.name)
        # discard() (not remove()) so a second close is harmless.
        self.collection.discard(self)
@Pyro4.expose
@Pyro4.behavior(instance_mode="single")
class Service(object):
    """Remote service that allocates, lists and frees named resources."""

    def __init__(self):
        self.resources = set()  # the currently allocated resources

    def allocate(self, name):
        """Allocate a resource and register it with Pyro's resource tracking."""
        resource = Resource(name, self.resources)
        self.resources.add(resource)
        Pyro4.current_context.track_resource(resource)
        print("service: allocated resource", name, " for client", Pyro4.current_context.client_sock_addr)

    def free(self, name):
        """Close and untrack every allocated resource with the given name."""
        matching = {r for r in self.resources if r.name == name}
        self.resources -= matching
        for resource in matching:
            resource.close()
            Pyro4.current_context.untrack_resource(resource)

    def list(self):
        """Return the names of all currently allocated resources."""
        return [resource.name for resource in self.resources]
# Serve the single Service instance under the custom daemon so client
# disconnects (and their tracked resources) get logged; no name server used.
with CustomDaemon() as daemon:
    Pyro4.Daemon.serveSimple({
        Service: "service"
    }, ns=False, daemon=daemon)
|
{
"content_hash": "f7f0fc29ab79303af8587307c308b590",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 105,
"avg_line_length": 35.8,
"alnum_prop": 0.664804469273743,
"repo_name": "irmen/Pyro4",
"id": "d5aba54c3748fdef720e26cfb29318452d39bca5",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/resourcetracking/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1283"
},
{
"name": "Python",
"bytes": "618799"
},
{
"name": "Shell",
"bytes": "2394"
}
],
"symlink_target": ""
}
|
from functools import update_wrapper
from datetime import datetime, timedelta
import cgi
from tempfile import TemporaryFile
from .exceptions import HTTPException, BadRequest
from .utils import cached_property
from .datastructures import MultiDict, FileUpload, FormsDict, WSGIHeaders
from ._compat import (PY2, to_bytes, string_types, text_type,
integer_types, to_unicode, to_native, BytesIO)
if PY2:
from Cookie import SimpleCookie
else:
from http.cookies import SimpleCookie
from .utils import (urlencode, urldecode, urlquote, urlunquote, urljoin, json)
from .http import (parse_content_type, parse_date, parse_auth, parse_content_type, parse_range_header)
from .exceptions import BadRequest
# Ceiling (4 MiB) used when buffering request bodies in memory; chunked
# bodies larger than this are handled specially (see Request.get_input_stream).
MEMFILE_MAX = 4*1024*1024
class Request(object):
    """Convenience wrapper around a WSGI environ dict.

    Gives mostly read-only access to the incoming HTTP request: URL
    parts, headers, query/form data, uploaded files, cookies, body and
    JSON payload.  Can be used as a context manager, which closes the
    buffered body stream on exit.
    """
    #: the charset for the request, defaults to utf-8
    charset = 'utf-8'
    #: the error handling procedure for errors, defaults to 'replace'
    encoding_errors = 'replace'
    #: the endpoint that matched this request (set by the router)
    endpoint = ''
    #: a dict of view arguments that matched the request. If an exception
    #: happened when matching, this will be `None`.
    view_args = None

    def __init__(self, environ, populate_request=True):
        self.environ = environ
        if populate_request:
            # make the request reachable from the environ itself
            self.environ['cocopot.request'] = self

    def __repr__(self):
        # make sure the __repr__ even works if the request was created
        # from an invalid WSGI environment. If we display the request
        # in a debug session we don't want the repr to blow up.
        args = []
        try:
            args.append("'%s'" % to_native(self.url, self.url_charset))
            args.append('[%s]' % self.method)
        except Exception:
            args.append('(invalid WSGI environ)')
        return '<%s %s>' % (
            self.__class__.__name__,
            ' '.join(args)
        )

    @property
    def url_charset(self):
        """The charset that is assumed for URLs. Defaults to the value
        of `charset`.
        """
        return self.charset

    def close(self):
        """Closes associated resources of this request object. This
        closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.
        """
        if hasattr(self.stream, 'close'):
            self.stream.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    @cached_property
    def stream(self):
        """The request body as a buffered, seekable stream, rewound to
        the beginning (see `get_input_stream`)."""
        stream = self.get_input_stream()
        stream.seek(0)
        return stream

    @property
    def input_stream(self):
        """The raw WSGI input stream, without any buffering."""
        return self.environ.get('wsgi.input')

    @cached_property
    def args(self):
        """The parsed query-string parameters as a `MultiDict`."""
        query_string = self.environ.get('QUERY_STRING', '')
        if query_string:
            return MultiDict(urldecode(query_string))
        else:
            return MultiDict()

    @property
    def data(self):
        """Shortcut for :meth:`get_data` with default arguments."""
        return self.get_data()

    def get_data(self, cache=True, as_text=False):
        """This reads the buffered incoming data from the client into one
        bytestring. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.
        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.
        If `as_text` is set to `True` the return value will be a decoded
        unicode string.
        """
        rv = getattr(self, '_cached_data', None)
        if rv is None:
            rv = self.stream.read()
            if cache:
                self._cached_data = rv
        if as_text:
            rv = rv.decode(self.charset, self.encoding_errors)
        return rv

    @cached_property
    def chunked(self):
        """True if the chunked transfer encoding was used for the body."""
        return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()

    def iter_body(self, read, bufsize):
        """Yield up to Content-Length bytes of the body using the `read`
        callable, in chunks of at most `bufsize` bytes."""
        maxread = max(0, self.content_length)
        while maxread:
            part = read(min(maxread, bufsize))
            if not part:
                break
            yield part
            maxread -= len(part)

    def iter_chunked(self, read, bufsize):
        """Decode a chunked transfer-encoded body, yielding the decoded
        data chunks.  Raises `BadRequest` on any framing error."""
        err = BadRequest('Error while parsing chunked transfer body.')
        rn, sem, bs = to_bytes('\r\n'), to_bytes(';'), to_bytes('')
        while True:
            # read the chunk-size line byte by byte up to CRLF
            header = read(1)
            while header[-2:] != rn:
                c = read(1)
                header += c
                if not c:
                    raise err
                if len(header) > bufsize:
                    raise err
            # the hex size may be followed by ';' and chunk extensions
            size, _, _ = header.partition(sem)
            try:
                maxread = int(to_native(size.strip()), 16)
            except ValueError:
                raise err
            if maxread == 0:
                # a zero-sized chunk terminates the body
                break
            buff = bs
            while maxread > 0:
                if not buff:
                    buff = read(min(maxread, bufsize))
                part, buff = buff[:maxread], buff[maxread:]
                if not part:
                    raise err
                yield part
                maxread -= len(part)
            # every chunk is terminated by CRLF
            if read(2) != rn:
                raise err

    def get_input_stream(self):
        """Return a buffered, seekable stream holding the request body.

        The body is read from the raw WSGI input (honoring chunked
        transfer encoding) and spooled to a temporary file once it
        exceeds MEMFILE_MAX bytes.  'wsgi.input' in the environ is
        replaced with the buffered stream so re-reads are safe.
        """
        try:
            read_func = self.environ['wsgi.input'].read
        except KeyError:
            self.environ['wsgi.input'] = BytesIO()
            return self.environ['wsgi.input']
        if self.chunked:
            body, body_size, is_temp_file = BytesIO(), 0, False
            for part in self.iter_chunked(read_func, MEMFILE_MAX):
                body.write(part)
                body_size += len(part)
                if not is_temp_file and body_size > MEMFILE_MAX:
                    # body outgrew the in-memory buffer: move it to disk
                    body, tmp = TemporaryFile(mode='w+b'), body
                    body.write(tmp.getvalue())
                    del tmp
                    is_temp_file = True
        else:
            if self.content_length > MEMFILE_MAX:
                body = TemporaryFile(mode='w+b')
            else:
                body = BytesIO()
            for part in self.iter_body(read_func, MEMFILE_MAX):
                body.write(part)
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body

    def parse_form_data(self):
        """Parse the request body as form data into a `FormsDict`,
        containing plain values and `FileUpload` objects for uploads."""
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            pairs = urldecode(to_unicode(self.get_data()))
            for key, value in pairs:
                post[key] = value
            return post
        safe_env = {'QUERY_STRING': ''}  # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ:
                safe_env[key] = self.environ[key]
        args = dict(fp=self.stream, environ=safe_env, keep_blank_values=True)
        if not PY2:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        # keep a reference so the FieldStorage isn't garbage collected
        # prematurely: http://bugs.python.org/issue18394
        self.environ['_cgi.FieldStorage'] = data
        data = data.list or []
        for item in data:
            if item.filename:
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post

    @cached_property
    def parsed_form_data(self):
        """Cached result of :meth:`parse_form_data`."""
        return self.parse_form_data()

    @cached_property
    def form(self):
        """Only the non-file form fields from the parsed body."""
        form = FormsDict()
        for name, item in self.parsed_form_data.allitems():
            if not isinstance(item, FileUpload):
                form[name] = item
        return form

    @cached_property
    def values(self):
        """Combined multi dict for `args` and `form`."""
        args = []
        for d in self.args, self.form:
            if not isinstance(d, MultiDict):
                for k, v in d.items():
                    args.append((k, v))
            else:
                for k, v in d.iterallitems():
                    args.append((k, v))
        return MultiDict(args)

    @cached_property
    def files(self):
        """Only the uploaded files (`FileUpload`) from the parsed body."""
        files = FormsDict()
        for name, item in self.parsed_form_data.allitems():
            if isinstance(item, FileUpload):
                files[name] = item
        return files

    def get_host(self):
        """Return the real host for the given WSGI environment. This first checks
        the `X-Forwarded-Host` header, then the normal `Host` header, and finally
        the `SERVER_NAME` environment variable (using the first one it finds).
        """
        environ = self.environ
        if 'HTTP_X_FORWARDED_HOST' in environ:
            rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip()
        elif 'HTTP_HOST' in environ:
            rv = environ['HTTP_HOST']
        else:
            rv = environ['SERVER_NAME']
            # only append the port when it is not the scheme's default
            if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
               in (('https', '443'), ('http', '80')):
                rv += ':%s' % (environ['SERVER_PORT'])
        return rv

    def get_content_length(self):
        """Returns the content length from the WSGI environment as
        integer. If it's not available (or unparsable) `0` is returned.
        """
        content_length = self.environ.get('CONTENT_LENGTH')
        if content_length is not None:
            try:
                return max(0, int(content_length))
            except (ValueError, TypeError):
                pass
        return 0

    def get_current_url(self, root_only=False, strip_querystring=False, host_only=False):
        """A handy helper function that recreates the full URL as IRI for the
        current request or parts of it. Here an example:
        >>> get_current_url()
        'http://localhost/script/?param=foo'
        >>> get_current_url(root_only=True)
        'http://localhost/script/'
        >>> get_current_url(host_only=True)
        'http://localhost/'
        >>> get_current_url(strip_querystring=True)
        'http://localhost/script/'
        """
        environ = self.environ
        tmp = [environ['wsgi.url_scheme'], '://', self.get_host()]
        cat = tmp.append
        if host_only:
            return ''.join(tmp) + '/'
        cat(urlquote(environ.get('SCRIPT_NAME', '')).rstrip('/'))
        cat('/')
        if not root_only:
            # (a stray debug print of PATH_INFO's type was removed here)
            cat(urlquote(environ.get('PATH_INFO', '').lstrip('/')))
            if not strip_querystring:
                qs = self.query_string
                if qs:
                    cat('?' + qs)
        return ''.join(tmp)

    @cached_property
    def cookies(self):
        """Read only access to the retrieved cookie values as dictionary."""
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
        return FormsDict((c.key, c.value) for c in cookies)

    def get_cookie(self, key):
        """ Return the content of a cookie. """
        value = self.cookies.get(key)
        return value

    @cached_property
    def headers(self):
        """The headers from the WSGI environ as immutable
        `~cocopot.datastructures.WSGIHeaders`.
        """
        return WSGIHeaders(self.environ)

    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will always include a leading slash,
        even if the URL root is accessed.
        """
        return '/' + to_unicode(self.environ.get('PATH_INFO', '')).lstrip('/')

    @property
    def script_name(self):
        """ The initial portion of the URL's `path` that was removed by a higher
        level (server or routing middleware) before the application was
        called. This script path is returned with leading and tailing
        slashes. """
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'

    @property
    def full_path(self):
        """Requested path as unicode, including the query string."""
        return urljoin(self.script_name, self.path.lstrip('/'))

    @cached_property
    def script_root(self):
        """The root path of the script without the trailing slash."""
        raw_path = to_unicode(self.environ.get('SCRIPT_NAME') or '',
                              self.charset, self.encoding_errors)
        return raw_path.rstrip('/')

    @cached_property
    def url(self):
        """The reconstructed current URL as IRI.
        """
        return self.get_current_url()

    @cached_property
    def base_url(self):
        """Like `url` but without the querystring
        """
        return self.get_current_url(strip_querystring=True)

    @cached_property
    def root_url(self):
        """The full URL root (with hostname), this is the application
        root as IRI.
        """
        return self.get_current_url(root_only=True)

    @cached_property
    def host_url(self):
        """Just the host with scheme as IRI.
        """
        return self.get_current_url(host_only=True)

    @cached_property
    def host(self):
        """Just the host including the port if available.
        """
        return self.get_host()

    @property
    def query_string(self):
        """The raw query string from the environ."""
        return self.environ.get('QUERY_STRING', '')

    @property
    def method(self):
        """The request method, upper-cased; defaults to 'GET'."""
        return self.environ.get('REQUEST_METHOD', 'GET').upper()

    @cached_property
    def access_route(self):
        """If a forwarded header exists this is a list of all ip addresses
        from the client ip to the last proxy server.
        """
        if 'HTTP_X_FORWARDED_FOR' in self.environ:
            addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
            return [x.strip() for x in addr]
        elif 'REMOTE_ADDR' in self.environ:
            return [self.environ['REMOTE_ADDR']]
        return []
    remote_route = access_route

    @property
    def remote_addr(self):
        """The remote address of the client."""
        route = self.access_route
        return route[0] if route else None

    is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
                      .lower() == 'xmlhttprequest', doc='''
        True if the request was triggered via a JavaScript XMLHttpRequest.
        This only works with libraries that support the `X-Requested-With`
        header and set it to "XMLHttpRequest". Libraries that do that are
        prototype, jQuery and Mochikit and probably some more.''')
    is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
                         doc='`True` if the request is secure.')

    @cached_property
    def if_modified_since(self):
        """The parsed `If-Modified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))

    @cached_property
    def if_unmodified_since(self):
        """The parsed `If-Unmodified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))

    @cached_property
    def range(self):
        """The parsed `Range` header.
        """
        return parse_range_header(self.environ.get('HTTP_RANGE'))

    @cached_property
    def authorization(self):
        """The parsed `Authorization` header, or `None` if absent."""
        header = self.environ.get('HTTP_AUTHORIZATION')
        if not header:
            return None
        return parse_auth(header)

    @property
    def content_type(self):
        """The raw Content-Type header value (empty string if unset)."""
        return self.environ.get('CONTENT_TYPE', '')

    @cached_property
    def content_length(self):
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        return self.get_content_length()

    @cached_property
    def parsed_content_type(self):
        """Content-Type split into (mimetype, params-dict)."""
        return parse_content_type(self.environ.get('CONTENT_TYPE', ''))

    @property
    def mimetype(self):
        """Like `content_type` but without parameters (eg, without
        charset, type etc.). For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        return self.parsed_content_type[0]

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        return self.parsed_content_type[1]

    @property
    def blueprint(self):
        """The name of the current blueprint"""
        if '.' in self.endpoint:
            return self.endpoint.rsplit('.', 1)[0]
        # implicitly None when the endpoint has no blueprint prefix

    @property
    def json(self):
        """If the mimetype is `application/json` this will contain the
        parsed JSON data. Otherwise this will be `None`.
        The `get_json` method should be used instead.
        """
        return self.get_json(silent=True)

    def get_json(self, force=False, silent=False, cache=True):
        """Parses the incoming JSON request data and returns it. If
        parsing fails a `BadRequest` is raised. By default this function
        will only load the json data if the mimetype is
        ``application/json`` but this can be overriden by the `force`
        parameter.
        Args:
            * force: if set to `True` the mimetype is ignored.
            * silent: if set to `True` parse errors are silenced and
              `None` is returned instead of raising `BadRequest`.
            * cache: if set to `True` the parsed JSON data is remembered
              on the request.
        """
        _missing = object()
        rv = getattr(self, '_cached_json', _missing)
        if rv is not _missing:
            return rv
        if self.mimetype != 'application/json' and not force:
            return None
        # We accept a request charset against the specification as
        # certain clients have been using this in the past. This
        # fits our general approach of being nice in what we accept
        # and strict in what we send out.
        request_charset = self.mimetype_params.get('charset', 'utf-8')
        try:
            data = self.get_data(cache=False)
            if isinstance(data, bytes):
                # decode with the charset declared by the client;
                # json.loads() no longer accepts an `encoding` keyword
                # (deprecated in 3.1, removed in Python 3.9)
                data = data.decode(request_charset, self.encoding_errors)
            rv = json.loads(data)
        except ValueError:
            if silent:
                rv = None
            else:
                raise BadRequest()
        if cache:
            self._cached_json = rv
        return rv
|
{
"content_hash": "ff453152ea28cfa4fc61441a8fb7662c",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 102,
"avg_line_length": 34.71559633027523,
"alnum_prop": 0.5720401691331924,
"repo_name": "zeaphoo/flagon",
"id": "cbccbfa40f517321143baba2e3199b63b069a8a3",
"size": "18944",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cocopot/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172641"
}
],
"symlink_target": ""
}
|
from django.db.models import F
from taggit.models import Tag
from tempo.events import models
from . import language
from . import query_models
def execute(q):
    """Parse the query string ``q`` and dispatch it to the handler
    registered for its statement type in ``STATEMENT_MAPPING``."""
    parsed_query = language.query.parseString(q)
    handler = STATEMENT_MAPPING[parsed_query['statement']]
    return handler(parsed_query)
def run_select(parsed_query):
    """Execute a parsed SELECT statement.

    Builds a Django values() queryset from the model registered for the
    statement's source; aliased expressions are added via annotate(F(...)).
    """
    query_model = query_models.query_models.get_by_source(parsed_query['source'])
    aliases = {}
    values = []
    for full_expr in parsed_query['exprs']:
        column = full_expr['expr']['column']
        alias = full_expr.get('alias')
        if alias:
            # expose the column under its alias via an F() annotation
            aliases[alias] = F(column)
            values.append(alias)
        else:
            values.append(column)
    qs = query_model.model.objects.all()
    if aliases:
        qs = qs.annotate(**aliases)
    return qs.values(*values)
# Maps a statement keyword (as produced by the query grammar) to the
# function that executes it; defined after the handlers it references.
STATEMENT_MAPPING = {
    'SELECT': run_select,
}
|
{
"content_hash": "0faa735e11a40227b162bcd1dbee5d5a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 81,
"avg_line_length": 22.65,
"alnum_prop": 0.6169977924944813,
"repo_name": "EliotBerriot/tempo",
"id": "09868cb67c378492a80887c1bf2770f98033c92d",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempo/query/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "595412"
},
{
"name": "HTML",
"bytes": "74777"
},
{
"name": "JavaScript",
"bytes": "906006"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "127936"
},
{
"name": "Shell",
"bytes": "8049"
}
],
"symlink_target": ""
}
|
import os
from .. import base
from girder.utility import config
class CustomRootTestCase(base.TestCase):
    """Verifies that a plugin can replace the server's web root and that
    the stock web client, API and custom routes are re-homed correctly."""

    def setUp(self):
        """Point the plugin directory at the bundled test plugins, enable
        'test_plugin' and start the server."""
        pluginRoot = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                  'test_plugins')
        conf = config.getConfig()
        conf['plugins'] = {'plugin_directory': pluginRoot}
        base.enabledPlugins.append('test_plugin')
        base.startServer()

    def tearDown(self):
        """Stop the server started in setUp."""
        base.stopServer()

    def testCustomWebRoot(self):
        """
        Tests the ability of plugins to serve their own custom server roots.
        """
        # Root (/) should serve our custom route
        resp = self.request('/', prefix='', isJson=False)
        self.assertStatusOk(resp)
        self.assertEqual(resp.collapse_body(), 'hello world')
        # Normal web client should now be served from /girder
        resp = self.request('/girder', prefix='', isJson=False)
        self.assertStatusOk(resp)
        self.assertTrue('g-global-info-apiroot' in resp.collapse_body())
        # Api should be served out of /api/v1
        resp = self.request('/api/v1', prefix='', isJson=False)
        self.assertStatusOk(resp)
        self.assertTrue('Girder REST API Documentation' in resp.collapse_body())
        # /api should redirect to /api/v1
        resp = self.request('/api', prefix='', isJson=False)
        self.assertStatus(resp, 303)
        self.assertTrue('/api/v1' in resp.collapse_body())
        # Our custom API augmentations should still work
        resp = self.request('/describe')
        self.assertStatusOk(resp)
        self.assertTrue('apis' in resp.json)
        otherDocs = [x for x in resp.json['apis'] if x['path'] == '/other']
        self.assertEqual(len(otherDocs), 1)
        resp = self.request('/describe/other')
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json['apis']), 1)
        resp = self.request('/other')
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, ['custom REST route'])
        # Api should not be served out of /girder/api/v1
        resp = self.request('/girder/api/v1', prefix='', isJson=False)
        self.assertStatus(resp, 404)
        # Test our staticFile method
        resp = self.request('/static_route', prefix='', isJson=False)
        self.assertEqual(resp.collapse_body(), 'Hello world!\n')
|
{
"content_hash": "9b610efbbce8b23059f94181b5f45417",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 36.07575757575758,
"alnum_prop": 0.6144477110457791,
"repo_name": "jcfr/girder",
"id": "f9d6fcc66610ce1e6c4c1b7a6a53cf9a96295225",
"size": "3175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cases/custom_root_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "34810"
},
{
"name": "CSS",
"bytes": "152921"
},
{
"name": "HTML",
"bytes": "154464"
},
{
"name": "JavaScript",
"bytes": "1297298"
},
{
"name": "Mako",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "1121586"
},
{
"name": "Ruby",
"bytes": "9923"
},
{
"name": "Shell",
"bytes": "3430"
}
],
"symlink_target": ""
}
|
"""
Program: conserved_darkened_genes.py
Author: Josh Goodman
Description:
This script started as a first pass attempt at trying to answer a question posed
by Dr. Arash Bashirullah of the University of Wisconsin. Dr. Bashirullah wanted
to know the following: Of all the Dmel genes, which ones do we know very little
about but are known to be highly conserved across many organisms.
This script tries to answer this question by using two simple criteria for
selecting genes.
1. Generate a list of genes that have terms from 1 or less GO aspects
(molecular_function, biological_process, or cellular_component).
Terms based on predictions or those that are qualified with 'NOT'
were ignored.
2. Generate a list of genes that are conserved across all species in
the DIOPT meta orthology tool. Currently, DIOPT has data on 10
species from 18 orthology algorithms.
The intersection of these two lists is one possible answer to this question.
"""
import pandas as pd
import argparse
from sqlalchemy import create_engine
"""
Name: setup_go_func
Description:
This python function creates a temporary PostgreSQL SQL function that will return a count of
experimental GO terms for the given GO aspect. GO aspects can be either biological_process,
molecular_function, or cellular_component. The SQL function takes a gene ID (FBgn) and
the GO aspect that you wish to retrieve the count for.
This python function takes a SQLAlchemy connection object, creates the function in the
pg_temp temporary schema, and then returns back to the caller.
The PostgreSQL function can then be called in later SQL statements anywhere in this program.
e.g.
select pg_temp.experimental_go_count('FBgn0000490','biological_process');
Arguments:
conn - A SQLAlchemy Connection object.
Returns:
None
"""
def setup_go_func(conn):
    """Install pg_temp.experimental_go_count(fbgn, aspect) on *conn*.

    The SQL function counts distinct experimental (non-predicted,
    non-'NOT'-qualified) GO terms for a gene in one GO aspect.  It lives
    in the pg_temp schema, so it exists only for this connection.
    """
    conn.execute("""
    create function pg_temp.experimental_go_count(fbgn text, aspect text) returns integer as $$
        select count(distinct cvt.name)::integer
        from feature f join feature_cvterm fcvt on (f.feature_id=fcvt.feature_id)
             join cvterm cvt on (fcvt.cvterm_id=cvt.cvterm_id)
             join cv on (cvt.cv_id=cv.cv_id)
             join feature_cvtermprop ev_code on (fcvt.feature_cvterm_id=ev_code.feature_cvterm_id)
             join cvterm ev_code_type on (ev_code.type_id=ev_code_type.cvterm_id)
        where f.uniquename = $1 -- The gene ID to fetch terms for.
              and cv.name = $2 -- The GO aspect to fetch terms for.
              -- The following ignores terms that have been annotated with 'NOT'
              and (select fcvtp_type.name
                   from feature_cvtermprop fcvtp join cvterm fcvtp_type on (fcvtp.type_id=fcvtp_type.cvterm_id)
                   where fcvtp.feature_cvterm_id = fcvt.feature_cvterm_id
                         and fcvtp_type.name = 'NOT'
                   ) is null
              -- Select only experimental terms, no annotations from predictions.
              and ev_code_type.name = 'evidence_code'
              and ev_code.value ~
                  'inferred from (physical interaction|direct assay|genetic interaction|mutant phenotype|expression pattern|(high throughput (experiment|direct assay|expression pattern|genetic interaction|mutant phenotype)))'
    $$ language sql;
    """)
"""
Name: fetch_go_counts
Description:
This function queries a FlyBase Chado database and returns a list of all FlyBase genes
that have been localized to the genome. The columns include the FlyBase FBgn ID, the
gene symbol, term counts by GO aspect, and the number of aspects with more than 0 terms.
Arguments:
conn - A SQLAlchemy Connection object.
Returns:
DataFrame - A Data frame with the gene ID, symbol, term counts for all 3 GO aspects, and
the number of aspects with more than 0 terms.
"""
def fetch_go_counts(conn):
    """Return a DataFrame (indexed by FBgn ID) with experimental GO term
    counts per aspect for every localized D. melanogaster gene, plus a
    'num_aspects' column counting aspects that have at least one term."""
    # Install a SQL function that is used by this function.
    setup_go_func(conn)
    # The Following SQL returns GO term counts for all GO aspects for Dmel genes
    # that have been localized to the genome.
    fbgn_go_counts_sql = """
        select gene.uniquename as fbid,
               flybase.current_symbol(gene.uniquename) as symbol,
               pg_temp.experimental_go_count(gene.uniquename,'biological_process') as biological_process,
               pg_temp.experimental_go_count(gene.uniquename,'molecular_function') as molecular_function,
               pg_temp.experimental_go_count(gene.uniquename,'cellular_component') as cellular_component
            from feature gene join cvterm cvt on (gene.type_id=cvt.cvterm_id)
                 join organism org on (gene.organism_id=org.organism_id)
                 join featureloc fl on (gene.feature_id=fl.feature_id)
            where gene.uniquename ~ '^FBgn[0-9]+$'
                  and gene.is_obsolete = false
                  and gene.is_analysis = false
                  and cvt.name = 'gene'
                  and org.genus = 'Drosophila' and org.species = 'melanogaster'
        ;
    """
    df = pd.read_sql(fbgn_go_counts_sql, conn, index_col='fbid')
    # Counts the number of GO aspect columns with non zero values and adds it as a new
    # column to the DataFrame as 'num_aspects'.
    df['num_aspects'] = df[['biological_process', 'molecular_function', 'cellular_component']].astype(bool).sum(axis=1)
    return df
"""
Name: fetch_ortholog_counts
Description:
This function queries a FlyBase Chado database and returns a DataFrame containing the FlyBase gene ID
and the number of species that are reported by DIOPT in orthology calls. No filtering of calls by
    score is attempted here, which could be a source of future improvements.
Arguments:
conn - A SQLAlchemy Connection object.
Returns:
DataFrame - A DataFrame with the FlyBase gene ID and the number of species in the DIOPT reported
orthology calls.
"""
def fetch_ortholog_counts(conn):
    """Return a DataFrame (indexed by FBgn ID) with the number of distinct
    species appearing in DIOPT orthology calls for each localized
    D. melanogaster gene."""
    # SQL to fetch counts of species in orthology calls for all Dmel genes that are localized to the genome.
    ortholog_counts_sql = """
        select gene.uniquename as fbid,
               count(distinct fbog.organism_id) as num_ortho_species
            from feature gene join feature_relationship ortho_rel on (gene.feature_id=ortho_rel.object_id)
                 join feature fbog on (ortho_rel.subject_id=fbog.feature_id)
                 join cvterm fr_type on (ortho_rel.type_id=fr_type.cvterm_id)
                 join feature_relationshipprop frp on (ortho_rel.feature_relationship_id=frp.feature_relationship_id)
                 join organism org on (gene.organism_id=org.organism_id)
                 join cvterm cvt on (gene.type_id=cvt.cvterm_id)
                 join featureloc fl on (gene.feature_id=fl.feature_id)
            where gene.uniquename ~ '^FBgn[0-9]+$'
                  and gene.is_analysis = false
                  and gene.is_obsolete = false
                  and cvt.name = 'gene'
                  and fbog.is_analysis = false
                  and fbog.is_obsolete = false
                  and fr_type.name = 'orthologous_to'
                  and frp.value = 'DIOPT'
                  and org.genus = 'Drosophila'
                  and org.species ='melanogaster'
            group by gene.uniquename
        ;
    """
    return pd.read_sql(ortholog_counts_sql, conn, index_col='fbid')
def main():
    """Connect to a FlyBase Chado database, export GO-aspect counts and
    DIOPT ortholog species counts to CSV files, and write the genes that
    are both poorly annotated (<= 1 GO aspect) and conserved across all
    DIOPT species to 'conserved_darkened_genes.csv'."""
    # Setup the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", help="Chado database hostname.", default="chado.flybase.org")
    parser.add_argument("-U", "--username", help="Chado database username.", default="flybase")
    parser.add_argument("-W", "--password", help="Chado database password.")
    # Fixed: the help text previously said "password" for the database name.
    parser.add_argument("-d", "--dbname", help="Chado database name.", default="flybase")
    parser.add_argument("-p", "--port", help="Chado database port.", default=5432, type=int)
    args = parser.parse_args()
    # Init the SQLAlchemy engine and connect.
    engine = create_engine(
        'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(args.username, args.password, args.host, args.port, args.dbname),
        client_encoding='utf8')
    conn = engine.connect()
    # Fetch GO counts and store them to a CSV file.
    go_counts = fetch_go_counts(conn)
    go_counts.to_csv('dmel_go_counts.csv')
    # Select out genes with 1 or less GO aspects and store to a file.
    genes_few_go_aspects = go_counts[go_counts['num_aspects'] <= 1]
    genes_few_go_aspects.to_csv('dmel_few_go_aspects.csv')
    # Fetch ortholog counts and store to a file.
    gene_orthologs_species_count = fetch_ortholog_counts(conn)
    gene_orthologs_species_count.to_csv('dmel_orthologs_species_count.csv')
    # Calculate the intersection between the GO and orthology lists.
    merged_gene_list = pd.merge(genes_few_go_aspects, gene_orthologs_species_count, on='fbid')
    final_filename = 'conserved_darkened_genes.csv'
    print("Saving final gene list to {}".format(final_filename))
    # Save those genes from the merged list that are conserved across all
    # current DIOPT species (9 non-Dmel species).
    merged_gene_list[merged_gene_list['num_ortho_species'] == 9].to_csv(final_filename)

if __name__ == "__main__":
    main()
|
{
"content_hash": "46ccc4616359dec90e5c99ca913ae249",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 217,
"avg_line_length": 44.24761904761905,
"alnum_prop": 0.6726216099870856,
"repo_name": "FlyBase/chado",
"id": "9b74cfdaade557e3be07f1d0904f9f93e752b29a",
"size": "9314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/conserved_darkened_genes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "64479"
},
{
"name": "Perl",
"bytes": "21125"
},
{
"name": "Python",
"bytes": "20350"
}
],
"symlink_target": ""
}
|
import logging
import struct
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
class SimpleSwitch13(app_manager.RyuApp):
    """A minimal OpenFlow 1.3 learning switch.

    Learns (source MAC -> ingress port) mappings from PacketIn events,
    installs exact-match flows for known destinations so later traffic
    bypasses the controller, and floods unknown destinations.
    """
    # only speak OpenFlow 1.3 to connecting switches
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # dpid -> {mac address -> port}: the learned forwarding table
        self.mac_to_port = {}

    def add_flow(self, datapath, port, dst, actions):
        """Install a flow on *datapath* matching (in_port, eth_dst) that
        applies *actions*; flows never expire (idle/hard timeout 0)."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(in_port=port,
                                                 eth_dst=dst)
        inst = [datapath.ofproto_parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0, buffer_id=ofproto.OFP_NO_BUFFER,
            out_port=ofproto.OFPP_ANY,
            out_group=ofproto.OFPG_ANY,
            flags=0, match=match, instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Handle PacketIn: learn the source MAC, choose an output port
        (flood when unknown), install a flow for known destinations, and
        forward the packet."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        dst = eth.dst
        src = eth.src
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})
        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD
        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, in_port, dst, actions)
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
            actions=actions)
        datapath.send_msg(out)
|
{
"content_hash": "5c97f1ade1d617a2027c3869230960df",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 34.083333333333336,
"alnum_prop": 0.6214343928280358,
"repo_name": "citrix-openstack/build-ryu",
"id": "18f874c5c05a6c6df66a9c02aa7b04e9d040ad0c",
"size": "3067",
"binary": false,
"copies": "2",
"ref": "refs/heads/ctx-nova-network-smoke-latest",
"path": "ryu/app/simple_switch_13.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "670197"
},
{
"name": "Python",
"bytes": "2583730"
},
{
"name": "Shell",
"bytes": "11931"
}
],
"symlink_target": ""
}
|
class SearchNode:
    """A node in a search tree: a state, the action that produced it and
    a link back to the parent node (None for the root)."""

    def __init__(self, action, state, parent):
        self.state = state
        self.action = action
        self.parent = parent

    def path(self):
        """Return the (action, state) pairs from the root down to here."""
        steps = []
        node = self
        while node is not None:
            steps.append((node.action, node.state))
            node = node.parent
        steps.reverse()
        return steps

    def inPath(self, s):
        """Return True if state *s* occurs anywhere on the path to here."""
        node = self
        while node is not None:
            if node.state == s:
                return True
            node = node.parent
        return False
def breadthFirstSearch(initialState, goalTest, actions, successor):
    """Breadth-first search from *initialState* to a state satisfying
    *goalTest*.

    Args:
        initialState: the start state.
        goalTest: callable(state) -> bool.
        actions: callable(state) -> iterable of applicable actions.
        successor: callable(state, action) -> next state.
    Returns:
        The path as a list of (action, state) pairs, starting with
        (None, initialState), or None if no goal state is reachable.

    Fix: the original used an undefined `Queue` class (never imported,
    and not the stdlib queue.Queue, whose API is put/get/empty).  Given
    the function's name, FIFO order is intended, so collections.deque
    is used as the agenda.  Nodes are represented directly as paths,
    removing the dependency on a separate SearchNode class while
    preserving the original pruning (skip duplicate siblings and states
    already on the current path) and goal-test-on-generation behaviour.
    """
    from collections import deque
    if goalTest(initialState):
        return [(None, initialState)]
    agenda = deque([[(None, initialState)]])
    while agenda:
        parent_path = agenda.popleft()
        parent_state = parent_path[-1][1]
        new_child_states = []
        for a in actions(parent_state):
            new_s = successor(parent_state, a)
            if goalTest(new_s):
                return parent_path + [(a, new_s)]
            if new_s in new_child_states:
                continue  # duplicate among this node's children
            if any(new_s == s for _, s in parent_path):
                continue  # would revisit a state on the current path
            new_child_states.append(new_s)
            agenda.append(parent_path + [(a, new_s)])
    return None
|
{
"content_hash": "65edfb8ddfaf9d6338e52076f6ba078b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 32.256410256410255,
"alnum_prop": 0.5437201907790143,
"repo_name": "Thuva4/Algorithms_Example",
"id": "9223efcefc3728b8857f4d44ed5e9d747bd04040",
"size": "1258",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Breadth First Search/Python/BreadthFirstSearch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "27042"
},
{
"name": "C#",
"bytes": "11219"
},
{
"name": "C++",
"bytes": "105492"
},
{
"name": "Crystal",
"bytes": "812"
},
{
"name": "Go",
"bytes": "12389"
},
{
"name": "Haskell",
"bytes": "932"
},
{
"name": "Java",
"bytes": "98826"
},
{
"name": "JavaScript",
"bytes": "26306"
},
{
"name": "Kotlin",
"bytes": "2888"
},
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Perl",
"bytes": "277"
},
{
"name": "Python",
"bytes": "67474"
},
{
"name": "Racket",
"bytes": "132"
},
{
"name": "Ruby",
"bytes": "10824"
},
{
"name": "Rust",
"bytes": "3485"
},
{
"name": "Swift",
"bytes": "13531"
}
],
"symlink_target": ""
}
|
import re
import string
from bottle import abort, get, response, run, static_file, template
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
from pygments.util import ClassNotFound
# Module metadata.
__author__ = "Nigel Small <nigel@nigelsmall.name>"
__copyright__ = "2011-2016 Nigel Small"
__license__ = "Apache License, Version 2.0"
__version__ = "v2"

# Matches URIs embedded in free text.  The pattern contains five capture
# groups (the outer one is the full URI), so re.split() yields the full
# URI plus four inner-group entries per match -- see auto_link(), which
# walks the split result six items at a time.
URI_PATTERN = re.compile(r"""(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))""")
def auto_link(text):
    """Render *text* as escaped HTML with every URI wrapped in an <a>
    element linking to itself."""
    # URI_PATTERN.split() yields: leading text, then per match the full
    # URI followed by four inner capture groups, then the trailing text.
    parts = URI_PATTERN.split(text)
    out = HTML()
    out.write_text(parts[0])
    for i in range(1, len(parts), 6):
        uri = parts[i]
        out.element("a", {"href": uri}, text=uri)
        out.write_text(parts[i + 5])
    return out.html
def code_writer(out, source):
    """Emit *source* as an escaped <code> element on the HTML builder *out*."""
    return out.element(tag="code", text=source)
def image_writer(out, source):
    """Emit an <img> tag on *out*; *source* is "src|alt", where the alt
    text after the first "|" is optional (alt becomes None if empty)."""
    src, _, alt = source.partition("|")
    out.tag("img", {"src": src, "alt": alt or None})
def script_writer(out, source):
    """Emit *source* unescaped inside a <script> element on *out*."""
    return out.element(tag="script", raw=source)
class HTML(object):
    """Incremental builder for an HTML string.

    Text written via write_text is escaped (through the `processor`
    callable, HTML.entities by default); write_html/write_raw bypass
    escaping.  start_tag/end_tag maintain a stack of open elements so
    close() can terminate the document well-formed.
    """

    @staticmethod
    def entities(text):
        """Return *text* with the five XML-special characters replaced
        by character entity references.

        Fix: the published copy of this function assigned each special
        character to itself (the entity escapes had evidently been
        un-escaped somewhere upstream), making it a no-op; the mappings
        below restore the intended escaping.
        """
        chars = list(text)
        for i, ch in enumerate(chars):
            if ch == "&":
                chars[i] = "&amp;"
            elif ch == "'":
                chars[i] = "&#39;"
            elif ch == "\"":
                chars[i] = "&quot;"
            elif ch == "<":
                chars[i] = "&lt;"
            elif ch == ">":
                chars[i] = "&gt;"
        return "".join(chars)

    def __init__(self, processor=None):
        self.tokens = []        # finished output fragments
        self.stack = []         # names of currently open tags
        self.token_buffer = []  # text awaiting post-processing
        self.processor = processor or HTML.entities

    @property
    def html(self):
        """The document accumulated so far."""
        return "".join(self.tokens)

    def __repr__(self):
        return self.html

    def _flush(self):
        # run buffered text through the processor and move it to tokens
        if self.token_buffer:
            buffer = "".join(self.token_buffer)
            self.tokens.append(self.processor(buffer))
            self.token_buffer = []

    def write_html(self, html):
        """Append pre-formed HTML verbatim."""
        self._flush()
        self.tokens.append(html)

    def write_text(self, text, post_process=False):
        """Append *text*, escaped.  With post_process=True the text is
        buffered and only run through `processor` at the next flush."""
        if post_process:
            self.token_buffer.extend(text)
        else:
            self._flush()
            self.tokens.extend(HTML.entities(text))

    def write_raw(self, text):
        """Append *text* with no escaping at all."""
        self._flush()
        self.tokens.extend(text)

    def tag(self, tag, attributes=None):
        """Write a single (void) tag.  Attribute values are escaped,
        attributes are emitted in sorted order, and attributes whose
        value is None are skipped."""
        if attributes:
            self.write_html("<{0} {1}>".format(
                tag,
                " ".join(
                    '{0}="{1}"'.format(key, HTML.entities(str(value)))
                    for key, value in sorted(attributes.items())
                    if value is not None
                )
            ))
        else:
            self.write_html("<{0}>".format(tag))

    def start_tag(self, tag, attributes=None, void=False):
        """Open a tag; unless *void*, remember it for end_tag/close."""
        self.tag(tag, attributes)
        if not void:
            self.stack.append(tag)

    def end_tag(self, tag=None):
        """Close the most recently opened tag, or every tag up to and
        including *tag*.  Raises ValueError if *tag* is not open."""
        if not self.stack:
            raise ValueError("No tags to close")
        if not tag:
            tag = self.stack[-1]
        if tag not in self.stack:
            raise ValueError("End tag </{0}> should have corresponding "
                             "start tag <{0}>".format(tag))
        while True:
            t = self.stack.pop()
            self.write_html("</{0}>".format(t))
            if t == tag:
                break

    def element(self, tag, attributes=None, html=None, text=None, raw=None):
        """Write a complete element with at most one kind of content
        (html, text or raw)."""
        if sum(map(lambda x: 1 if x else 0, (html, text, raw))) > 1:
            raise ValueError("Cannot specify multiple content types")
        self.start_tag(tag, attributes)
        if html:
            self.write_html(html)
        if text:
            self.write_text(text)
        if raw:
            self.write_raw(raw)
        self.end_tag()

    def close(self):
        """Flush buffered text and close every still-open tag."""
        self._flush()
        while self.stack:
            t = self.stack.pop()
            self.write_html("</{0}>".format(t))
class Lexer(object):
    """Splits source text into plain-text runs and marker tokens.

    *escape* is a prefix (e.g. "~") that, when found before a marker,
    yields the escape and marker together as one token so the renderer
    can treat the marker literally.
    """

    def __init__(self, escape, *markers):
        self.escape = escape
        # The escape sequence is tried first, then the markers in order.
        self.markers = [escape] + list(markers)
        # First characters of every marker — a cheap pre-filter.
        self.marker_chars = set(marker[0] for marker in self.markers)

    def tokens(self, source):
        """Yield alternating text runs and marker tokens from *source*."""
        pending = 0
        cursor = 0
        size = len(source)
        while cursor < size:
            ch = source[cursor]
            if ch not in self.marker_chars:
                cursor += 1
                continue
            if self.escape and ch == self.escape:
                probe = cursor + len(self.escape)
            else:
                probe = cursor
            matched_end = None
            for marker in self.markers:
                end = probe + len(marker)
                if source[probe:end] == marker:
                    matched_end = end
                    break
            if matched_end is None:
                cursor += 1
            else:
                if cursor > pending:
                    yield source[pending:cursor]
                yield source[cursor:matched_end]
                pending = cursor = matched_end
        if cursor > pending:
            yield source[pending:cursor]
class Text(object):
    """A run of inline markup, tokenized once and rendered on demand."""

    def __init__(self, source=None):
        self.source = source
        # Tokenizer recognising every inline marker; "~" escapes the
        # marker that follows it.
        partitioner = Lexer("~",
            "http://", "https://", "ftp://", "mailto:", "<<", ">>",
            Quote.BLOCK_DELIMITER, "<--", "-->",
            "\\\\", "{{", "}}", Literal.INLINE_DELIMITER, Quote.INLINE_DELIMITER,
            "**", "//", "^^", "__", "[[", "]]", "|"
        )
        self.tokens = list(partitioner.tokens(source))

    @property
    def html(self):
        """Render the token stream to HTML."""
        # auto_link converts bare URLs in buffered plain text on flush.
        out = HTML(processor=auto_link)
        tokens = self.tokens[:]
        while tokens:
            token = tokens.pop(0)
            if token[0] == "~":
                # Escaped marker: emit literally, without the tilde.
                out.write_text(token[1:])
            elif token in SIMPLE_TOKENS:
                out.write_html(SIMPLE_TOKENS[token])
            elif token in TOGGLE_TOKENS:
                # Markers like ** and // open their element on first
                # sight and close it on the next.
                tag = TOGGLE_TOKENS[token]
                if tag in out.stack:
                    out.end_tag(tag)
                else:
                    out.start_tag(tag)
            elif token in BRACKET_TOKENS:
                # Collect everything up to the closing marker and hand it
                # to the matching writer (code / image / script).
                end_token, writer = BRACKET_TOKENS[token]
                source = []
                while tokens:
                    token = tokens.pop(0)
                    if token[0] == "~":
                        source.append(token[1:])
                    elif token == end_token:
                        break
                    else:
                        source.append(token)
                writer(out, "".join(source))
            elif token == "[[":
                # Link: [[href]] or [[href|label...]].  Gather the href up
                # to "|" or "]]".
                href = []
                while tokens:
                    token = tokens.pop(0)
                    if token in ("|", "]]"):
                        break
                    elif token[0] == "~":
                        href.append(token[1:])
                    else:
                        href.append(token)
                href = "".join(href)
                out.start_tag("a", {"href": href})
                if token != "|":
                    # No explicit label: the href doubles as link text and
                    # the anchor is closed here; otherwise the label tokens
                    # render inside the still-open <a> until "]]".
                    out.write_text(href)
                    out.end_tag("a")
            elif token == "]]":
                # Close an open link; if none is open, "]]" is literal.
                try:
                    out.end_tag("a")
                except ValueError:
                    out.write_text(token)
            else:
                # Plain text (including bare URLs): buffered so auto_link
                # can process whole runs at flush time.
                out.write_text(token, post_process=True)
        out.close()
        return out.html
class Heading(object):
    """A section heading line of the form ``== Title ==``."""

    @classmethod
    def check(cls, source):
        """Return True if *source* looks like a heading line."""
        return source.startswith("=")

    def __init__(self, source):
        if not Heading.check(source):
            raise ValueError("Heading must start with '='")
        remainder = source.lstrip("=")
        # Heading depth is the number of leading '=' characters, capped
        # at HTML's maximum of <h6>.
        self.level = len(source) - len(remainder)
        self.text = Text(remainder.strip().rstrip("=").rstrip())
        if self.level > 6:
            self.level = 6

    @property
    def html(self):
        out = HTML()
        if self.level == 1:
            out.element("h1", html=self.text.html)
            return out.html
        # Build an anchor id from the heading text: keep ASCII
        # alphanumerics, squash everything else into single hyphens.
        allowed = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
        anchor = "".join(ch if ch in allowed else "-" for ch in self.text.source)
        anchor = anchor.strip("-").lower()
        while "--" in anchor:
            anchor = anchor.replace("--", "-")
        tag = "h%d" % self.level
        out.start_tag(tag, {"id": anchor})
        out.write_html(self.text.html)
        out.element("a", {"href": "#%s" % anchor}, raw="§")
        out.end_tag(tag)
        return out.html
class HorizontalRule(object):
    """A thematic break, written as a line of four or more dashes."""

    @classmethod
    def check(cls, source):
        """Return True if *source* opens with '----'."""
        return source.startswith("----")

    def __init__(self, source):
        if not HorizontalRule.check(source):
            raise ValueError("Horizontal rule must start with '----'")

    @property
    def html(self):
        markup = HTML()
        markup.tag("hr")
        return markup.html
class ListItem(object):
    """One bullet (*) or numbered (#) list item; nesting depth is the
    number of repeated marker characters."""

    @classmethod
    def check(cls, source, content_type):
        # "**" opens bold inline text, so it only counts as a list marker
        # while we are already inside a list block.
        if content_type is not ListItem and source.startswith("**"):
            return False
        return source and source[0] in "#*"

    def __init__(self, source):
        markers = []
        rest = source
        while rest and rest[0] in "#*":
            markers.append(rest[0])
            rest = rest[1:]
        self.signature = tuple(markers)
        self.level = len(markers)
        self.item = Text(rest.strip())

    def ordered(self, level):
        """True if the list at *level* is numbered rather than bulleted."""
        return self.signature[level] == "#"

    def list_tag(self, level):
        return "ol" if self.ordered(level) else "ul"

    def compatible(self, other):
        """True if both items can live in the same nested list structure."""
        depth = min(len(self.signature), len(other.signature))
        return self.signature[:depth] == other.signature[:depth]

    @property
    def html(self):
        markup = HTML()
        markup.element("li", html=self.item.html)
        return markup.html
class Literal(object):
    """One verbatim line from a literal (code) block."""

    # Inline code span delimiter and block fence, respectively.
    INLINE_DELIMITER = "``"
    BLOCK_DELIMITER = "```"

    def __init__(self, source):
        self.line = source

    @property
    def html(self):
        markup = HTML()
        markup.start_tag("li")
        markup.element("code", text=self.line)
        markup.end_tag()
        return markup.html
class Quote(object):
    """A quoted line, rendered with the usual inline markup rules."""

    # Inline quote delimiter and block fence, respectively.
    INLINE_DELIMITER = '""'
    BLOCK_DELIMITER = '"""'

    def __init__(self, source):
        self.text = Text(source)

    @property
    def html(self):
        return self.text.html
class TableRow(object):
    """One table row: "|cell|cell|"; a leading "=" marks a header cell."""

    def __init__(self, source):
        assert source.startswith("|")
        # Spans whose contents may themselves contain "|" and therefore
        # must not split cells: inline code, links and images.
        bracket_tokens = {
            Literal.INLINE_DELIMITER: Literal.INLINE_DELIMITER,
            "[[": "]]",
            "{{": "}}",
        }
        lexer = Lexer("~", "|", Literal.INLINE_DELIMITER, "[[", "]]", "{{", "}}")
        source = source.rstrip()
        # Drop the trailing cell separator so it does not open an empty cell.
        if source.endswith("|"):
            source = source[:-1]
        tokens = list(lexer.tokens(source))
        cells = []
        while tokens:
            token = tokens.pop(0)
            if token == "|":
                cells.append([])
            elif token in bracket_tokens:
                # Copy a bracketed span verbatim, delimiters included.
                end = bracket_tokens[token]
                cells[-1].append(token)
                while tokens:
                    token = tokens.pop(0)
                    cells[-1].append(token)
                    if token == end:
                        break
            else:
                cells[-1].append(token)
        self.cells = ["".join(cell) for cell in cells]

    @property
    def html(self):
        out = HTML()
        out.start_tag("tr")
        for cell in self.cells:
            stripped_cell = cell.strip()
            attributes = {}
            if stripped_cell.startswith("="):
                tag = "th"
                # NOTE(review): this drops the first character of the raw
                # (unstripped) cell rather than the "=" itself, so for a
                # padded cell like " =Header" the "=" stays in the content
                # — confirm whether cell.lstrip()[1:] was intended.
                content = cell[1:]
            elif stripped_cell.startswith("`") and stripped_cell.endswith("`"):
                tag = "td"
                content = cell
                attributes["class"] = "code"
            else:
                tag = "td"
                content = cell
            # Surrounding whitespace encodes alignment: padded both sides
            # -> center, right only -> left, left only -> right.
            align = None
            if content:
                left_padded = content[0] in string.whitespace
                right_padded = content[-1] in string.whitespace
                if left_padded and right_padded:
                    align = "center"
                elif right_padded:
                    align = "left"
                elif left_padded:
                    align = "right"
            if align:
                content = content.strip()
                attributes["style"] = "text-align:%s" % align
            out.element(tag, attributes, html=Text(content).html)
        out.end_tag("tr")
        return out.html
class Block(object):
    """A run of consecutive source lines sharing one content type."""

    def __init__(self, content_type=None, metadata=None, lines=None):
        self.content_type = content_type
        self.metadata = metadata
        self.lines = []
        for line in lines or ():
            self.append(line)

    def __len__(self):
        return len(self.lines)

    def __nonzero__(self):
        # Python 2 truth hook; Python 3 falls back to __len__.
        return bool(self.lines)

    def append(self, line):
        """Add *line*, enforcing the block's content type if it has one."""
        if self.content_type and not isinstance(line, self.content_type):
            raise ValueError("Cannot add {0} to block of {1}".format(
                line.__class__.__name__, self.content_type.__name__))
        self.lines.append(line)
class Parser(object):
    """Line-oriented parser that groups source lines into typed Blocks."""

    def __init__(self):
        self.blocks = []
        # The block currently being accumulated.
        self.context = Block()
        self.title = None
        # Level of the heading currently used as title; 7 > any real level,
        # so the first heading always wins.
        self.title_level = 7

    def parse(self, source):
        """Split *source* into self.blocks; also captures the document
        title from the highest-level (smallest-number) heading."""

        def append(block):
            # Only non-empty blocks are kept.
            if block:
                self.blocks.append(block)

        def parse_literal(line):
            # Inside a ``` fence: everything is literal until the closer.
            if line.startswith(Literal.BLOCK_DELIMITER):
                append(self.context)
                self.context = Block()
            else:
                self.context.lines.append(Literal(line))

        def parse_quote(line):
            # Inside a """ fence: everything is quoted until the closer.
            if line.startswith(Quote.BLOCK_DELIMITER):
                append(self.context)
                self.context = Block()
            else:
                self.context.lines.append(Quote(line))

        # keepends=True so literal blocks preserve their newlines.
        for line in source.splitlines(True):
            if self.context.content_type is Literal:
                parse_literal(line)
            elif self.context.content_type is Quote:
                parse_quote(line)
            else:
                line = line.rstrip()
                stripped_line = line.lstrip()
                if Heading.check(line):
                    append(self.context)
                    self.context = Block()
                    # NOTE: 'source' deliberately shadows the parameter
                    # here; the raw document text is not needed below.
                    source = Heading(line)
                    append(Block(Heading, lines=[source]))
                    if not self.title or source.level < self.title_level:
                        self.title, self.title_level = source.text.html, source.level
                elif line.startswith("----"):
                    append(self.context)
                    self.context = Block()
                    append(Block(HorizontalRule, lines=[HorizontalRule(line)]))
                elif ListItem.check(stripped_line, self.context.content_type):
                    source = ListItem(stripped_line)
                    # Start a new list block unless this item nests
                    # compatibly with the one in progress.
                    if not (self.context and self.context.content_type is ListItem and self.context.lines[0].compatible(source)):
                        append(self.context)
                        self.context = Block(ListItem)
                    self.context.lines.append(source)
                elif line.startswith(Literal.BLOCK_DELIMITER):
                    # Fence opener; the rest of the line (e.g. a language
                    # name) is kept as block metadata.
                    metadata = line.lstrip("`").strip()
                    append(self.context)
                    self.context = Block(Literal, metadata=metadata)
                elif line.startswith(Quote.BLOCK_DELIMITER):
                    metadata = line.lstrip('"').strip()
                    append(self.context)
                    self.context = Block(Quote, metadata=metadata)
                elif line.startswith("|"):
                    if self.context.content_type is not TableRow:
                        append(self.context)
                        self.context = Block(TableRow)
                    self.context.lines.append(TableRow(line))
                else:
                    # Plain paragraph text; a blank line ends the paragraph.
                    if self.context.content_type is not None:
                        append(self.context)
                        self.context = Block()
                    if line:
                        self.context.lines.append(line)
                    else:
                        if self.context:
                            append(self.context)
                            self.context = Block()
        # Flush whatever block was still being accumulated at EOF.
        append(self.context)
class Document(object):
    """A parsed syntaq document, rendered to HTML on demand."""

    def __init__(self):
        self.parser = Parser()
        # NOTE(review): these two attributes appear unused — rendering
        # reads self.parser.blocks directly; confirm before removing.
        self.blocks = []
        self.block = Block()

    def parse(self, source):
        self.parser.parse(source)

    @property
    def title(self):
        # HTML of the highest-level heading, captured during parse().
        return self.parser.title

    @property
    def html(self):
        """Render every parsed block to one HTML string."""
        out = HTML()
        for block in self.parser.blocks:
            if block.content_type is None:
                # Paragraph: lines are joined with spaces.
                out.element("p", html=Text(" ".join(block.lines)).html)
            elif block.content_type in (Heading, HorizontalRule):
                for line in block.lines:
                    out.write_html(line.html)
            elif block.content_type is Literal:
                # Code block: syntax-highlight when the fence named a
                # language Pygments knows; plain <pre> otherwise.
                source = "".join(line.line for line in block.lines)
                lang, _, metadata = block.metadata.partition(" ")
                try:
                    lexer = get_lexer_by_name(lang)
                except ClassNotFound:
                    lexer = None
                if lexer is None:
                    out.start_tag("pre")
                    out.write_text(source)
                    out.end_tag("pre")
                else:
                    out.write_raw(highlight(source, lexer, HtmlFormatter()))
            elif block.content_type is Quote:
                out.start_tag("blockquote")
                for line in block.lines:
                    out.write_html(line.html)
                out.end_tag("blockquote")
            elif block.content_type is ListItem:
                # Open/close <ol>/<ul> tags to follow each item's nesting
                # level relative to the previous one.
                level = 0
                for line in block.lines:
                    while level > line.level:
                        out.end_tag()
                        level -= 1
                    while level < line.level:
                        out.start_tag(line.list_tag(level))
                        level += 1
                    out.write_html(line.html)
                while level:
                    out.end_tag()
                    level -= 1
            elif block.content_type is TableRow:
                out.start_tag("table")
                for line in block.lines:
                    out.write_html(line.html)
                out.end_tag("table")
        return out.html
# Tokens that always emit a fixed piece of markup.
SIMPLE_TOKENS = {
    "\\\\": "<br>",
    # NOTE(review): these arrow values look HTML-unescaped by the scraper
    # (plausibly "&rarr;"/"&larr;" originally); the raw characters still
    # render correctly, so behaviour is unchanged either way.
    "-->": "→",
    "<--": "←",
}
# Tokens that open their element on first sight and close it on the next.
TOGGLE_TOKENS = {
    "//": "em",
    Quote.INLINE_DELIMITER: "q",
    "**": "strong",
    "__": "sub",
    "^^": "sup",
}
# Opening token -> (closing token, writer) for spans captured verbatim.
BRACKET_TOKENS = {
    "<<": (">>", script_writer),
    Literal.INLINE_DELIMITER: (Literal.INLINE_DELIMITER, code_writer),
    "{{": ("}}", image_writer),
}
@get("/<name>")
def content(name):
    """Render the named syntaq page from the content directory.

    Responds 404 when the page does not exist or the name is unsafe.
    """
    # SECURITY: *name* comes from the URL and is interpolated into a
    # filesystem path — reject any separator or parent-directory component
    # so a crafted request cannot escape content/ (defence in depth on top
    # of bottle's own segment matching).
    if "/" in name or "\\" in name or ".." in name:
        abort(404)
    try:
        with open("content/%s.syntaq" % name) as f:
            source = f.read()
        document = Document()
        document.parse(source)
        return template("templates/content.html", title=document.title,
                        body=document.html)
    except FileNotFoundError:
        abort(404)
@get("/_style/pygments.css")
def pygments_style():
    """Serve the Pygments highlighting rules as a CSS document."""
    response.content_type = "text/css"
    stylesheet = HtmlFormatter().get_style_defs('.highlight')
    return stylesheet
@get("/_style/<name>.css")
def style(name):
    """Serve a named stylesheet from the style directory."""
    filename = "%s.css" % name
    return static_file(filename, "style")
# Run the development server (with auto-reload) when executed directly.
if __name__ == "__main__":
    run(reloader=True)
|
{
"content_hash": "45e0a4907fb229956d55eaa279d82408",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 237,
"avg_line_length": 31.3890625,
"alnum_prop": 0.4788690328040221,
"repo_name": "nigelsmall/syntaq",
"id": "4501914136d73d55b576a02dc090e22fa49752e0",
"size": "20726",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2",
"path": "syntaq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49935"
}
],
"symlink_target": ""
}
|
# Sphinx configuration for the "stats" package documentation.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import stats
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stats'
copyright = u'2013, Johan Bloemberg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = stats.__version__
# The full version, including alpha/beta/rc tags.
release = stats.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'statsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'stats.tex', u'stats Documentation',
     u'Johan Bloemberg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'stats', u'stats Documentation',
     [u'Johan Bloemberg'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'stats', u'stats Documentation',
     u'Johan Bloemberg', 'stats', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "90ce62b776dadc2bc287cbb1b084e36c",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 32.077235772357724,
"alnum_prop": 0.7044734507666962,
"repo_name": "aequitas/stats",
"id": "6c437c06e87fce0d7476b32e7c29ce9db82d266e",
"size": "8334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10223"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
}
|
import steel
import unittest
class NameAwareOrderedDictTests(unittest.TestCase):
    """Behaviour of steel.NameAwareOrderedDict around set_name() hooks."""

    def setUp(self):
        self.d = steel.NameAwareOrderedDict()

    def test_ignore_object(self):
        # Values without a set_name() hook are stored untouched.
        self.d['example'] = object()
        self.assertFalse(hasattr(self.d['example'], 'name'))

    def test_auto_name(self):
        # Values exposing set_name() are told the key they were stored under.
        class Named:
            def set_name(self, name):
                self.name = name
        self.d['example'] = Named()
        self.assertEqual(self.d['example'].name, 'example')

    def test_errors(self):
        # Exceptions raised inside set_name() must propagate, not be
        # swallowed by the dict.
        class Exploding:
            "Raises as soon as the dict tries to assign its name"
            def set_name(self, name):
                raise TypeError('Something went wrong')
        with self.assertRaises(TypeError):
            self.d['example'] = Exploding()
class SizeTests(unittest.TestCase):
    """Structure.size is the sum of its fields' declared sizes."""

    def test_explicit_sizes(self):
        class TwoFields(steel.Structure):
            field1 = steel.Bytes(size=2)
            field2 = steel.Bytes(size=4)
        # 2 + 4 bytes
        self.assertEqual(TwoFields.size, 6)
class InstantiationTests(unittest.TestCase):
    """Keyword arguments to a Structure populate only the given fields."""

    def test_empty_args(self):
        class Example(steel.Structure):
            field1 = steel.Bytes(size=2)
            field2 = steel.Bytes(size=4)
        instance = Example()
        # No kwargs: neither field lands in the instance dict.
        self.assertNotIn('field1', instance.__dict__)
        self.assertNotIn('field2', instance.__dict__)

    def test_filled_args(self):
        class Example(steel.Structure):
            field1 = steel.Bytes(size=2)
            field2 = steel.Bytes(size=4)
        instance = Example(field1=b'f1', field2=b'fld2')
        self.assertEqual(instance.field1, b'f1')
        self.assertEqual(instance.field2, b'fld2')
class StructureTupleTests(unittest.TestCase):
    """StructureTuple instances behave like tuples with named fields."""

    def test_empty_args(self):
        class Test(steel.StructureTuple):
            field1 = steel.Bytes(size=2)
            field2 = steel.Bytes(size=4)
        obj = Test()
        self.assertIsInstance(obj, tuple)
        self.assertEqual(obj.field1, None)
        self.assertEqual(obj.field2, None)

    def test_filled_args(self):
        # BUG FIX: this previously subclassed steel.Structure (an apparent
        # copy/paste from InstantiationTests), so the StructureTuple
        # keyword-argument path was never exercised.
        class Test(steel.StructureTuple):
            field1 = steel.Bytes(size=2)
            field2 = steel.Bytes(size=4)
        obj = Test(field1=b'f1', field2=b'fld2')
        self.assertIsInstance(obj, tuple)
        self.assertEqual(obj.field1, b'f1')
        self.assertEqual(obj.field2, b'fld2')
|
{
"content_hash": "981b5ab6347d3756086ab71d65d547a8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 73,
"avg_line_length": 29.25581395348837,
"alnum_prop": 0.6049284578696343,
"repo_name": "gulopine/steel-experiment",
"id": "528ee6b1474a8c3dd4f81c07e7567696fcca31f0",
"size": "2516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/base/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31706"
}
],
"symlink_target": ""
}
|
from telemetry import value as value_module
class SkipValue(value_module.Value):
    """Telemetry value recording that a page was skipped, with the reason."""

    def __init__(self, page, reason, description=None):
        """A value representing a skipped page.

        Args:
          page: The skipped page object.
          reason: The string reason the page was skipped.
        """
        super(SkipValue, self).__init__(
            page, 'skip', '', True, description, None, None)
        self._reason = reason

    def __repr__(self):
        return 'SkipValue(%s, %s, description=%s)' % (
            self.page.name, self._reason, self.description)

    @property
    def reason(self):
        return self._reason

    # Skips carry no numeric payload, so every buildbot/representative
    # accessor reports nothing.
    def GetBuildbotDataType(self, output_context):
        return None

    def GetBuildbotValue(self):
        return None

    def GetChartAndTraceNameForPerPageResult(self):
        return None

    def GetRepresentativeNumber(self):
        return None

    def GetRepresentativeString(self):
        return None

    @staticmethod
    def GetJSONTypeName():
        return 'skip'

    def AsDict(self):
        d = super(SkipValue, self).AsDict()
        d['reason'] = self._reason
        return d

    @staticmethod
    def FromDict(value_dict, page_dict):
        kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
        # 'name' and 'units' are fixed for skips; drop them along with any
        # optional keys the constructor does not accept.
        del kwargs['name']
        del kwargs['units']
        for optional in ('important', 'tir_label', 'grouping_keys'):
            kwargs.pop(optional, None)
        kwargs['reason'] = value_dict['reason']
        return SkipValue(**kwargs)

    @classmethod
    def MergeLikeValuesFromSamePage(cls, values):
        assert False, 'Should not be called.'

    @classmethod
    def MergeLikeValuesFromDifferentPages(cls, values):
        assert False, 'Should not be called.'
|
{
"content_hash": "f12845d879e1cb9f23d334bf09d68f66",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 25.450704225352112,
"alnum_prop": 0.6452684006640841,
"repo_name": "catapult-project/catapult-csm",
"id": "571de28915c6b31a006d0f2bedd802f0ce06ae77",
"size": "1970",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/value/skip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
"""
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Module implementing the Arakoon protocol
"""
from ArakoonExceptions import *
from ArakoonValidators import SignatureValidator
from NurseryRouting import RoutingInfo
import os.path
import ssl
import struct
import select
import operator
import cStringIO
import types
# Translation table for dump(): characters whose repr() is three characters
# long (i.e. printable, non-escaped) map to themselves, all others to '.'.
FILTER = ''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
# Number of times a request is attempted before giving up.
ARA_CFG_TRY_CNT = 1
# TCP connection timeout, in seconds.
ARA_CFG_CONN_TIMEOUT = 60
# Scale factor for the randomised retry backoff, in seconds (see
# ArakoonClientConfig.getBackoffInterval).
ARA_CFG_CONN_BACKOFF = 5
# Retry period while a master re-election is in progress, in seconds.
ARA_CFG_NO_MASTER_RETRY = 60
from ovs.log.logHandler import LogHandler
logging = LogHandler.get('arakoon', name='internal')
class ArakoonClientConfig :
    """Client-side configuration: cluster id, node locations and TLS."""

    def __init__ (self, clusterId, nodes,
        tls=False, tls_ca_cert=None, tls_cert=None):
        """
        Constructor of an ArakoonClientConfig object
        The constructor takes one optional parameter 'nodes'.
        This is a dictionary containing info on the arakoon server nodes. It contains:
          - nodeids as keys
          - ([ip], port) as values
        e.g. ::
            cfg = ArakoonClientConfig ('ricky',
                { "myFirstNode" : (["127.0.0.1"], 4000 ),
                  "mySecondNode" :(["127.0.0.1"], 5000 ),
                  "myThirdNode"  :(["127.0.0.1","10.0.0.1"], 6000 )] })
        Note: This client package only supports TLSv1 when connecting to nodes,
        due to Python 2.x only supporting this TLS version. If your cluster is
        configured to use another TLS version, you'll need to use another
        Arakoon client which can work using a different socket interface which
        supports different TLS versions.
        @type clusterId: string
        @param clusterId: name of the cluster
        @type nodes: dict
        @param nodes: A dictionary containing the locations for the server nodes
        @param tls: Use a TLS connection
            If `tls_ca_cert` is given, this *must* be `True`, otherwise a
            `ValueError` will be raised.
        @type tls: `bool`
        @param tls_ca_cert: Path to CA certificate file
            If set, this will be used to validate node certificates.
        @type tls_ca_cert: `str`
        @param tls_cert: Path of client certificate & key files
            These should be passed as a tuple. When provided, `tls_ca_cert`
            *must* be provided as well, otherwise a `ValueError` will be raised.
        @type tls_cert: `(str, str)`
        """
        self._clusterId = clusterId
        self._nodes = self._cleanUp(nodes)
        # Validate the TLS argument combination and certificate paths
        # eagerly, so misconfiguration fails at construction time.
        if tls_ca_cert and not tls:
            raise ValueError('tls_ca_cert passed, but tls is False')
        if tls_cert and not tls_ca_cert:
            raise ValueError('tls_cert passed, but tls_ca_cert not given')
        if tls_ca_cert is not None and not os.path.isfile(tls_ca_cert):
            raise ValueError('Invalid TLS CA cert path: %s' % tls_ca_cert)
        if tls_cert:
            cert, key = tls_cert
            if not os.path.isfile(cert):
                raise ValueError('Invalid TLS cert path: %s' % cert)
            if not os.path.isfile(key):
                raise ValueError('Invalid TLS key path: %s' % key)
        self._tls = tls
        self._tls_ca_cert = tls_ca_cert
        self._tls_cert = tls_cert

    # Read-only accessors for the TLS settings.
    tls = property(operator.attrgetter('_tls'))
    tls_ca_cert = property(operator.attrgetter('_tls_ca_cert'))
    tls_cert = property(operator.attrgetter('_tls_cert'))

    def _cleanUp(self, nodes):
        # Normalise node locations: a comma-separated IP string becomes a
        # list of IPs (Python 2: types.StringType is str).
        for k in nodes.keys():
            t = nodes[k]
            maybe_string = t[0]
            if type(maybe_string) == types.StringType:
                ip_list = maybe_string.split(',')
                port = t[1]
                nodes[k] = (ip_list, port)
        return nodes

    def __str__(self):
        r = "ArakoonClientConfig('%s', %s)" % (self._clusterId,
            str(self._nodes))
        return r

    @staticmethod
    def getNoMasterRetryPeriod() :
        """
        Retrieve the period messages to the master should be retried when a master re-election occurs
        This period is specified in seconds
        @rtype: integer
        @return: Returns the retry period in seconds
        """
        return ARA_CFG_NO_MASTER_RETRY

    def getNodeLocations(self, nodeId):
        """
        Retrieve location of the server node with give node identifier
        A location is a pair consisting of a hostname or ip address as first element.
        The second element of the pair is the tcp port
        @type nodeId: string
        @param nodeId: The node identifier whose location you are interested in
        @rtype: pair(string,int)
        @return: Returns a pair with the nodes hostname or ip and the tcp port, e.g. ("127.0.0.1", 4000)
        """
        return self._nodes[ nodeId ]

    def getNodeLocation(self, nodeId):
        """
        Single-address variant of getNodeLocations: returns (first_ip, port).
        """
        ns,port = self.getNodeLocations(nodeId)
        ip0 =ns[0]
        loc = ip0,port
        return loc

    def getTryCount (self):
        """
        Retrieve the number of attempts a message should be tried before giving up
        Can be controlled by changing the global variable L{ARA_CFG_TRY_CNT}
        @rtype: integer
        @return: Returns the max retry count.
        """
        return ARA_CFG_TRY_CNT

    def getNodes(self):
        """
        Retrieve the dictionary with node locations
        @rtype: dict
        @return: Returns a dictionary mapping the node identifiers (string) to its location ( pair<string,integer> )
        """
        return self._nodes

    @staticmethod
    def getConnectionTimeout():
        """
        Retrieve the tcp connection timeout
        Can be controlled by changing the global variable L{ARA_CFG_CONN_TIMEOUT}
        @rtype: integer
        @return: Returns the tcp connection timeout
        """
        return ARA_CFG_CONN_TIMEOUT

    @staticmethod
    def getBackoffInterval():
        """
        Retrieves the backoff interval.
        If an attempt to send a message to the server fails,
        the client will wait a random number of seconds. The maximum wait time is n*getBackoffInterVal()
        with n being the attempt counter.
        Can be controlled by changing the global variable L{ARA_CFG_CONN_BACKOFF}
        @rtype: integer
        @return: The maximum backoff interval
        """
        return ARA_CFG_CONN_BACKOFF

    def getClusterId(self):
        # Name of the Arakoon cluster this config targets.
        return self._clusterId
class ArakoonClientLogger:
    """Static facade that routes client messages to the module-level
    'arakoon' logger at the matching severity."""

    @staticmethod
    def logDebug(msg, *args):
        logging.debug(msg, *args)

    @staticmethod
    def logWarning(msg, *args):
        logging.warning(msg, *args)

    @staticmethod
    def logError(msg, *args):
        logging.error(msg, *args)

    @staticmethod
    def logCritical(msg, *args):
        logging.critical(msg, *args)
def dump(src, length=8):
    """
    Render *src* as a classic hex/ascii dump, *length* bytes per row.

    Each row shows the offset, the hex values, and the printable rendering of
    the bytes (non-printables mapped through the module-level FILTER
    translation table).
    """
    rows = []
    offset = 0
    while src:
        chunk, src = src[:length], src[length:]
        hex_part = ' '.join(["%02X" % ord(c) for c in chunk])
        printable = chunk.translate(FILTER)
        rows.append("%04X %-*s %s\n" % (offset, length * 3, hex_part, printable))
        offset += length
    return ''.join(rows)
# Sizes (in bytes) of the primitive wire types
ARA_TYPE_INT64_SIZE = 8
ARA_TYPE_INT_SIZE = 4
ARA_TYPE_BOOL_SIZE = 1
# Magic used to mask each command
ARA_CMD_MAG = 0xb1ff0000
# Protocol version sent in the connection prologue
ARA_CMD_VER = 0x00000001
# Hello command
ARA_CMD_HEL = 0x00000001 | ARA_CMD_MAG
# Who is master?
ARA_CMD_WHO = 0x00000002 | ARA_CMD_MAG
# Existence of a value for a key
ARA_CMD_EXISTS = 0x00000007 | ARA_CMD_MAG
# Get a value
ARA_CMD_GET = 0x00000008 | ARA_CMD_MAG
# Update a value
ARA_CMD_SET = 0x00000009 | ARA_CMD_MAG
# Assert a key holds a given (optional) value
ARA_CMD_ASSERT = 0x00000016 | ARA_CMD_MAG
# Delete a key value pair
ARA_CMD_DEL = 0x0000000a | ARA_CMD_MAG
# Get a range of keys
ARA_CMD_RAN = 0x0000000b | ARA_CMD_MAG
# Get keys matching a prefix
ARA_CMD_PRE = 0x0000000c | ARA_CMD_MAG
# Test and set a value
ARA_CMD_TAS = 0x0000000d | ARA_CMD_MAG
# range entries (key/value pairs in a range)
ARA_CMD_RAN_E = 0x0000000f | ARA_CMD_MAG
# sequence: atomic batch of updates
ARA_CMD_SEQ = 0x00000010 | ARA_CMD_MAG
# Fetch several values at once
ARA_CMD_MULTI_GET = 0x00000011 | ARA_CMD_MAG
# Ask whether the cluster expects to make progress
ARA_CMD_EXPECT_PROGRESS_POSSIBLE = 0x00000012 | ARA_CMD_MAG
# Fetch server statistics
ARA_CMD_STATISTICS = 0x00000013 | ARA_CMD_MAG
# Invoke a server-side user function
ARA_CMD_USER_FUNCTION = 0x00000015 | ARA_CMD_MAG
# Number of keys stored
ARA_CMD_KEY_COUNT = 0x0000001a | ARA_CMD_MAG
# Set a value without bumping the paxos state when unchanged
ARA_CMD_CONFIRM = 0x0000001c | ARA_CMD_MAG
# Fetch the nursery (multi-cluster routing) configuration
ARA_CMD_GET_NURSERY_CFG = 0x00000020 | ARA_CMD_MAG
# range entries, iterated in reverse
ARA_CMD_REV_RAN_E = 0x00000023 | ARA_CMD_MAG
# sequence, synced to disk before acknowledging
ARA_CMD_SYNCED_SEQUENCE = 0x00000024 | ARA_CMD_MAG
# Delete all keys with a given prefix
ARA_CMD_DELETE_PREFIX = 0x00000027 | ARA_CMD_MAG
# Server version information
ARA_CMD_VERSION = 0x00000028 | ARA_CMD_MAG
# Assert a key exists
ARA_CMD_ASSERT_EXISTS = 0x00000029 | ARA_CMD_MAG
# Multi-get returning an option per key (None for missing keys)
ARA_CMD_MULTI_GET_OPTION = 0x00000031 | ARA_CMD_MAG
# Current node state
ARA_CMD_CURRENT_STATE = 0x00000032 | ARA_CMD_MAG
# Replace a value, returning the previous one
ARA_CMD_REPLACE = 0x00000033 | ARA_CMD_MAG
# No-op (forces a paxos round)
ARA_CMD_NOP = 0x00000041 | ARA_CMD_MAG
# Current transaction id
ARA_CMD_GET_TXID = 0x00000043 | ARA_CMD_MAG
# Arakoon error codes
# Success
ARA_ERR_SUCCESS = 0
# No entity
ARA_ERR_NO_ENT = 1
# Node is not the master
ARA_ERR_NOT_MASTER = 4
# not found
ARA_ERR_NOT_FOUND = 5
# wrong cluster
ARA_ERR_WRONG_CLUSTER = 6
# an Assert update failed
ARA_ERR_ASSERTION_FAILED = 7
# nursery routing range error
ARA_ERR_RANGE_ERROR = 9
# node is shutting down
ARA_ERR_GOING_DOWN = 16
# an AssertExists update failed
ARA_ERR_ASSERTEXISTS_FAILED = 17
ARA_ERR_NOT_SUPPORTED = 0x20
ARA_ERR_NO_LONGER_MASTER = 0x21
ARA_ERR_BAD_INPUT = 0x26
ARA_ERR_INCONSISTENT_READ = 0x80
ARA_ERR_USERFUNCTION_FAILURE= 0x81
# Type tags used by the statistics "named field" encoding
NAMED_FIELD_TYPE_INT = 1
NAMED_FIELD_TYPE_INT64 = 2
NAMED_FIELD_TYPE_FLOAT = 3
NAMED_FIELD_TYPE_STRING = 4
NAMED_FIELD_TYPE_LIST = 5
def _packString( toPack ):
    # Length-prefixed string: 4-byte native unsigned int, then the raw bytes.
    toPackLength = len( toPack )
    return struct.pack("I%ds" % ( toPackLength), toPackLength, toPack )
def _packStringOption ( toPack = None ):
    # Option encoding: a boolean presence flag, then the string when present.
    if toPack is None:
        return _packBool ( 0 )
    else :
        return _packBool ( 1 ) + _packString (toPack)
def _packInt ( toPack ):
    # 4-byte native unsigned int.
    return struct.pack( "I", toPack )
def _packInt64 ( toPack ):
    # 8-byte native signed int.
    return struct.pack( "q", toPack )
def _packSignedInt ( toPack ):
    # 4-byte native signed int.
    return struct.pack( "i", toPack )
def _packBool ( toPack) :
    # Single-byte boolean.
    return struct.pack( "?", toPack)
def sendPrologue(socket, clusterId):
    # Connection handshake, sent once per connection:
    # magic, protocol version, then the target cluster identifier.
    p = _packInt(ARA_CMD_MAG)
    p += _packInt(ARA_CMD_VER)
    p += _packString(clusterId)
    socket.sendall(p)
def _readExactNBytes( con, n ):
    # Read exactly n bytes from the connection, waiting (with the configured
    # timeout) for the socket to become readable. On any failure the
    # connection is marked closed and a specific Arakoon socket exception is
    # raised.
    if not con._connected :
        raise ArakoonSockRecvClosed()
    bytesRemaining = n
    tmpResult = ""
    timeout = ArakoonClientConfig.getConnectionTimeout()
    # For SSL sockets, drain already-decrypted buffered bytes first:
    # select() below cannot see data sitting in the SSL layer's buffer.
    if isinstance(con._socket, ssl.SSLSocket):
        s = con._socket
        pending = s.pending()
        if pending > 0:
            tmpResult = s.recv(min(n, pending))
            bytesRemaining = bytesRemaining - len(tmpResult)
    while bytesRemaining > 0 :
        # Block until the socket is readable, or give up after the timeout.
        tripleList = select.select( [con._socket] , [] , [] , timeout )
        if ( len ( tripleList [0]) != 0 ) :
            newChunk = ""
            try :
                newChunk = tripleList [0][0].recv ( bytesRemaining)
            except Exception, ex:
                ArakoonClientLogger.logError ("Error while receiving from socket. %s: '%s'" % (ex.__class__.__name__, ex) )
                con._connected = False
                raise ArakoonSockRecvError()
            newChunkSize = len( newChunk )
            if newChunkSize == 0 :
                # A zero-length read means the peer closed the connection.
                try:
                    con._socket.close()
                except Exception, ex:
                    ArakoonClientLogger.logError( "Error while closing socket. %s: %s" % (ex.__class__.__name__,ex))
                con._connected = False
                raise ArakoonSockReadNoBytes ()
            tmpResult = tmpResult + newChunk
            bytesRemaining = bytesRemaining - newChunkSize
        else :
            # select() timed out: the socket never became readable.
            msg = str(con._socketInfo)
            try:
                con._socket.close()
            except Exception, ex:
                ArakoonClientLogger.logError( "Error while closing socket. %s: %s" % (ex.__class__.__name__,ex))
            con._connected = False
            raise ArakoonSockNotReadable(msg = msg)
    return tmpResult
def _recvString ( con ):
    # Read a length-prefixed string from the connection.
    strLength = _recvInt( con )
    buf = _readExactNBytes( con, strLength)
    return struct.unpack( "%ds" % strLength, buf ) [0]
def _unpackInt(buf, offset):
    # Decode a 4-byte native unsigned int; return (value, new offset).
    r=struct.unpack_from( "I", buf,offset)
    return r[0], offset + ARA_TYPE_INT_SIZE
def _unpackSignedInt(buf, offset):
    # Decode a 4-byte native signed int; return (value, new offset).
    r=struct.unpack_from( "i", buf,offset)
    return r[0], offset + ARA_TYPE_INT_SIZE
def _unpackInt64(buf, offset):
    # Decode an 8-byte native signed int; return (value, new offset).
    r= struct.unpack_from("q", buf, offset)
    return r[0], offset + 8
def _unpackString(buf, offset):
    # Decode a length-prefixed string; return (value, new offset).
    size,o2 = _unpackInt(buf, offset)
    v = buf[o2:o2 + size]
    return v, o2+size
def _unpackStringList(buf, offset):
    # Decode an int count followed by that many length-prefixed strings.
    size,offset = _unpackInt(buf, offset)
    retVal = []
    for i in range( size ) :
        x, offset = _unpackString(buf, offset)
        retVal.append(x)
    return retVal, offset
def _unpackNamedField(buf, offset):
    # Decode one (type tag, name, value) statistics field into {name: value}.
    # List-typed fields recurse, producing a nested dict under the list name.
    type, offset = _unpackInt(buf, offset)
    name, offset = _unpackString(buf, offset)
    result = dict()
    if type == NAMED_FIELD_TYPE_INT:
        result[name], offset = _unpackInt(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_INT64:
        result[name], offset = _unpackInt64(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_FLOAT:
        result[name], offset = _unpackFloat(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_STRING:
        result[name], offset = _unpackString(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_LIST:
        length, offset = _unpackInt(buf,offset)
        localDict = dict()
        for i in range(length):
            field, offset = _unpackNamedField(buf, offset)
            localDict.update( field )
        result[name] = localDict
        return result, offset
    raise ArakoonException("Cannot decode named field %s. Invalid type: %d" % (name,type) )
def _recvInt ( con ):
    # Read a 4-byte unsigned int from the connection.
    buf = _readExactNBytes ( con, ARA_TYPE_INT_SIZE )
    i,o2 = _unpackInt(buf,0)
    return i
def _recvInt64 ( con ):
    # Read an 8-byte signed int from the connection.
    buf = _readExactNBytes( con, ARA_TYPE_INT64_SIZE )
    i,o2 = _unpackInt64(buf,0)
    return i
def _unpackBool(buf, offset):
    # Decode a single-byte boolean; return (value, new offset).
    r = struct.unpack_from( "?", buf, offset) [0]
    return r, offset+1
def _recvBool ( con ):
    # Read a single-byte boolean from the connection.
    buf = _readExactNBytes( con, 1 )
    b, o2 = _unpackBool(buf,0)
    return b
def _unpackFloat(buf, offset):
    # Decode an 8-byte native double; return (value, new offset).
    r = struct.unpack_from("d", buf, offset)
    return r[0], offset+8
def _recvFloat(con):
    """
    Read an 8-byte native double from the connection.

    Bug fix: the parameter used to be named ``buf`` while the body read from
    the undefined name ``con``, so any call raised NameError. The parameter
    is now named ``con``, consistent with every other ``_recv*`` helper.

    @param con: the connection to read from
    @return: the decoded float
    """
    buf = _readExactNBytes(con, 8)
    f, _offset = _unpackFloat(buf, 0)
    return f
def _recvStringOption ( con ):
    # Read an optional string: a presence flag, then the string when set.
    isSet = _recvBool( con )
    if( isSet ) :
        return _recvString( con )
    else :
        return None
class Consistency:
    """
    Base consistency level for read operations.

    Encodes to a single boolean byte; the base/default (False) requests a
    consistent read.
    """
    def __init__(self):
        self._v = _packBool(False)
    def encode(self):
        # Wire representation of this consistency level.
        return self._v
    def isDirty(self):
        return False
    def __str__(self):
        return self.__class__.__name__
    __repr__ = __str__
class Consistent(Consistency):
    """Fully consistent reads."""
    pass
class NoGuarantee(Consistency):
    """Dirty reads: no consistency guarantee, possibly stale data."""
    def __init__(self):
        self._v = _packBool(True)
    def isDirty(self):
        return True
class AtLeast(Consistency):
    """Read allowed from a node that has applied at least update ``i``."""
    def __init__(self,i):
        self._i = i
        # Tag byte 0x02 followed by the minimal update counter.
        self._v = "\x02" + _packInt64(i)
    def __str__(self):
        return "AtLeast(%i)" % self._i
    def isDirty(self):
        return True
class Update(object):
    """Base class for the updates that can be grouped in a Sequence."""
    pass
class Set(Update):
    """Set *key* to *value* (wire tag 1)."""
    def __init__(self,key,value):
        self._key = key
        self._value = value
    def write(self, fob):
        # Serialized as: tag, key, value.
        fob.write(_packInt(1))
        fob.write(_packString(self._key))
        fob.write(_packString(self._value))
class Delete(Update):
    """Delete *key* (wire tag 2)."""
    def __init__(self,key):
        self._key = key
    def write(self, fob):
        fob.write(_packInt(2))
        fob.write(_packString(self._key))
class Assert(Update):
    """Assert *key* currently holds the optional value *vo* (wire tag 8)."""
    def __init__(self, key, vo):
        self._key = key
        self._vo = vo
    def write(self, fob):
        fob.write(_packInt(8))
        fob.write(_packString(self._key))
        fob.write(_packStringOption(self._vo))
class AssertExists(Update):
    """Assert *key* exists (wire tag 15)."""
    def __init__(self, key):
        self._key = key
    def write(self, fob):
        fob.write(_packInt(15))
        fob.write(_packString(self._key))
class Sequence(Update):
    """
    An ordered group of updates that the server applies atomically
    (wire tag 5).
    """
    def __init__(self):
        self._updates = []
    def addUpdate(self,u):
        # Append an already-constructed Update instance.
        self._updates.append(u)
    @SignatureValidator( 'string', 'string' )
    def addSet(self, key,value):
        self._updates.append(Set(key,value))
    @SignatureValidator( 'string' )
    def addDelete(self, key):
        self._updates.append(Delete(key))
    def addAssert(self, key,vo):
        self._updates.append(Assert(key,vo))
    def addAssertExists(self, key):
        self._updates.append(AssertExists(key))
    def write(self, fob):
        # Serialized as: tag, update count, then each update in order.
        fob.write( _packInt(5))
        fob.write( _packInt(len(self._updates)))
        for update in self._updates:
            update.write(fob)
class ArakoonProtocol :
    """
    Wire protocol for the Arakoon client.

    ``encode*`` methods build the packed request for a single command;
    ``decode*`` methods read a reply from a connection, translating error
    codes into the matching Arakoon exceptions before decoding the payload.
    """

    @staticmethod
    def encodePing(clientId, clusterId ):
        """Encode the hello/ping command carrying client and cluster ids."""
        r = _packInt(ARA_CMD_HEL)
        r += _packString(clientId)
        r += _packString(clusterId)
        return r

    @staticmethod
    def encodeGetVersion():
        """Encode the request for the server version."""
        return _packInt(ARA_CMD_VERSION)

    @staticmethod
    def encodeGetCurrentState():
        """Encode the request for the node's current state."""
        return _packInt(ARA_CMD_CURRENT_STATE)

    @staticmethod
    def encodeWhoMaster():
        """Encode the 'who is master?' query."""
        return _packInt( ARA_CMD_WHO )

    @staticmethod
    def encodeExists(key, consistency):
        """Encode an existence check for *key* under *consistency*."""
        msg = _packInt(ARA_CMD_EXISTS)
        msg += consistency.encode()
        msg += _packString(key)
        return msg

    @staticmethod
    def encodeAssert(key, vo, consistency):
        """Encode an assert that *key* holds the optional value *vo*."""
        msg = _packInt(ARA_CMD_ASSERT)
        msg += consistency.encode()
        msg += _packString(key)
        msg += _packStringOption(vo)
        return msg

    @staticmethod
    def encodeAssertExists(key, consistency):
        """Encode an assert that *key* exists."""
        msg = _packInt(ARA_CMD_ASSERT_EXISTS)
        msg += consistency.encode()
        msg += _packString(key)
        return msg

    @staticmethod
    def encodeGet(key , consistency):
        """Encode a get of *key* under *consistency*."""
        msg = _packInt(ARA_CMD_GET)
        msg += consistency.encode()
        msg += _packString(key)
        return msg

    @staticmethod
    def encodeSet( key, value ):
        """Encode a set of *key* to *value*."""
        return _packInt( ARA_CMD_SET ) + _packString( key ) + _packString ( value )

    @staticmethod
    def encodeNOP():
        """Encode a no-op command."""
        return _packInt(ARA_CMD_NOP)

    @staticmethod
    def encodeGetTxid():
        """Encode the request for the current transaction id."""
        return _packInt(ARA_CMD_GET_TXID)

    @staticmethod
    def encodeConfirm(key, value):
        """Encode a confirm (set only when the value differs) of *key*."""
        return _packInt(ARA_CMD_CONFIRM) + _packString(key) + _packString(value)

    @staticmethod
    def encodeSequence(seq, sync):
        """
        Encode a Sequence as a single length-prefixed blob.

        When *sync* is true the synced variant is used, which makes the
        server fsync before acknowledging.
        """
        r = cStringIO.StringIO()
        seq.write(r)
        flattened = r.getvalue()
        r.close()
        cmd = ARA_CMD_SEQ
        if sync:
            cmd = ARA_CMD_SYNCED_SEQUENCE
        return _packInt(cmd) + _packString(flattened)

    @staticmethod
    def encodeDelete( key ):
        """Encode a delete of *key*."""
        return _packInt ( ARA_CMD_DEL ) + _packString ( key )

    @staticmethod
    def encodeRange( bKey, bInc, eKey, eInc, maxCnt , consistency):
        """Encode a key-range query between optional begin/end keys."""
        retVal = _packInt( ARA_CMD_RAN ) + consistency.encode()
        retVal += _packStringOption( bKey ) + _packBool ( bInc )
        retVal += _packStringOption( eKey ) + _packBool (eInc) + _packSignedInt (maxCnt)
        return retVal

    @staticmethod
    def encodeRangeEntries(first, finc, last, linc, maxEntries, consistency):
        """Encode a key/value range query between optional first/last keys."""
        r = _packInt(ARA_CMD_RAN_E) + consistency.encode()
        r += _packStringOption(first) + _packBool(finc)
        r += _packStringOption(last) + _packBool(linc)
        r += _packSignedInt(maxEntries)
        return r

    @staticmethod
    def encodeReverseRangeEntries(first, finc, last, linc, maxEntries, consistency):
        """Encode a key/value range query iterated in reverse order."""
        r = _packInt(ARA_CMD_REV_RAN_E) + consistency.encode()
        r += _packStringOption(first) + _packBool(finc)
        r += _packStringOption(last) + _packBool(linc)
        r += _packSignedInt(maxEntries)
        return r

    @staticmethod
    def encodePrefixKeys( key, maxCnt, consistency ):
        """Encode a query for at most *maxCnt* keys starting with *key*."""
        retVal = _packInt( ARA_CMD_PRE) + consistency.encode()
        retVal += _packString( key )
        retVal += _packSignedInt( maxCnt )
        return retVal

    @staticmethod
    def encodeTestAndSet( key, oldVal, newVal ):
        """Encode a test-and-set of *key* from *oldVal* to *newVal*."""
        retVal = _packInt( ARA_CMD_TAS ) + _packString( key )
        retVal += _packStringOption( oldVal )
        retVal += _packStringOption( newVal )
        return retVal

    @staticmethod
    def encodeReplace(key, wanted):
        """Encode a replace of *key* with the optional value *wanted*."""
        retVal = _packInt(ARA_CMD_REPLACE) + _packString(key)
        retVal += _packStringOption(wanted)
        return retVal

    @staticmethod
    def encodeMultiGet(keys, consistency):
        """Encode a multi-get for the given list of keys."""
        retVal = _packInt(ARA_CMD_MULTI_GET) + consistency.encode()
        retVal += _packInt(len(keys))
        for key in keys:
            retVal += _packString(key)
        return retVal

    @staticmethod
    def encodeMultiGetOption(keys, consistency):
        """Encode a multi-get returning an option per key (None when absent)."""
        retVal = _packInt(ARA_CMD_MULTI_GET_OPTION) + consistency.encode()
        retVal += _packInt(len(keys))
        for key in keys:
            retVal += _packString(key)
        return retVal

    @staticmethod
    def encodeExpectProgressPossible():
        """Encode the 'can the cluster make progress?' query."""
        return _packInt(ARA_CMD_EXPECT_PROGRESS_POSSIBLE)

    @staticmethod
    def encodeStatistics():
        """Encode the statistics request."""
        return _packInt(ARA_CMD_STATISTICS)

    @staticmethod
    def encodeUserFunction(name, argument):
        """Encode a server-side user function call with an optional argument."""
        retVal = _packInt(ARA_CMD_USER_FUNCTION)
        retVal += _packString(name)
        retVal += _packStringOption(argument)
        return retVal

    @staticmethod
    def encodeDeletePrefix(prefix):
        """Encode a delete of every key starting with *prefix*."""
        retVal = _packInt(ARA_CMD_DELETE_PREFIX)
        retVal += _packString(prefix)
        return retVal

    @staticmethod
    def _evaluateErrorCode( con ):
        """
        Read the reply's error code; return on success, otherwise read the
        error message and raise the matching Arakoon exception.
        """
        errorCode = _recvInt ( con )
        if errorCode == ARA_ERR_SUCCESS :
            return
        else :
            errorMsg = _recvString ( con )
            if errorCode == ARA_ERR_NOT_FOUND:
                raise ArakoonNotFound(errorMsg)
            if errorCode == ARA_ERR_NOT_MASTER:
                raise ArakoonNodeNotMaster()
            if errorCode == ARA_ERR_NO_LONGER_MASTER:
                raise ArakoonNodeNoLongerMaster()
            if errorCode == ARA_ERR_BAD_INPUT:
                raise ArakoonBadInput(errorMsg)
            if errorCode == ARA_ERR_INCONSISTENT_READ:
                raise ArakoonInconsistentRead(errorMsg)
            if errorCode == ARA_ERR_USERFUNCTION_FAILURE:
                raise ArakoonUserfunctionFailure(errorMsg)
            if errorCode == ARA_ERR_ASSERTION_FAILED:
                raise ArakoonAssertionFailed(errorMsg)
            if errorCode == ARA_ERR_ASSERTEXISTS_FAILED:
                raise ArakoonAssertExistsFailed(errorMsg)
            if errorCode == ARA_ERR_RANGE_ERROR:
                raise NurseryRangeError(errorMsg)
            if errorCode == ARA_ERR_GOING_DOWN:
                raise ArakoonGoingDown(errorMsg)
            # Fallback for codes without a dedicated exception class.
            # (The previous 'if errorCode != ARA_ERR_SUCCESS' guard was
            # always true on this branch and has been dropped.)
            raise ArakoonException( "EC=%d. %s" % (errorCode, errorMsg) )

    @staticmethod
    def decodeInt64Result( con ) :
        """Decode a 64-bit integer reply."""
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvInt64( con )

    @staticmethod
    def decodeIntResult(con):
        """Decode a 32-bit integer reply."""
        ArakoonProtocol._evaluateErrorCode(con)
        return _recvInt(con)

    @staticmethod
    def decodeVoidResult( con ):
        """Decode a reply that carries no payload (only the error code)."""
        ArakoonProtocol._evaluateErrorCode( con )

    @staticmethod
    def decodeBoolResult( con ):
        """Decode a boolean reply."""
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvBool( con )

    @staticmethod
    def decodeStringResult ( con ):
        """Decode a string reply."""
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvString( con )

    @staticmethod
    def decodeStringOptionResult ( con ):
        """Decode an optional-string reply (None when absent)."""
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvStringOption( con )

    @staticmethod
    def decodeStringListResult( con ):
        """
        Decode a list-of-strings reply.

        NOTE(review): each element is prepended, so the returned list is the
        reverse of the order received from the server. Preserved as-is for
        compatibility with existing callers.
        """
        ArakoonProtocol._evaluateErrorCode( con )
        retVal = []
        arraySize = _recvInt( con )
        for i in xrange( arraySize ) :
            retVal[:0] = [ _recvString( con ) ]
        return retVal

    @staticmethod
    def decodeStringArrayResult(con):
        """Decode a list-of-strings reply, preserving the received order."""
        ArakoonProtocol._evaluateErrorCode(con)
        retVal = []
        size = _recvInt(con)
        for i in xrange(size):
            s = _recvString(con)
            retVal.append(s)
        return retVal

    @staticmethod
    def decodeStringOptionArrayResult(con):
        """Decode a list of optional strings, preserving the received order."""
        ArakoonProtocol._evaluateErrorCode(con)
        retVal = []
        arraySize = _recvInt(con)
        for i in xrange(arraySize):
            s = _recvStringOption(con)
            retVal.append(s)
        return retVal

    @staticmethod
    def decodeNurseryCfgResult( con ):
        """
        Decode the nursery configuration: the routing information plus one
        client configuration per cluster.

        @return: a (routing, {clusterId: ArakoonClientConfig}) pair
        """
        ArakoonProtocol._evaluateErrorCode(con)
        offset = 0
        encoded = _recvString( con )
        routing, offset = RoutingInfo.unpack(encoded, offset, _unpackBool, _unpackString)
        cfgCount, offset = _unpackInt(encoded, offset)
        resultCfgs = {}
        for i in range(cfgCount) :
            clusterId, offset = _unpackString(encoded, offset)
            clusterSize, offset = _unpackInt(encoded, offset)
            cfg = dict()
            for j in range(clusterSize):
                nodeId, offset = _unpackString(encoded, offset)
                ips, offset = _unpackStringList(encoded, offset)
                port, offset = _unpackInt(encoded, offset)
                cfg[nodeId] = (ips,port)
            cliCfg = ArakoonClientConfig(clusterId, cfg)
            resultCfgs[clusterId] = cliCfg
        return (routing, resultCfgs)

    @staticmethod
    def decodeGetTxidResult(con):
        """
        Decode the current-transaction-id reply into a Consistency instance.

        Bug fixes: the '\\x00' branch used to instantiate the non-existent
        class ``NoGuarantees`` (the class is ``NoGuarantee``), raising
        NameError; and the failure message never applied its ``%c`` argument.
        """
        ArakoonProtocol._evaluateErrorCode(con)
        x = _readExactNBytes( con, 1)
        r = None
        if x == '\x00':
            r = NoGuarantee()
        elif x == '\x01':
            r = Consistent()
        elif x == '\x02':
            i = _recvInt64(con)
            r = AtLeast(i)
        else:
            raise ArakoonException("%c does not denote a consistency" % x)
        return r

    @staticmethod
    def decodeStringPairListResult(con):
        """
        Decode a list of (key, value) pairs.

        NOTE(review): pairs are prepended, so the returned list is the
        reverse of the order received. Preserved as-is for compatibility.
        """
        ArakoonProtocol._evaluateErrorCode(con)
        result = []
        size = _recvInt( con )
        for i in range(size):
            k = _recvString ( con )
            v = _recvString ( con )
            result [:0] = [(k, v)]
        return result

    @staticmethod
    def decodeStatistics(con):
        """Decode the statistics blob into a nested dict of named fields."""
        ArakoonProtocol._evaluateErrorCode(con)
        # Renamed from 'buffer' to avoid shadowing the builtin.
        buf = _recvString(con)
        result, offset = _unpackNamedField(buf, 0)
        return result['arakoon_stats']

    @staticmethod
    def decodeVersionResult(con):
        """Decode the version reply as (major, minor, patch, info)."""
        ArakoonProtocol._evaluateErrorCode(con)
        major = _recvInt(con)
        minor = _recvInt(con)
        patch = _recvInt(con)
        info = _recvString(con)
        return (major,minor, patch, info)

    @staticmethod
    def encodeGetKeyCount () :
        """Encode the key-count request."""
        return _packInt(ARA_CMD_KEY_COUNT)

    @staticmethod
    def encodeGetNurseryCfg ():
        """Encode the nursery-configuration request."""
        return _packInt(ARA_CMD_GET_NURSERY_CFG)
|
{
"content_hash": "47fdcb145881c35f4689a5f3ef370a10",
"timestamp": "",
"source": "github",
"line_count": 937,
"max_line_length": 123,
"avg_line_length": 29.937033084311633,
"alnum_prop": 0.6039000392142883,
"repo_name": "tcpcloud/openvstorage",
"id": "b03b7ee42982a03d5bf215ba74b3eab75e71441f",
"size": "28051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ovs/extensions/db/arakoon/arakoon/ArakoonProtocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11498"
},
{
"name": "DTrace",
"bytes": "215"
},
{
"name": "HTML",
"bytes": "208883"
},
{
"name": "JavaScript",
"bytes": "818191"
},
{
"name": "Makefile",
"bytes": "1335"
},
{
"name": "Python",
"bytes": "1849659"
},
{
"name": "Shell",
"bytes": "12612"
}
],
"symlink_target": ""
}
|
def greetings(msg):
    """Print *msg* followed by five exclamation marks."""
    print("%s!!!!!" % msg)
|
{
"content_hash": "14d6804763fe79595ca84ac753f5f2c2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 22,
"alnum_prop": 0.5227272727272727,
"repo_name": "PetarZDuric/cs3240-labdemo",
"id": "24860c3e8fe9d888468f86ad75387e048875a3b3",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "286"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from meregistro.shortcuts import my_render
from apps.seguridad.decorators import login_required, credential_required
from apps.seguridad.models import Ambito, Rol
from apps.registro.models import Establecimiento, EstadoEstablecimiento, Anexo, EstadoAnexo
from apps.titulos.models import TituloNacional, EstadoTituloNacional, EstadoNormativaJurisdiccional, NormativaJurisdiccional
from apps.validez_nacional.forms import SolicitudFormFilters, SolicitudDatosBasicosForm, SolicitudNormativasForm,\
SolicitudCohortesForm, SolicitudControlForm, ValidezInstitucionalFormFilters, SolicitudAsignacionFormFilters, InformeSolicitudForm
from apps.validez_nacional.models import EstadoSolicitud, Solicitud, SolicitudEstablecimiento, ValidezNacional, InformeSolicitud
from django.core.paginator import Paginator
from helpers.MailHelper import MailHelper
from apps.reportes.views.validez_nacional import solicitudes as reporte_solicitudes
from apps.reportes.views.solicitud import detalle_numeracion as reporte_detalle_numeracion
from apps.reportes.models import Reporte
from apps.validez_nacional.FSM import FSMSolicitud
ITEMS_PER_PAGE = 50  # page size for every paginated listing in this module
fsmSolicitud = FSMSolicitud()  # shared state machine driving Solicitud state transitions
def __puede_editarse_solicitud(request, solicitud):
    # A solicitud can only be edited while it is in the Pendiente state,
    # except that the national admin may also edit it as long as it has not
    # yet been numbered (Numerado).
    return (solicitud.estado.nombre == EstadoSolicitud.PENDIENTE) or \
        (solicitud.estado.nombre != EstadoSolicitud.NUMERADO and request.get_perfil().rol.nombre == Rol.ROL_ADMIN_NACIONAL)
def __flat_list(list_to_flat):
    "Flatten one level of nesting: a list of iterables into a single list."
    return [element for sublist in list_to_flat for element in sublist]
@login_required
@credential_required('validez_nacional_solicitud_consulta')
def index(request):
    """
    Paginated listing of Solicitudes, filtered by the GET form and narrowed
    to the scope of the requesting profile (jurisdiction / establishment /
    annex). ``?export=1`` returns the full result set as a report instead.
    """
    if request.method == 'GET':
        form_filter = SolicitudFormFilters(request.GET)
    else:
        form_filter = SolicitudFormFilters()
    q = build_query(form_filter, 1, request)
    try:
        if request.GET['export'] == '1':
            return reporte_solicitudes(request, q)
    except KeyError:
        pass
    ambito = request.get_perfil().ambito
    perfil_jurisdiccional = ambito.tipo.nombre == Ambito.TIPO_JURISDICCION
    perfil_establecimiento = ambito.tipo.nombre == Ambito.TIPO_ESTABLECIMIENTO
    perfil_anexo = ambito.tipo.nombre == Ambito.TIPO_ANEXO
    # Restrict both the jurisdiction choices and the result set to the
    # requesting profile's scope.
    if perfil_jurisdiccional or perfil_establecimiento or perfil_anexo:
        form_filter.fields['jurisdiccion'].queryset = form_filter.fields['jurisdiccion'].queryset.filter(id=request.get_perfil().jurisdiccion().id)
    if perfil_jurisdiccional:
        q = q.filter(jurisdiccion__id=request.get_perfil().jurisdiccion().id)
    elif perfil_establecimiento:
        establecimiento = Establecimiento.objects.get(ambito__path=ambito.path)
        q = q.filter(establecimientos__establecimiento__id=establecimiento.id)
    elif perfil_anexo:
        anexo = Anexo.objects.get(ambito__path=ambito.path)
        q = q.filter(anexos__anexo__id=anexo.id)
    paginator = Paginator(q, ITEMS_PER_PAGE)
    try:
        page_number = int(request.GET['page'])
    except (KeyError, ValueError):
        page_number = 1
    # clamp the page number to the valid range
    if page_number < 1:
        page_number = 1
    elif page_number > paginator.num_pages:
        page_number = paginator.num_pages
    page = paginator.page(page_number)
    objects = page.object_list
    return my_render(request, 'validez_nacional/solicitud/index.html', {
        'form_filters': form_filter,
        'objects': objects,
        'paginator': paginator,
        'page': page,
        'page_number': page_number,
        'pages_range': range(1, paginator.num_pages + 1),
        'next_page': page_number + 1,
        'prev_page': page_number - 1,
        'export_url': Reporte.build_export_url(request.build_absolute_uri()),
        've_acciones': ambito.tipo.nombre not in [Ambito.TIPO_ESTABLECIMIENTO, Ambito.TIPO_ANEXO]
    })
def build_query(filters, page, request):
    """
    Build the search queryset from the given filters, ordered by state
    (descending), jurisdiction name and first cohort.
    """
    queryset = filters.buildQuery()
    return queryset.order_by('-estado__nombre', 'jurisdiccion__nombre', 'primera_cohorte')
@login_required
@credential_required('validez_nacional_solicitud_create')
def create(request):
    """
    Create a new Solicitud in the Pendiente state for the profile's
    jurisdiction. On a valid POST the record is saved, its initial state is
    registered, and the user is redirected to the edit view; otherwise the
    (empty or invalid) form is rendered again.
    """
    try:
        # Bug fix: this used to be a redundant chained assignment
        # (jurisdiccion_id = jurisdiccion_id = ...).
        jurisdiccion_id = request.get_perfil().jurisdiccion().id
    except AttributeError:
        # Profiles without a jurisdiction (e.g. national scope).
        jurisdiccion_id = None
    if request.method == 'POST':
        form = SolicitudDatosBasicosForm(request.POST, jurisdiccion_id=jurisdiccion_id)
        if form.is_valid():
            solicitud = form.save(commit=False)
            solicitud.estado = EstadoSolicitud.objects.get(nombre=EstadoSolicitud.PENDIENTE)
            solicitud.jurisdiccion = request.get_perfil().jurisdiccion()
            solicitud.save()
            solicitud.registrar_estado()
            request.set_flash('success', 'Datos guardados correctamente.')
            return HttpResponseRedirect(reverse('validezNacionalSolicitudEdit', args=[solicitud.id]))
        else:
            request.set_flash('warning', 'Ocurrió un error guardando los datos.')
    else:
        # The form filters its own choices by jurisdiction.
        form = SolicitudDatosBasicosForm(jurisdiccion_id=jurisdiccion_id)
    return my_render(request, 'validez_nacional/solicitud/new.html', {
        'form': form,
        'form_template': 'validez_nacional/solicitud/form_datos_basicos.html',
        'is_new': True,
        'page_title': 'Título',
        'current_page': 'datos_basicos',
    })
@login_required
@credential_required('validez_nacional_solicitud_editar')
# Edit basic data
def edit(request, solicitud_id):
    """
    Edit the basic data of a Solicitud.

    Jurisdiction and state are never taken from the form: they are restored
    from the stored record before saving. (An unused local that snapshotted
    ``solicitud.estado_id`` has been removed.)
    """
    solicitud = Solicitud.objects.get(pk=solicitud_id)
    if not __puede_editarse_solicitud(request, solicitud):
        request.set_flash('warning', 'No puede editarse la solicitud.')
        return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
    if request.method == 'POST':
        form = SolicitudDatosBasicosForm(request.POST, instance=solicitud, jurisdiccion_id=solicitud.jurisdiccion_id)
        if form.is_valid():
            sol = form.save(commit=False)
            sol.id = solicitud.id
            sol.jurisdiccion_id = solicitud.jurisdiccion_id
            sol.estado_id = solicitud.estado_id
            form.save()
            request.set_flash('success', 'Datos actualizados correctamente.')
        else:
            request.set_flash('warning', 'Ocurrió un error actualizando los datos.')
    else:
        form = SolicitudDatosBasicosForm(instance=solicitud, jurisdiccion_id=solicitud.jurisdiccion_id)
    return my_render(request, 'validez_nacional/solicitud/edit.html', {
        'form': form,
        'solicitud': solicitud,
        'form_template': 'validez_nacional/solicitud/form_datos_basicos.html',
        'is_new': False,
        'page_title': 'Título',
        'current_page': 'datos_basicos',
    })
@login_required
@credential_required('validez_nacional_solicitud_editar')
def editar_normativas(request, solicitud_id):
    """
    Edit the set of jurisdictional regulations attached to a Solicitud.
    """
    try:
        solicitud = Solicitud.objects.get(pk=solicitud_id)
    except:
        # NOTE(review): bare except — presumably meant Solicitud.DoesNotExist;
        # confirm before narrowing.
        # The record is new: don't show this form before basic data is saved.
        return my_render(request, 'validez_nacional/solicitud/new.html', {
            'solicitud': None,
            'form_template': 'validez_nacional/solicitud/form_normativas.html',
            'page_title': 'Normativas',
            'current_page': 'normativas',
        })
    if not __puede_editarse_solicitud(request, solicitud):
        request.set_flash('warning', 'No puede editarse la solicitud.')
        return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
    if request.method == 'POST':
        form = SolicitudNormativasForm(request.POST, instance=solicitud)
        if form.is_valid():
            normativas = form.save()
            request.set_flash('success', 'Datos guardados correctamente.')
            # redirect back to this edit view
            return HttpResponseRedirect(reverse('solicitudNormativasEdit', args=[solicitud.id]))
        else:
            request.set_flash('warning', 'Ocurrió un error guardando los datos.')
    else:
        form = SolicitudNormativasForm(instance=solicitud)
    # Show the already-selected regulations first, then the remaining ones of
    # the same jurisdiction, building a queryset that preserves that order:
    # http://blog.mathieu-leplatre.info/django-create-a-queryset-from-a-list-preserving-order.html
    current_ids = [n.id for n in solicitud.normativas_jurisdiccionales.all().order_by('numero_anio')]
    restantes_ids = [n.id for n in NormativaJurisdiccional.objects.filter(jurisdiccion=solicitud.jurisdiccion).exclude(id__in=current_ids).order_by('numero_anio')]
    pk_list = current_ids + restantes_ids
    clauses = ' '.join(['WHEN id=%s THEN %s' % (pk, i) for i, pk in enumerate(pk_list)])
    ordering = 'CASE %s END' % clauses
    queryset = NormativaJurisdiccional.objects.filter(pk__in=pk_list).extra(
        select={'ordering': ordering}, order_by=('ordering',))
    form.fields['normativas_jurisdiccionales'].queryset = queryset
    return my_render(request, 'validez_nacional/solicitud/edit.html', {
        'form': form,
        'solicitud': solicitud,
        'form_template': 'validez_nacional/solicitud/form_normativas.html',
        'is_new': False,
        'page_title': 'Normativas',
        'current_page': 'normativas',
    })
@login_required
@credential_required('validez_nacional_solicitud_editar')
def editar_cohortes(request, solicitud_id):
    """
    Edit the cohort data (first/last cohort) of a Solicitud.
    """
    try:
        solicitud = Solicitud.objects.get(pk=solicitud_id)
    except:
        # NOTE(review): bare except — presumably meant Solicitud.DoesNotExist;
        # confirm before narrowing.
        # The record is new: don't show this form before basic data is saved.
        return my_render(request, 'validez_nacional/solicitud/new.html', {
            'solicitud': None,
            'is_new': True,
            'form_template': 'validez_nacional/solicitud/form_cohortes.html',
            'page_title': 'Cohortes',
            'current_page': 'cohortes',
        })
    if not __puede_editarse_solicitud(request, solicitud):
        request.set_flash('warning', 'No puede editarse la solicitud.')
        return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
    if request.method == 'POST':
        form = SolicitudCohortesForm(request.POST, instance=solicitud)
        if form.is_valid():
            cohorte = form.save()
            request.set_flash('success', 'Datos guardados correctamente.')
            # redirect back to this edit view
            return HttpResponseRedirect(reverse('solicitudCohortesEdit', args=[solicitud.id]))
        else:
            request.set_flash('warning', 'Ocurrió un error guardando los datos.')
    else:
        form = SolicitudCohortesForm(instance=solicitud)
    return my_render(request, 'validez_nacional/solicitud/edit.html', {
        'form': form,
        'solicitud': solicitud,
        'form_template': 'validez_nacional/solicitud/form_cohortes.html',
        'is_new': solicitud.primera_cohorte is None and solicitud.ultima_cohorte is None,
        'page_title': 'Cohortes',
        'current_page': 'cohortes',
    })
@login_required
@credential_required('validez_nacional_solicitud_control')
def control(request, solicitud_id):
    """
    Control view: lets an authorized user move a Solicitud through its
    states, offering only the transitions the FSM allows from the current
    state.
    """
    solicitud = Solicitud.objects.get(pk=solicitud_id)
    estado_anterior = solicitud.estado
    if request.method == 'POST':
        form = SolicitudControlForm(request.POST, instance=solicitud)
        if form.is_valid():
            sol = form.save()
            # Only record a state-change entry when the state actually changed.
            if sol.estado != estado_anterior:
                sol.registrar_estado()
            request.set_flash('success', 'Datos actualizados correctamente.')
        else:
            request.set_flash('warning', 'Ocurrió un error actualizando los datos.')
    else:
        form = SolicitudControlForm(instance=solicitud)
    # Restrict the state choices to the FSM transitions from the current state.
    form.fields["estado"].choices = map(lambda e: (e.id, e), fsmSolicitud.estadosDesde(solicitud.estado))
    return my_render(request, 'validez_nacional/solicitud/edit.html', {
        'form': form,
        'solicitud': solicitud,
        'form_template': 'validez_nacional/solicitud/form_control.html',
        'is_new': False,
        'page_title': 'Control de Solicitud',
        'current_page': 'control',
    })
@login_required
@credential_required('validez_nacional_solicitud_editar')
def asignar_establecimientos(request, solicitud_id):
    """
    Assign establishments to a Solicitud: a paginated checklist showing the
    currently assigned establishments first; POST saves the selection made
    on the current page.
    """
    solicitud = Solicitud.objects.get(pk=solicitud_id)
    if not __puede_editarse_solicitud(request, solicitud):
        request.set_flash('warning', 'No puede editarse la solicitud.')
        return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
    form_filter = SolicitudAsignacionFormFilters(request.GET, tipo_ue='Establecimiento', solicitud=solicitud)
    q = form_filter.buildQuery()
    # ids of the establishments currently assigned to the solicitud
    "Traigo los ids de los establecimientos actualmente asignados a la solicitud"
    current_establecimientos_ids = __flat_list(solicitud.establecimientos.all().values_list("establecimiento_id"))
    q1 = q.filter(solicitudes__establecimiento__id__in=current_establecimientos_ids).order_by('cue') # selected
    q2 = q.exclude(id__in=[e.id for e in q1]).order_by('cue') # not selected
    from itertools import chain
    res = list(chain(q1, q2))
    paginator = Paginator(res, ITEMS_PER_PAGE)
    try:
        page_number = int(request.GET['page'])
    except (KeyError, ValueError):
        page_number = 1
    # clamp the page number to the valid range
    if page_number < 1:
        page_number = 1
    elif page_number > paginator.num_pages:
        page_number = paginator.num_pages
    page = paginator.page(page_number)
    objects = page.object_list
    # process a submitted selection
    "Procesamiento"
    if request.method == 'POST':
        values_dict = {
            'establecimientos_procesados_ids': [e.id for e in objects], # establishments on the current page
            'current_establecimientos_ids': current_establecimientos_ids,
            'establecimientos_seleccionados_ids': request.POST.getlist("establecimientos"),
        }
        solicitud.save_establecimientos(**values_dict)
        request.set_flash('success', 'Datos actualizados correctamente.')
        # redirect back to this view
        return HttpResponseRedirect(reverse('solicitudAsignarEstablecimientos', args=[solicitud.id]))
    return my_render(request, 'validez_nacional/solicitud/asignar_establecimientos.html', {
        'is_new': False,
        'current_page': 'asignar-establecimientos',
        'solicitud': solicitud,
        'form_filters': form_filter,
        'current_establecimientos_ids': current_establecimientos_ids,
        'objects': objects,
        'paginator': paginator,
        'page': page,
        'page_number': page_number,
        'pages_range': range(1, paginator.num_pages + 1),
        'next_page': page_number + 1,
        'prev_page': page_number - 1
    })
@login_required
@credential_required('validez_nacional_solicitud_editar')
def asignar_anexos(request, solicitud_id):
    """
    Assign annexes to a Solicitud: a paginated checklist showing the
    currently assigned annexes first; POST saves the selection made on the
    current page.
    """
    solicitud = Solicitud.objects.get(pk=solicitud_id)
    if not __puede_editarse_solicitud(request, solicitud):
        request.set_flash('warning', 'No puede editarse la solicitud.')
        return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
    form_filter = SolicitudAsignacionFormFilters(request.GET, tipo_ue='Anexo', solicitud=solicitud)
    q = form_filter.buildQuery()
    # ids of the annexes currently assigned to the solicitud
    "Traigo los ids de los establecimientos actualmente asignados a la solicitud"
    current_anexos_ids = __flat_list(solicitud.anexos.all().values_list("anexo_id"))
    q1 = q.filter(solicitudes__anexo__id__in=current_anexos_ids).order_by('cue') # selected
    q2 = q.exclude(id__in=[a.id for a in q1]).order_by('cue') # not selected
    from itertools import chain
    res = list(chain(q1, q2))
    paginator = Paginator(res, ITEMS_PER_PAGE)
    try:
        page_number = int(request.GET['page'])
    except (KeyError, ValueError):
        page_number = 1
    # clamp the page number to the valid range
    if page_number < 1:
        page_number = 1
    elif page_number > paginator.num_pages:
        page_number = paginator.num_pages
    page = paginator.page(page_number)
    objects = page.object_list
    # process a submitted selection
    "Procesamiento"
    if request.method == 'POST':
        values_dict = {
            'anexos_procesados_ids': [a.id for a in objects], # annexes on the current page
            'current_anexos_ids': current_anexos_ids,
            'anexos_seleccionados_ids': request.POST.getlist("anexos"),
        }
        solicitud.save_anexos(**values_dict)
        request.set_flash('success', 'Datos actualizados correctamente.')
        # redirect back to this view
        return HttpResponseRedirect(reverse('solicitudAsignarAnexos', args=[solicitud.id]))
    return my_render(request, 'validez_nacional/solicitud/asignar_anexos.html', {
        'is_new': False,
        'current_page': 'asignar-anexos',
        'solicitud': solicitud,
        'form_filters': form_filter,
        'current_anexos_ids': current_anexos_ids,
        'objects': objects,
        'paginator': paginator,
        'page': page,
        'page_number': page_number,
        'pages_range': range(1, paginator.num_pages + 1),
        'next_page': page_number + 1,
        'prev_page': page_number - 1
    })
@login_required
@credential_required('validez_nacional_solicitud_eliminar')
def delete(request, solicitud_id):
    """Delete a solicitud when allowed, flash the outcome, go back to the index."""
    registro = Solicitud.objects.get(pk=solicitud_id)
    if not registro.is_deletable():
        request.set_flash('warning', 'El registro no puede ser eliminado.')
    else:
        registro.delete()
        request.set_flash('success', 'Registro eliminado correctamente.')
    return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
def build_query_institucional(filters, page, request):
    """Build the search queryset from the filters, ordered by CUE.

    ``page`` and ``request`` are accepted for interface compatibility with
    the other query builders but are not used here.
    """
    queryset = filters.buildQuery()
    return queryset.order_by('cue')
@login_required
@credential_required('validez_nacional_solicitud_consulta_institucional')
def consulta_institucional(request):
    """Institutional query of validez-nacional records for the logged-in
    profile's establecimiento (or the establecimiento that owns its anexo).

    Raises:
        ValueError: when the profile's ambit is neither an establecimiento
            nor an anexo.  (The original code fell through in that case and
            crashed later with a NameError on ``establecimiento``.)
    """
    ambito = request.get_perfil().ambito
    perfil_establecimiento = ambito.tipo.nombre == Ambito.TIPO_ESTABLECIMIENTO
    perfil_anexo = ambito.tipo.nombre == Ambito.TIPO_ANEXO
    if perfil_establecimiento:
        establecimiento = Establecimiento.objects.get(ambito__path=ambito.path)
    elif perfil_anexo:
        # An anexo profile resolves to the establecimiento that owns it.
        establecimiento = Establecimiento.objects.get(anexos__ambito__path__istartswith=ambito.path)
    else:
        raise ValueError('El perfil no corresponde a un establecimiento ni a un anexo.')
    if request.method == 'GET':
        form_filter = ValidezInstitucionalFormFilters(request.GET, establecimiento=establecimiento)
    else:
        form_filter = ValidezInstitucionalFormFilters(establecimiento=establecimiento)
    q = build_query_institucional(form_filter, 1, request)
    paginator = Paginator(q, ITEMS_PER_PAGE)
    try:
        page_number = int(request.GET['page'])
    except (KeyError, ValueError):
        page_number = 1
    # Clamp the page number to the paginator's valid range.
    if page_number < 1:
        page_number = 1
    elif page_number > paginator.num_pages:
        page_number = paginator.num_pages
    page = paginator.page(page_number)
    objects = page.object_list
    return my_render(request, 'validez_nacional/solicitud/consulta_institucional.html', {
        'form_filters': form_filter,
        'objects': objects,
        'paginator': paginator,
        'page': page,
        'page_number': page_number,
        'pages_range': range(1, paginator.num_pages + 1),
        'next_page': page_number + 1,
        'prev_page': page_number - 1,
        'export_url': Reporte.build_export_url(request.build_absolute_uri()),
    })
def _crear_validez(solicitud, tipo_unidad, unidad, normativas_jurisdiccionales, referencia):
    """Create, number and persist one ValidezNacional record.

    ``tipo_unidad`` is ``'Sede'`` for an establecimiento or ``'Anexo'`` for
    an anexo; ``unidad`` is the matching establecimiento/anexo instance.
    Sends the numbering notification e-mail once the record is complete.
    """
    v = ValidezNacional()
    v.tipo_unidad_educativa = tipo_unidad
    v.unidad_educativa_id = unidad.id
    v.cue = unidad.cue
    v.solicitud_id = solicitud.id
    v.carrera = solicitud.carrera.nombre
    v.titulo_nacional = solicitud.titulo_nacional.nombre
    v.primera_cohorte = solicitud.primera_cohorte
    v.ultima_cohorte = solicitud.ultima_cohorte
    v.dictamen_cofev = solicitud.dictamen_cofev
    v.normativas_nacionales = solicitud.normativas_nacionales
    v.normativa_jurisdiccional = normativas_jurisdiccionales
    v.referencia = referencia
    v.save()  # first save: the INFD number below needs the primary key
    if tipo_unidad == 'Sede':
        v.nro_infd = v.calcular_nro_infd_establecimiento()
    else:
        v.nro_infd = v.calcular_nro_infd_anexo()
    v.save()
    MailHelper.notify_by_email(MailHelper.NUMERACION_SOLICITUD, v)


@login_required
@credential_required('validez_nacional_solicitud_numerar')
def numerar(request, solicitud_id):
    """Number a solicitud: mark it NUMERADO and generate one ValidezNacional
    record per assigned establecimiento ('Sede') and anexo.

    GET shows a confirmation page; POST performs the numbering and redirects
    to the numbering detail view.  All records of one run share the same
    ``referencia`` (a Unix timestamp) so they can be retrieved together.
    """
    solicitud = Solicitud.objects.get(pk=solicitud_id)
    # Display string of the jurisdictional regulations: the attached ones,
    # or the migrated legacy value when none are attached.
    normativas = list(solicitud.normativas_jurisdiccionales.all().order_by('numero_anio'))
    if normativas:
        normativas_jurisdiccionales = ', '.join(n.numero_anio for n in normativas)
    else:
        normativas_jurisdiccionales = solicitud.normativa_jurisdiccional_migrada
    if not solicitud.is_numerable():
        request.set_flash('warning', 'La solicitud no se puede numerar.')
        return HttpResponseRedirect(reverse('validezNacionalSolicitudIndex'))
    solicitud_establecimientos = solicitud.establecimientos.all()
    solicitud_anexos = solicitud.anexos.all()
    if request.method == 'POST':
        import time
        referencia = str(int(time.time()))
        solicitud.estado = EstadoSolicitud.objects.get(nombre=EstadoSolicitud.NUMERADO)
        solicitud.save()
        solicitud.registrar_estado()
        # One ValidezNacional record per assigned unit (sedes first, then anexos).
        for se in solicitud_establecimientos:
            _crear_validez(solicitud, 'Sede', se.establecimiento, normativas_jurisdiccionales, referencia)
        for sa in solicitud_anexos:
            _crear_validez(solicitud, 'Anexo', sa.anexo, normativas_jurisdiccionales, referencia)
        request.set_flash('success', 'Se ha generado la validez de títulos.')
        return HttpResponseRedirect(reverse('validezNacionalDetalleNumeracion', args=[solicitud.id, referencia]))
    return my_render(request, 'validez_nacional/solicitud/numerar.html', {
        'solicitud': solicitud,
        'solicitud_establecimientos': solicitud_establecimientos,
        'solicitud_anexos': solicitud_anexos,
        'normativas_jurisdiccionales': normativas_jurisdiccionales,
    })
@login_required
@credential_required('validez_nacional_solicitud_numerar')
def detalle_numeracion(request, solicitud_id, referencia):
    """Show (or export) the ValidezNacional records of one numbering run."""
    solicitud = Solicitud.objects.get(pk=solicitud_id)
    registros = ValidezNacional.objects.filter(solicitud=solicitud, referencia=referencia)
    if 'export' in request.GET:
        return reporte_detalle_numeracion(request, registros)
    context = {
        'solicitud': solicitud,
        'validez': registros,
        'export_url': Reporte.build_export_url(request.build_absolute_uri()),
    }
    return my_render(request, 'validez_nacional/solicitud/detalle_numeracion.html', context)
@login_required
@credential_required('validez_nacional_solicitud_informe')
def informe(request, solicitud_id):
    """Edit the evaluation report (InformeSolicitud) of a solicitud.

    Only solicitudes in the CONTROLADO, EVALUADO or RETENIDO states have an
    editable report; the report is generated on first access.
    """
    solicitud = Solicitud.objects.get(pk=solicitud_id, estado__nombre__in=[EstadoSolicitud.CONTROLADO, EstadoSolicitud.EVALUADO, EstadoSolicitud.RETENIDO])
    try:
        informe = solicitud.informe.get()
    except InformeSolicitud.DoesNotExist:
        # First access: create the report on the fly.
        informe = solicitud.generar_informe()
    if request.method == 'POST':
        form = InformeSolicitudForm(request.POST, instance=informe)
        if form.is_valid():
            informe = form.save(commit=False)
            informe.solicitud = solicitud
            informe.save()
            request.set_flash('success', 'Datos actualizados correctamente.')
        else:
            # BUG FIX: the original raised a bare Exception(form.errors)
            # right after setting this flash, turning every validation error
            # into a 500.  Fall through instead so the template re-renders
            # the bound form with its errors.
            request.set_flash('warning', 'Ocurrió un error actualizando los datos.')
    else:
        form = InformeSolicitudForm(instance=informe)
    return my_render(request, 'validez_nacional/solicitud/informe.html', {
        'solicitud': solicitud,
        'informe': informe,
        'form': form,
    })
@login_required
@credential_required('validez_nacional_solicitud_informe_impresion')
def informe_impresion(request, solicitud_id):
    """Printable view of a solicitud's report, generating it on first access."""
    estados_validos = [EstadoSolicitud.CONTROLADO, EstadoSolicitud.RETENIDO, EstadoSolicitud.EVALUADO]
    solicitud = Solicitud.objects.get(pk=solicitud_id, estado__nombre__in=estados_validos)
    try:
        informe = solicitud.informe.get()
    except InformeSolicitud.DoesNotExist:
        informe = solicitud.generar_informe()
    context = {'solicitud': solicitud, 'informe': informe}
    return my_render(request, 'validez_nacional/solicitud/informe_impresion.html', context)
|
{
"content_hash": "6305660c373ee7b4f4b0db59ed648c96",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 163,
"avg_line_length": 39.61854103343465,
"alnum_prop": 0.6675361540527063,
"repo_name": "MERegistro/meregistro",
"id": "8b2b2b515c119c478b854be4367ee6bc2ae7bc4b",
"size": "26125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meregistro/apps/validez_nacional/views/solicitud.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "79500"
},
{
"name": "HTML",
"bytes": "782188"
},
{
"name": "JavaScript",
"bytes": "106755"
},
{
"name": "PLpgSQL",
"bytes": "515442"
},
{
"name": "Python",
"bytes": "7190737"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class EventContent(Model):
    """The content of the event request message.

    :param id: The event ID.
    :type id: str
    :param timestamp: The time at which the event occurred.
    :type timestamp: datetime
    :param action: The action that encompasses the provided event.
    :type action: str
    :param target: The target of the event.
    :type target: :class:`Target
     <azure.mgmt.containerregistry.v2017_06_01_preview.models.Target>`
    :param request: The request that generated the event.
    :type request: :class:`Request
     <azure.mgmt.containerregistry.v2017_06_01_preview.models.Request>`
    :param actor: The agent that initiated the event. For most situations,
     this could be from the authorization context of the request.
    :type actor: :class:`Actor
     <azure.mgmt.containerregistry.v2017_06_01_preview.models.Actor>`
    :param source: The registry node that generated the event. Put
     differently, while the actor initiates the event, the source generates it.
    :type source: :class:`Source
     <azure.mgmt.containerregistry.v2017_06_01_preview.models.Source>`
    """

    # Wire-format map consumed by msrest's (de)serializer:
    # attribute name -> JSON key and msrest type tag.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'action': {'key': 'action', 'type': 'str'},
        'target': {'key': 'target', 'type': 'Target'},
        'request': {'key': 'request', 'type': 'Request'},
        'actor': {'key': 'actor', 'type': 'Actor'},
        'source': {'key': 'source', 'type': 'Source'},
    }

    def __init__(self, id=None, timestamp=None, action=None, target=None, request=None, actor=None, source=None):
        # NOTE: generated model -- ``id`` deliberately shadows the builtin
        # to match the wire contract.
        self.id = id
        self.timestamp = timestamp
        self.action = action
        self.target = target
        self.request = request
        self.actor = actor
        self.source = source
|
{
"content_hash": "8162ab806cda4c5986c35f158fe378bb",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 113,
"avg_line_length": 40.93478260869565,
"alnum_prop": 0.6399362719065321,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "46a3ce6bc5dbf2bab32063b9c5c4df25b77c134f",
"size": "2357",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_06_01_preview/models/event_content.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, unicode_literals)
import concurrent.futures
import imp
import importlib
import logging
import os
import threading
from datetime import datetime
from fval.external.six import iteritems, text_type
from fval.utils import get_relative_path
class Counter(object):
    """A thread-safe integer counter shared between check-worker threads."""

    def __init__(self, start=0):
        # Lock serialises concurrent increments from the thread pool.
        self.lock = threading.Lock()
        self.value = start

    def increment(self):
        """Atomically add one to the counter."""
        logging.debug('Waiting for lock')
        # ``with`` releases the lock even if the body raises -- equivalent
        # to the old acquire/try/finally, but idiomatic.
        with self.lock:
            logging.debug('Acquired lock')
            self.value += 1
class OutputWriter(object):
    """Serialises appends of check output to a single output file."""

    def __init__(self, output=None, output_path=None):
        # Lock serialises writes from concurrent check workers.
        self.lock = threading.Lock()
        self.output = output            # next payload to append (set by workers)
        self.output_path = output_path  # destination file path

    def write(self):
        """Append ``self.output`` (if truthy) to ``self.output_path``.

        No-op when ``self.output`` is empty/None.
        """
        # ``with`` releases the lock even if open()/write fails -- equivalent
        # to the old acquire/try/finally, but idiomatic.
        with self.lock:
            if self.output:
                with open(self.output_path, mode='a') as output_file:
                    output_file.writelines(text_type(self.output) + os.linesep)
def _check_worker(config=None, unit_path=None, check_name=None,
                  check_args=None, unit_content=None, error_counter=None,
                  output_writer=None):
    """Run one named check against one unit (thread-pool worker body).

    Loads the check module -- user-supplied ``./library/<check_name>.py``
    first, falling back to the built-in ``fval.checks.<check_name>`` --
    executes its ``run()``, logs the result at its reported level,
    increments ``error_counter`` for any non-INFO result, and forwards any
    check output to ``output_writer``.

    Returns:
        dict: ``{'output_written': bool}``
    """
    logger = config['logger']
    logger.debug('Checking Unit: {0}, Name: {1}, Args: {2}'.format(unit_path, check_name, check_args))
    check_module = None
    # Try loading check module from cwd/library/<check_name> first
    try:
        check_module = imp.load_source(check_name,
                                       '{0}{1}library{2}{3}.py'.format(os.getcwd(), os.sep, os.sep, check_name))
    except ImportError:
        logger.debug('Could not import user supplied module.')
    except IOError:
        logger.debug('Could not load: {0}{1}library{2}{3}.py'.format(os.getcwd(), os.sep, os.sep, check_name))
    # If check module not found locally, then try and load a built-in
    if not check_module:
        try:
            check_module = importlib.import_module(
                'fval.checks.{0}'.format(check_name), 'fval.checks')
        except ImportError:
            logger.error('{0}: {1}'.format(get_relative_path(unit_path),
                                           'CHECK MODULE: \'{0}\' NOT FOUND'.format(check_name)))
    output_written = False
    if check_module:
        execution_args = dict(unit_path=unit_path,
                              unit_content=unit_content,
                              check_args=check_args,
                              config=config)
        try:
            result = check_module.run(**execution_args)
            # Anything other than INFO counts as an unsuccessful execution.
            if result['level'] != 'INFO':
                error_counter.increment()
            execute_output = result.get('output')
            if execute_output and output_writer:
                # Replace returned \n with actual line separators
                execute_output = text_type(
                    execute_output).replace('\\n', os.linesep)
                if os.linesep not in text_type(execute_output):
                    output_writer.output = execute_output
                else:
                    output_writer.output = text_type(execute_output)
                output_writer.write()
                output_written = True
            if not config.get('silent'):  # pragma: no cover
                # Mirror the check's reported level onto the logger.
                if result['level'] == 'DEBUG':
                    logger.debug(
                        '{0}: {1}'.format(get_relative_path(unit_path), result['message']))
                if result['level'] == 'INFO':
                    logger.info(
                        '{0}: {1}'.format(get_relative_path(unit_path), result['message']))
                elif result['level'] == 'WARN':
                    logger.warn(
                        '{0}: {1}'.format(get_relative_path(unit_path), result['message']))
                elif result['level'] == 'ERROR':
                    logger.error(
                        '{0}: {1}'.format(get_relative_path(unit_path), result['message']))
        except AttributeError:
            # Raised when the module lacks a ``run`` attribute (or the
            # result dict lacks an expected key).
            logger.error('{0}: {1}'.format(
                get_relative_path(unit_path),
                'CHECK MODULE: \'{0}\' HAS FAILED - RUN WITH --debug FOR DETAILS'.format(check_name)))
            # TODO: Output trace with debug loglevel
    return dict(output_written=output_written)
def execute_plan(plan=None, config=None):
    """ Execute the plan of checks

    Args:
        plan: The list of checks to perform
        config: The configuration that applies to the checks being run

    Returns:
        int: The number of unsuccessful executions
    """
    logger = config['logger']
    error_counter = Counter()
    output_writer = None
    output_path = None
    if config.get('output'):
        # Timestamped output file name, millisecond precision.
        output_filename = 'fval_{0}'.format(
            datetime.now().strftime('%Y%m%d%H%M%S%f')[:-3])
        output_path = config.get('output') + os.sep + output_filename
        output_writer = OutputWriter(output_path=output_path)
    # If mime_type and windows then strip them out and provide not currently supported message
    if config['platform'].startswith('win'):
        logger.info('mime_type checks currently disabled due to Windows compatibility')
    # Loop through each step of the plan
    output_written = False
    for item in plan:
        unit_path, unit_checks = item.get('unit_path'), item.get('unit_checks')
        with open(unit_path, mode='rb') as unit_file:
            unit_content = unit_file.read()
        # TODO: Determine optimum max_worker count
        # Run this unit's checks concurrently; the pool's context manager
        # waits for every submitted check before moving on.
        # (A redundant bare ``try: ... except: raise`` used to wrap this
        # section; it was a no-op and has been removed.)
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            executor_list = list()
            for check_name, check_args in iteritems(unit_checks):
                thread_args = dict(config=config,
                                   unit_path=unit_path,
                                   unit_content=unit_content,
                                   check_name=check_name,
                                   check_args=check_args,
                                   output_writer=output_writer,
                                   error_counter=error_counter)
                # mime_type checks are skipped on Windows (see above).
                if not (config['platform'].startswith('win') and check_name == 'mime_type'):
                    executor_list.append(executor.submit(_check_worker,
                                                         **thread_args))
            for future in executor_list:
                result = future.result()
                if result['output_written']:
                    output_written = True
    if output_written:
        logger.info('Output written to: {0}'.format(output_path))
    return error_counter.value
|
{
"content_hash": "20be0923fcaa0697e2841311e7c7f3e0",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 112,
"avg_line_length": 40.65680473372781,
"alnum_prop": 0.5447533110173192,
"repo_name": "jonhadfield/fval",
"id": "d959edcd0c598a19a30ac432f354b09e3b3bb386",
"size": "6895",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "fval/execute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63194"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import sys
import subprocess
from libs.common.Variable_Manager import *
from libs.utility.BDD import *
from libs.common import State_Manager as SM
from libs.utility import LogMod
from libs.utility import DateMod
def _shell_quote(token):
    """Return *token* quoted for safe interpolation into a shell command."""
    try:
        from shlex import quote  # Python 3.3+
    except ImportError:  # pragma: no cover - Python 2 fallback
        from pipes import quote
    return quote(token)


def _run_shell(command):
    """Run *command* through the shell and return its output as text.

    Strips a single trailing newline (mirroring the original ``[:-1]``).
    """
    output = subprocess.check_output(command, stdin=subprocess.PIPE, shell=True)
    if isinstance(output, bytes):
        # BUG FIX: on Python 3 check_output returns bytes; the original
        # str() call produced the repr ("b'...'"), corrupting the value.
        output = output.decode('utf-8', 'replace')
    if output.endswith('\n'):
        output = output[:-1]
    return output


def Check_Partner(backupHost):
    """Return True if the backup partner *backupHost* is reachable.

    The IP is taken from the ``partner`` table when present, otherwise
    resolved with ``nslookup``.  Reachability is then probed with
    ``traceroute`` (UDP first, then ICMP via ``-I``).  On failure the
    cancelled transfer is logged as a warning when other partners exist,
    as an error otherwise, and False is returned.
    """
    # BUG FIX: the original query quoted the placeholder ('host = "%s"',
    # which double-quotes the bound value), passed the parameter as a bare
    # string instead of a 1-tuple, and aliased the column to an empty name.
    cursor.execute("""
        SELECT ip
        FROM partner
        WHERE host = %s
        """,
        (backupHost,))
    rows = cursor.fetchall()
    IP = ""
    for row in rows:
        IP = row[0]  # keep the last matching row, as before
    # Everything interpolated into the pipelines below is shell-quoted to
    # prevent command injection; shell=True is required for the pipes.
    quoted_host = _shell_quote(backupHost)
    if not IP:
        IP = _run_shell("nslookup " + quoted_host + " | tail -2 | head -n 1 | cut -d' ' -f2")
    quoted_ip = _shell_quote(IP)
    Res = _run_shell("traceroute " + quoted_host + " 22 | grep " + quoted_ip + " | sed '1d'")
    if not Res:
        # Retry with ICMP echo probes for hosts that filter UDP traceroute.
        Res = _run_shell("traceroute " + quoted_host + " 22 -I | grep " + quoted_ip + " | sed '1d'")
    if not Res:
        LogMod.addWarning("L'hôte " + backupHost + " est injoignable")
        cursor.execute("""
            SELECT count(*)
            FROM partner
            """)
        rows = cursor.fetchall()
        nbHosts = 0
        for row in rows:
            nbHosts = row[0]
        if nbHosts > 1:
            # Other partners remain: cancellation is a warning, not fatal.
            LogMod.addWarning("Transfert annulé")
        else:
            LogMod.addError("Transfert annulé")
        #SM.set_State(4)
        return False
    LogMod.addInfo("L'hôte " + backupHost + " est joignable")
    return True
|
{
"content_hash": "311b55c806c2b6deab964439c0d1d283",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 20.883116883116884,
"alnum_prop": 0.6442786069651741,
"repo_name": "RootKitDev/Athena",
"id": "4447c8ab1ff8d48ec3ac0546e891bb33d62bef34",
"size": "1894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Core/libs/common/Partner_Manager.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31461"
},
{
"name": "Shell",
"bytes": "21766"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.logic import LogicManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-logic
# USAGE
python get_schemas_by_integration_account_name.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List the schemas of an integration account and print each one."""
    logic_client = LogicManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscriptionId>",
    )
    schemas = logic_client.integration_account_schemas.list(
        resource_group_name="testResourceGroup",
        integration_account_name="<integrationAccountName>",
    )
    for schema in schemas:
        print(schema)


# x-ms-original-file: specification/logic/resource-manager/Microsoft.Logic/stable/2019-05-01/examples/IntegrationAccountSchemas_List.json
if __name__ == "__main__":
    main()
|
{
"content_hash": "13659700157ed3dcb76a69b8d54acb33",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 137,
"avg_line_length": 34,
"alnum_prop": 0.7326989619377162,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c23b8f2dea6d547ec77f08a41906d629a21d95fd",
"size": "1624",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logic/azure-mgmt-logic/generated_samples/get_schemas_by_integration_account_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
from ivf.batch.batch import DatasetBatch
from ivf.io_util.image import loadNormal
from ivf.cv.image import to32F, luminance, setAlpha
from ivf.np.norm import normalizeVector
from ivf.core.shader.toon import ToonShader
from ivf.core.sfs.depth_from_gradient import depthFromGradient
from ivf.core.sfs.depth_to_normal import depthToNormal
from ivf.plot.window import SubplotGrid, showMaximize
from ivf.cv.normal import normalToColor
from ivf.core.shader.lambert import LambertShader
from ivf.ui.model_view import ModelView
class DepthFromGradientBatch(DatasetBatch):
    """Batch job: shade a normal map, estimate depth from the shading
    gradient at low resolution, and push the RGBA+depth result to the view.
    """

    def __init__(self, view, name="Depth From Gradient", dataset_name="3dmodel"):
        super(DepthFromGradientBatch, self).__init__(name, dataset_name)
        self._view = view

    def _runImp(self):
        """Process the current dataset item (skips items with no normal map)."""
        normal_data = loadNormal(self._data_file)
        if normal_data is None:
            return
        N0_32F, A_8U = normal_data
        A_32F = to32F(A_8U)
        # Fixed light direction used for the synthetic shading.
        L = normalizeVector(np.array([-0.2, 0.3, 0.7]))
        C0_32F = LambertShader().diffuseShading(L, N0_32F)
        I0_32F = luminance(C0_32F)
        # Depth estimation runs on a downsampled 256x256 luminance image.
        I0_low_32F = cv2.resize(I0_32F, (256, 256))
        # BUG FIX: cv2.resize takes dsize as (width, height); the original
        # passed numpy shapes (rows, cols) directly, which transposes the
        # target size for non-square images.
        A_low_8U = cv2.resize(A_8U, (I0_low_32F.shape[1], I0_low_32F.shape[0]))
        D_32F = depthFromGradient(I0_low_32F, A_low_8U)
        D_32F = cv2.resize(D_32F, (I0_32F.shape[1], I0_32F.shape[0]))
        # Kept from the original (was only consumed by now-removed debug
        # plotting); presumably a pure computation -- confirm before deleting.
        N_32F = depthToNormal(D_32F)
        self._view.setRGBAD(setAlpha(C0_32F, A_32F), D_32F)
if __name__ == '__main__':
    # Manual entry point: wire the batch job to a Qt model view and run the
    # event loop until the window is closed.
    app = QApplication(sys.argv)
    view = ModelView()
    batch = DepthFromGradientBatch(view)
    # The view's "return" callback advances to the next dataset item
    # (presumably bound to the Return key -- confirm in ModelView).
    view.setReturnCallback(batch.runNext)
    view.show()
    sys.exit(app.exec_())
|
{
"content_hash": "85e7925c59270249fef59b85cf1ff243",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 89,
"avg_line_length": 32.81333333333333,
"alnum_prop": 0.6542056074766355,
"repo_name": "tody411/ImageViewerFramework",
"id": "93e2adddc152b4f9d4d26e9359cacd48817ccac4",
"size": "2628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivf/batch/depth_from_gradient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "8089"
},
{
"name": "Python",
"bytes": "337507"
}
],
"symlink_target": ""
}
|
import logging as lg
class Paper(object):
    """
    Wrapper base class for a paper.

    Wraps one BibTeX entry dictionary and exposes its ID, title, year, DOI,
    URL and custom annotation through has_*/get_* accessors.  Book entries
    are logged and only partially populated (DOI/URL/annotation parsing is
    skipped for them).
    """

    def __init__(self, bd):
        """
        Constructor.

        @param [in] bd BibTeX entry dictionary (must contain 'ID' and
                       'ENTRYTYPE').
        """
        ## The ID.
        self.__id = bd['ID']

        ## The title.
        self.__title = None
        if 'title' in bd:
            self.__title = bd['title']

        ## The year of publication.
        self.__year = None
        if 'year' in bd:
            self.__year = bd['year']

        ## The Digital Object Identifier (DOI).
        self.__doi = None

        ## The URL.
        self.__url = None

        ## Custom annotation - used in summary tables etc.
        # BUG FIX: initialised *before* the book early-return below; it used
        # to be assigned only afterwards, so has_annotation() and
        # get_annotation() raised AttributeError for book entries.
        self.__annotation = None

        if bd['ENTRYTYPE'] == "book":
            lg.info(" * %s is a book, skipping." % (self.get_id()))
            lg.info(" *")
            return None

        if 'doi' in bd:
            self.__doi = bd['doi']
        if 'link' in bd:
            self.__url = bd['link']
        if 'annote' in bd:
            self.__annotation = bd['annote']

        lg.info(" * Paper ID : '%s'" % (self.get_id()))
        lg.info(bd)
        if self.has_title(): lg.info(" * Title : '%s'" % (self.get_title()))
        else: lg.info(" * '%s' HAS NO TITLE!" % (self.get_id()))
        if self.has_year(): lg.info(" * Year : '%s'" % (self.get_year()))
        else:
            lg.info(" * '%s' HAS NO YEAR!" % (self.get_id()))
            #raise IOError("* %s has no year!" % (self.get_id()))
        if self.has_doi(): lg.info(" * DOI : '%s'" % (self.get_doi()))
        else:
            lg.info(" * '%s' HAS NO DOI!" % (self.get_id()))
            #raise IOError("* %s has no DOI!" % (self.get_id()))
        lg.info(" *")

    def __lt__(self, other):
        # NOTE(review): years are the raw BibTeX strings and may be None;
        # comparing None raises TypeError on Python 3 -- confirm callers
        # only sort papers that have years.
        return self.get_year() < other.get_year()

    def get_id(self):
        return self.__id

    def has_title(self):
        # Was ``!= None``; ``is not None`` matches the other has_* accessors.
        return self.__title is not None

    def get_title(self):
        return self.__title

    def has_year(self):
        return self.__year is not None

    def get_year(self):
        return self.__year

    def has_doi(self):
        return self.__doi is not None

    def get_doi(self):
        return self.__doi

    def has_url(self):
        return self.__url is not None

    def get_url(self):
        return self.__url

    def has_annotation(self):
        return self.__annotation is not None

    def get_annotation(self):
        return self.__annotation
|
{
"content_hash": "e8e2ca725e3880061b320932e9919c3d",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 27.793478260869566,
"alnum_prop": 0.4704732107938991,
"repo_name": "CERNatschool/public-doc-index",
"id": "f6f1bdf4937d08a72131b15bf57477af21e198bf",
"size": "2625",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wrappers/paper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47818"
},
{
"name": "Shell",
"bytes": "1070"
},
{
"name": "TeX",
"bytes": "117865"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: create the ``profiles_userstatus`` table and add the
        ``is_unavailable`` flag (default False) to ``profiles_userprofile``."""
        # Adding model 'UserStatus'
        db.create_table(u'profiles_userstatus', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='status', to=orm['auth.User'])),
            ('start_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('replacement_rep', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='replaced_rep', null=True, to=orm['auth.User'])),
            ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'profiles', ['UserStatus'])
        # Adding field 'UserProfile.is_unavailable'
        db.add_column(u'profiles_userprofile', 'is_unavailable',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
def backwards(self, orm):
# Deleting model 'UserStatus'
db.delete_table(u'profiles_userstatus')
# Deleting field 'UserProfile.is_unavailable'
db.delete_column(u'profiles_userprofile', 'is_unavailable')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
u'profiles.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 4, 29, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': u"orm['auth.User']"}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'last_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
},
u'profiles.userstatus': {
'Meta': {'ordering': "['-start_date', '-created_on']", 'object_name': 'UserStatus'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': u"orm['auth.User']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['profiles']
|
{
"content_hash": "bfd7d7e9813f67a3182736125ca09141",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 199,
"avg_line_length": 81.81159420289855,
"alnum_prop": 0.5601417183348095,
"repo_name": "chirilo/remo",
"id": "bf4c9954c41178669c42fa1470be2180d7d27f83",
"size": "11314",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/profiles/migrations/0042_auto__add_userstatus__add_field_userprofile_status.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "Batchfile",
"bytes": "4531"
},
{
"name": "CSS",
"bytes": "372453"
},
{
"name": "HTML",
"bytes": "373393"
},
{
"name": "JavaScript",
"bytes": "606447"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Puppet",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "7483058"
},
{
"name": "Shell",
"bytes": "3221"
},
{
"name": "Smarty",
"bytes": "215"
},
{
"name": "TeX",
"bytes": "1525"
}
],
"symlink_target": ""
}
|
from kapal.algo import *
from kapal.world import *
from kapal.state import *
import kapal.tools
import time
# TODO: walk through example with comments
# TODO: post this example on wiki as a tutorial
# Build a random n x n cost map and plan a corner-to-corner path with A*.
n = 50 # width/height of world
c = kapal.tools.rand_cost_map(n, n, min_val=1, max_val=3, flip=True)
w = World2d(c, state_type=State2dAStar)
# Plan from the top-left cell (0, 0) to the bottom-right cell (n-1, n-1).
astar = AStar(w, w.state(0, 0), w.state(n - 1, n - 1))
start_time = time.time()
path = astar.plan()
total_time = time.time() - start_time
# Use print as a function call: the original `print total_time, "seconds."`
# statement form is a SyntaxError on Python 3. This form produces the same
# "<time> seconds." output on both Python 2 and Python 3.
print("%s seconds." % total_time)
# TODO: finish the example. show the output in a human-readable format.
# perhaps possible interface with Seaship.
|
{
"content_hash": "6e4b025e6e6f31b20abbfd72ca4fb8a2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 28.818181818181817,
"alnum_prop": 0.7066246056782335,
"repo_name": "elben/kapal",
"id": "baa9c24c709efe50a8f04eb22a11cb123883b70b",
"size": "634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simple01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25150"
}
],
"symlink_target": ""
}
|
import io
import json
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.fs import LocalFileSystem, SubTreeFileSystem
from pyarrow.tests.parquet.common import (
parametrize_legacy_dataset, parametrize_legacy_dataset_not_supported)
from pyarrow.util import guid
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import (_read_table, _test_dataframe,
_write_table)
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
from pyarrow.tests.parquet.common import (_roundtrip_pandas_dataframe,
alltypes_sample)
except ImportError:
pd = tm = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet'
pytestmark = pytest.mark.parquet
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
    """The b'pandas' schema metadata survives write + read_metadata,
    including the RangeIndex description in 'index_columns'."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert b'pandas' in arrow_table.schema.metadata
    _write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
    metadata = pq.read_metadata(filename).metadata
    assert b'pandas' in metadata
    js = json.loads(metadata[b'pandas'].decode('utf8'))
    assert js['index_columns'] == [{'kind': 'range',
                                    'name': None,
                                    'start': 0, 'stop': 10000,
                                    'step': 1}]
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    """Tables whose schemas differ only in (pandas) metadata can be written
    through a single ParquetWriter without a schema-mismatch error."""
    # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
    # Schemas differ with metadata considered, but are equal without it.
    assert not table1.schema.equals(table2.schema, check_metadata=True)
    assert table1.schema.equals(table2.schema)
    # NOTE(review): writer is never closed/used as a context manager here;
    # presumably relies on finalization — confirm intent.
    writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)
    writer.write_table(table1)
    writer.write_table(table2)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_column_multiindex(tempdir, use_legacy_dataset):
    """A DataFrame with MultiIndex columns round-trips via read_pandas."""
    df = alltypes_sample(size=10)
    df.columns = pd.MultiIndex.from_tuples(
        list(zip(df.columns, df.columns[::-1])),
        names=['level_1', 'level_2']
    )
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
    table_read = pq.read_pandas(
        filename, use_legacy_dataset=use_legacy_dataset)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(
    tempdir, use_legacy_dataset
):
    """With preserve_index=False, pandas metadata has empty 'index_columns'
    but still lists 'columns'; the frame round-trips unchanged."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    js = arrow_table.schema.pandas_metadata
    assert not js['index_columns']
    # ARROW-2170
    # While index_columns should be empty, columns needs to be filled still.
    assert js['columns']
    _write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
    table_read = pq.read_pandas(
        filename, use_legacy_dataset=use_legacy_dataset)
    js = table_read.schema.pandas_metadata
    assert not js['index_columns']
    read_metadata = table_read.schema.metadata
    assert arrow_table.schema.metadata == read_metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
# TODO(dataset) duplicate column selection actually gives duplicate columns now
@pytest.mark.pandas
@parametrize_legacy_dataset_not_supported
def test_pandas_column_selection(tempdir, use_legacy_dataset):
    """columns= selection reads only the requested columns; requesting a
    duplicate column name still yields it once (ARROW-4267)."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16)
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename)
    table_read = _read_table(
        filename, columns=['uint8'], use_legacy_dataset=use_legacy_dataset)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    table_read = _read_table(
        filename, columns=['uint8', 'uint8'],
        use_legacy_dataset=use_legacy_dataset)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_native_file_roundtrip(tempdir, use_legacy_dataset):
    """Round-trip a DataFrame through an in-memory Arrow buffer stream."""
    frame = _test_dataframe(10000)
    table = pa.Table.from_pandas(frame)
    sink = pa.BufferOutputStream()
    _write_table(table, sink, version='2.6')
    source = pa.BufferReader(sink.getvalue())
    roundtripped = _read_table(
        source, use_legacy_dataset=use_legacy_dataset).to_pandas()
    tm.assert_frame_equal(frame, roundtripped)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_pandas_column_subset(tempdir, use_legacy_dataset):
    """read_pandas with columns= returns only the requested columns."""
    frame = _test_dataframe(10000)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(frame), sink, version='2.6')
    wanted = ['strings', 'uint8']
    subset = pq.read_pandas(
        pa.BufferReader(sink.getvalue()),
        columns=wanted,
        use_legacy_dataset=use_legacy_dataset,
    ).to_pandas()
    tm.assert_frame_equal(frame[wanted], subset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_empty_roundtrip(tempdir, use_legacy_dataset):
    """A zero-row DataFrame survives a Parquet round trip unchanged."""
    empty_frame = _test_dataframe(0)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(empty_frame), sink, version='2.6')
    restored = _read_table(
        pa.BufferReader(sink.getvalue()),
        use_legacy_dataset=use_legacy_dataset,
    ).to_pandas()
    tm.assert_frame_equal(empty_frame, restored)
@pytest.mark.pandas
def test_pandas_can_write_nested_data(tempdir):
    """Writing a DataFrame whose column values are nested dicts succeeds."""
    data = {
        "agg_col": [
            {"page_type": 1},
            {"record_type": 1},
            {"non_consecutive_home": 0},
        ],
        "uid_first": "1001"
    }
    df = pd.DataFrame(data=data)
    arrow_table = pa.Table.from_pandas(df)
    imos = pa.BufferOutputStream()
    # This succeeds under V2
    _write_table(arrow_table, imos)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_pyfile_roundtrip(tempdir, use_legacy_dataset):
    """Round-trip through a plain Python file object and io.BytesIO."""
    filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
    size = 5
    df = pd.DataFrame({
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': ['foo', 'bar', None, 'baz', 'qux']
    })
    arrow_table = pa.Table.from_pandas(df)
    with filename.open('wb') as f:
        _write_table(arrow_table, f, version="2.4")
    data = io.BytesIO(filename.read_bytes())
    table_read = _read_table(data, use_legacy_dataset=use_legacy_dataset)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_configuration_options(tempdir, use_legacy_dataset):
    """Round-trip fidelity across writer options: use_dictionary,
    write_statistics, and each available compression codec."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # NOTE(review): 'int8' column is built with np.int16 dtype —
        # looks like a typo for np.int8; confirm against upstream.
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    for use_dictionary in [True, False]:
        _write_table(arrow_table, filename, version='2.6',
                     use_dictionary=use_dictionary)
        table_read = _read_table(
            filename, use_legacy_dataset=use_legacy_dataset)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
    for write_statistics in [True, False]:
        _write_table(arrow_table, filename, version='2.6',
                     write_statistics=write_statistics)
        table_read = _read_table(filename,
                                 use_legacy_dataset=use_legacy_dataset)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
    for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
        # Skip codecs not compiled into this Arrow build.
        if (compression != 'NONE' and
                not pa.lib.Codec.is_available(compression)):
            continue
        _write_table(arrow_table, filename, version='2.6',
                     compression=compression)
        table_read = _read_table(
            filename, use_legacy_dataset=use_legacy_dataset)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """Writing with flavor='spark' keeps the named pandas index intact."""
    frame = _test_dataframe(size=100)
    frame.index = np.arange(0, 10 * len(frame), 10)
    frame.index.name = 'foo'
    roundtripped = _roundtrip_pandas_dataframe(
        frame, {'version': '2.0', 'flavor': 'spark'})
    tm.assert_frame_equal(roundtripped, frame)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_index_column_name_duplicate(tempdir, use_legacy_dataset):
    """An index that shares its name with a kept column ('time', via
    set_index(drop=False)) round-trips correctly."""
    data = {
        'close': {
            pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
            pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
        },
        'time': {
            pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
                '2017-06-30 01:31:00'
            ),
            pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
                '2017-06-30 01:32:00'
            ),
        }
    }
    path = str(tempdir / 'data.parquet')
    dfx = pd.DataFrame(data).set_index('time', drop=False)
    tdfx = pa.Table.from_pandas(dfx)
    _write_table(tdfx, path)
    arrow_table = _read_table(path, use_legacy_dataset=use_legacy_dataset)
    result_df = arrow_table.to_pandas()
    tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_multiindex_duplicate_values(tempdir, use_legacy_dataset):
    """A MultiIndex containing repeated labels survives a round trip."""
    row_count = 3
    numbers = list(range(row_count))
    idx = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], numbers],
        names=['foobar', 'some_numbers'],
    )
    frame = pd.DataFrame({'numbers': numbers}, index=idx)
    original = pa.Table.from_pandas(frame)
    target = tempdir / 'dup_multi_index_levels.parquet'
    _write_table(original, target)
    restored = _read_table(target, use_legacy_dataset=use_legacy_dataset)
    assert original.equals(restored)
    tm.assert_frame_equal(restored.to_pandas(), frame)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_naming(datadir, use_legacy_dataset):
    """A file written by pyarrow 0.7.1 still reads with the expected
    (unnamed) default index."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
                           index_col=None, header=0, engine='python')
    table = _read_table(
        datadir / 'v0.7.1.parquet', use_legacy_dataset=use_legacy_dataset)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_multi_level_named(
    datadir, use_legacy_dataset
):
    """A 0.7.1-era file with a fully named MultiIndex still reads back."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string), sep=r'\s{2,}',
        index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    table = _read_table(datadir / 'v0.7.1.all-named-index.parquet',
                        use_legacy_dataset=use_legacy_dataset)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_multi_level_some_named(
    datadir, use_legacy_dataset
):
    """A 0.7.1-era file with a partially named MultiIndex (middle level
    unnamed) still reads back."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string),
        sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    expected.index = expected.index.set_names(['cut', None, 'clarity'])
    table = _read_table(datadir / 'v0.7.1.some-named-index.parquet',
                        use_legacy_dataset=use_legacy_dataset)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_column_metadata_handling(
    datadir, use_legacy_dataset
):
    """A 0.7.1-era file reads fully and with a single-column selection
    (index is dropped for the column subset)."""
    expected = pd.DataFrame(
        {'a': [1, 2, 3], 'b': [.1, .2, .3],
         'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
    expected.index = pd.MultiIndex.from_arrays(
        [['a', 'b', 'c'],
         pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
        names=['index', None])
    path = datadir / 'v0.7.1.column-metadata-handling.parquet'
    table = _read_table(path, use_legacy_dataset=use_legacy_dataset)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
    table = _read_table(
        path, columns=['a'], use_legacy_dataset=use_legacy_dataset)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_categorical_index_survives_roundtrip(use_legacy_dataset):
    """A CategoricalIndex survives a write/read_pandas round trip."""
    # ARROW-3652, addressed by ARROW-3246
    df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
    df['c1'] = df['c1'].astype('category')
    df = df.set_index(['c1'])
    table = pa.Table.from_pandas(df)
    bos = pa.BufferOutputStream()
    pq.write_table(table, bos)
    ref_df = pq.read_pandas(
        bos.getvalue(), use_legacy_dataset=use_legacy_dataset).to_pandas()
    assert isinstance(ref_df.index, pd.CategoricalIndex)
    assert ref_df.index.equals(df.index)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_categorical_order_survives_roundtrip(use_legacy_dataset):
    """An ordered Categorical dtype is preserved by Parquet (ARROW-6302)."""
    frame = pd.DataFrame({"a": pd.Categorical(
        ["a", "b", "c", "a"], categories=["b", "c", "d"], ordered=True)})
    sink = pa.BufferOutputStream()
    pq.write_table(pa.Table.from_pandas(frame), sink)
    restored = pq.read_pandas(
        sink.getvalue(), use_legacy_dataset=use_legacy_dataset).to_pandas()
    tm.assert_frame_equal(restored, frame)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_categorical_na_type_row_groups(use_legacy_dataset):
    """All-null categorical columns written across many small row groups
    read back equal to the non-categorical original (ARROW-5085)."""
    # ARROW-5085
    df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
    df_category = df.astype({"col": "category", "int": "category"})
    table = pa.Table.from_pandas(df)
    table_cat = pa.Table.from_pandas(df_category)
    buf = pa.BufferOutputStream()
    # it works
    pq.write_table(table_cat, buf, version='2.6', chunk_size=10)
    result = pq.read_table(
        buf.getvalue(), use_legacy_dataset=use_legacy_dataset)
    # Result is non-categorical
    assert result[0].equals(table[0])
    assert result[1].equals(table[1])
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_categorical_roundtrip(use_legacy_dataset):
    """Categoricals with an unobserved category and a null code (-1)
    round-trip with dtype and category list intact."""
    # ARROW-5480, this was enabled by ARROW-3246
    # Have one of the categories unobserved and include a null (-1)
    codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
    categories = ['foo', 'bar', 'baz']
    df = pd.DataFrame({'x': pd.Categorical.from_codes(
        codes, categories=categories)})
    buf = pa.BufferOutputStream()
    pq.write_table(pa.table(df), buf)
    result = pq.read_table(
        buf.getvalue(), use_legacy_dataset=use_legacy_dataset).to_pandas()
    assert result.x.dtype == 'category'
    assert (result.x.cat.categories == categories).all()
    tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_pandas_preserve_extensiondtypes(
    tempdir, use_legacy_dataset
):
    """Pandas extension dtype (Int64) is preserved by write_to_dataset
    (partitioned and unpartitioned) and by plain write_table."""
    df = pd.DataFrame({'part': 'a', "col": [1, 2, 3]})
    df['col'] = df['col'].astype("Int64")
    table = pa.table(df)
    pq.write_to_dataset(
        table, str(tempdir / "case1"), partition_cols=['part'],
        use_legacy_dataset=use_legacy_dataset
    )
    result = pq.read_table(
        str(tempdir / "case1"), use_legacy_dataset=use_legacy_dataset
    ).to_pandas()
    tm.assert_frame_equal(result[["col"]], df[["col"]])
    pq.write_to_dataset(
        table, str(tempdir / "case2"), use_legacy_dataset=use_legacy_dataset
    )
    result = pq.read_table(
        str(tempdir / "case2"), use_legacy_dataset=use_legacy_dataset
    ).to_pandas()
    tm.assert_frame_equal(result[["col"]], df[["col"]])
    pq.write_table(table, str(tempdir / "data.parquet"))
    result = pq.read_table(
        str(tempdir / "data.parquet"), use_legacy_dataset=use_legacy_dataset
    ).to_pandas()
    tm.assert_frame_equal(result[["col"]], df[["col"]])
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_pandas_preserve_index(tempdir, use_legacy_dataset):
    """The pandas index round-trips through write_to_dataset and
    write_table (ARROW-8251); partitioned reads compare against a frame
    whose partition column became categorical."""
    # ARROW-8251 - preserve pandas index in roundtrip
    df = pd.DataFrame({'part': ['a', 'a', 'b'], "col": [1, 2, 3]})
    df.index = pd.Index(['a', 'b', 'c'], name="idx")
    table = pa.table(df)
    df_cat = df[["col", "part"]].copy()
    df_cat["part"] = df_cat["part"].astype("category")
    pq.write_to_dataset(
        table, str(tempdir / "case1"), partition_cols=['part'],
        use_legacy_dataset=use_legacy_dataset
    )
    result = pq.read_table(
        str(tempdir / "case1"), use_legacy_dataset=use_legacy_dataset
    ).to_pandas()
    tm.assert_frame_equal(result, df_cat)
    pq.write_to_dataset(
        table, str(tempdir / "case2"), use_legacy_dataset=use_legacy_dataset
    )
    result = pq.read_table(
        str(tempdir / "case2"), use_legacy_dataset=use_legacy_dataset
    ).to_pandas()
    tm.assert_frame_equal(result, df)
    pq.write_table(table, str(tempdir / "data.parquet"))
    result = pq.read_table(
        str(tempdir / "data.parquet"), use_legacy_dataset=use_legacy_dataset
    ).to_pandas()
    tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.parametrize('preserve_index', [True, False, None])
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    """A dataset whose files have no schema metadata still reads with
    pandas semantics supplied by a common _metadata file (ARROW-1103)."""
    # ARROW-1103
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    # Index name only survives when the index was preserved in metadata.
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_read_pandas_passthrough_keywords(tempdir):
    """read_pandas forwards extra keywords (e.g. filesystem) to the
    underlying reader (ARROW-11464)."""
    # ARROW-11464 - previously not all keywords were passed through (such as
    # the filesystem keyword)
    df = pd.DataFrame({'a': [1, 2, 3]})
    filename = tempdir / 'data.parquet'
    _write_table(df, filename)
    result = pq.read_pandas(
        'data.parquet',
        filesystem=SubTreeFileSystem(str(tempdir), LocalFileSystem())
    )
    assert result.equals(pa.table(df))
@pytest.mark.pandas
def test_read_pandas_map_fields(tempdir):
    """Columns stored as Arrow map type (list of key/value tuples in
    pandas) round-trip via read_pandas (ARROW-10140)."""
    # ARROW-10140 - table created from Pandas with mapping fields
    df = pd.DataFrame({
        'col1': pd.Series([
            [('id', 'something'), ('value2', 'else')],
            [('id', 'something2'), ('value', 'else2')],
        ]),
        'col2': pd.Series(['foo', 'bar'])
    })
    filename = tempdir / 'data.parquet'
    udt = pa.map_(pa.string(), pa.string())
    schema = pa.schema([pa.field('col1', udt), pa.field('col2', pa.string())])
    arrow_table = pa.Table.from_pandas(df, schema)
    _write_table(arrow_table, filename)
    result = pq.read_pandas(filename).to_pandas()
    tm.assert_frame_equal(result, df)
|
{
"content_hash": "301d457baf706ccd098f6397a5e95475",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 79,
"avg_line_length": 34.98840579710145,
"alnum_prop": 0.6159390274210919,
"repo_name": "kou/arrow",
"id": "3bc204c978a00974c305a5567c79ba1a2f1a4b44",
"size": "24928",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pyarrow/tests/parquet/test_pandas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "30689"
},
{
"name": "C",
"bytes": "1400442"
},
{
"name": "C#",
"bytes": "1029129"
},
{
"name": "C++",
"bytes": "24695324"
},
{
"name": "CMake",
"bytes": "711360"
},
{
"name": "Cython",
"bytes": "1554440"
},
{
"name": "Dockerfile",
"bytes": "147322"
},
{
"name": "Emacs Lisp",
"bytes": "1064"
},
{
"name": "FreeMarker",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "4586449"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "Java",
"bytes": "7045674"
},
{
"name": "JavaScript",
"bytes": "127157"
},
{
"name": "Jinja",
"bytes": "19948"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "MATLAB",
"bytes": "40399"
},
{
"name": "Makefile",
"bytes": "32873"
},
{
"name": "Meson",
"bytes": "69508"
},
{
"name": "Objective-C++",
"bytes": "11472"
},
{
"name": "Perl",
"bytes": "3803"
},
{
"name": "Python",
"bytes": "3059136"
},
{
"name": "R",
"bytes": "1561613"
},
{
"name": "Ruby",
"bytes": "1615226"
},
{
"name": "Shell",
"bytes": "390773"
},
{
"name": "Thrift",
"bytes": "34246"
},
{
"name": "TypeScript",
"bytes": "1075563"
},
{
"name": "Vala",
"bytes": "24798"
}
],
"symlink_target": ""
}
|
from collections import deque
from itertools import takewhile, imap
from operator import itemgetter
from util import primes
# Python 2 only: the ``print`` statement, ``itertools.imap`` and the tuple
# parameter in ``lambda (i, v)`` were all removed in Python 3.
# Streams primes() lazily: takewhile keeps primes while the 0-based index
# is < 10001 (i.e. the first 10001 primes), the maxlen-1 deque retains only
# the most recent one, and pop() yields it — the 10001st prime.
print deque(
    imap(itemgetter(1),
         takewhile(lambda (i, v): i < 10001, enumerate(primes()))),
    maxlen=1).pop()
|
{
"content_hash": "d100dfef460b9e2c011a52b328c6bf1e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 22.90909090909091,
"alnum_prop": 0.7103174603174603,
"repo_name": "jcdenton/project-euler",
"id": "b88ea356f06499edfa5c41a07e4d998ab023aff1",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problem007.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9538"
}
],
"symlink_target": ""
}
|
import redis
import threading
from twisted.internet import reactor
import logging
import time
class RedisCommand(object):
    """A deferred redis operation.

    Bundles a callable with its arguments, an opaque caller context, a
    routing index, and a completion callback that receives
    ``(error, context, result)`` once the command has run.
    """

    def __init__(self, index, func, params, ctx, finish):
        self.__index = index
        self.__func = func
        self.__params = params
        self.__finish = finish
        self.__ctx = ctx
        self.__res = None

    def excute(self, rediscon):
        # (sic — name kept for API compatibility.) Run the stored callable
        # against the given connection and remember its result for finish().
        self.__res = self.__func(rediscon, *self.__params)

    def finish(self, error):
        # Deliver (error, context, result) to the completion callback.
        self.__finish(error, self.__ctx, self.__res)

    def getIndex(self):
        # Routing index used by RedisConnectionPool to pick a worker.
        return self.__index
class RedisConnection(object):
    """One redis connection serviced by a dedicated worker thread.

    Commands are queued with putCmd() and executed on the worker thread
    (run()); completion callbacks are marshalled back onto the Twisted
    reactor thread via reactor.callFromThread().
    """
    def __init__(self,ip='192.168.1.100',port=6379,db=0,password=None):
        # Connection parameters only; the pool/client and the worker
        # thread are created lazily in start().
        self.__ip = ip
        self.__port = port
        self.__db = db
        self.__passwd = password
        self.__pool = None
        self.__strictredis = None
        self.__exit = None          # set True by stop() to end the worker loop
        self.__thread = None        # worker thread running run()
        self.__event = None         # signalled when the queue is non-empty
        self.__lock = None          # guards __queue
        self.__queue = []           # pending RedisCommand objects (FIFO)
    def start(self):
        """Open the connection and, if the initial ping succeeds, start
        the worker thread. On failure __strictredis stays None and run()
        is never started."""
        self.__pool = redis.ConnectionPool(host=self.__ip,port=self.__port,db=self.__db,password=self.__passwd)
        self.__strictredis = redis.StrictRedis(connection_pool=self.__pool)
        try:
            self.__strictredis.ping()
        except(redis.exceptions.RedisError):
            logging.exception(u"redis connect failed %s:%d",self.__ip,self.__port)
            self.__strictredis = None
        if self.__strictredis:
            logging.info(u"redis connected %s:%d[%d]",self.__ip,self.__port,self.__db)
            self.__event = threading.Event()
            self.__lock = threading.Lock()
            self.__thread = threading.Thread(target=self.run)
            self.__thread.start()
    def connect(self):
        """(Re)create the pool/client and ping it; on failure reset
        __strictredis to None so run() keeps retrying."""
        self.__pool = redis.ConnectionPool(host=self.__ip,port=self.__port,db=self.__db,password=self.__passwd)
        self.__strictredis = redis.StrictRedis(connection_pool=self.__pool)
        try:
            self.__strictredis.ping()
        except(redis.exceptions.RedisError):
            logging.exception(u"redis reconnect failed! %s:%d",self.__ip, self.__port)
            self.__strictredis = None
        if self.__strictredis:
            logging.warn(u"redis %s:%d reconnected", self.__ip, self.__port)
    def stop(self):
        """Signal the worker loop to exit, join it, and drop all state.
        Queued-but-unexecuted commands are discarded without callbacks."""
        self.__exit = True
        self.__thread.join()
        self.__thread = None
        self.__event = None
        self.__lock = None
        self.__queue = []
    def run(self):
        """Worker loop: reconnect when disconnected (with ~5s backoff:
        50 x 0.1s sleeps), otherwise wait for queued commands, execute
        them, and report completion on the reactor thread."""
        while not self.__exit:
            if not self.__strictredis:
                self.connect()
                i = 0
                while i<50 and not self.__exit:
                    time.sleep(0.1)
                    i = i + 1
                continue
            # Short timeout so the loop can notice __exit promptly.
            if not self.__event.wait(timeout=0.1):
                continue;
            cmd = None
            self.__lock.acquire()
            if len(self.__queue)==0:
                # Queue drained: clear the event so wait() blocks again.
                self.__lock.release()
                self.__event.clear()
                continue
            cmd = self.__queue.pop(0)
            self.__lock.release()
            try:
                cmd.excute(self.__strictredis)
            except Exception as e:
                # On connection loss, try to reconnect before the next
                # command; either way report the failure to the caller.
                if isinstance(e, redis.exceptions.ConnectionError):
                    self.connect()
                reactor.callFromThread(cmd.finish, e)
                continue
            reactor.callFromThread(cmd.finish,None)
    def getStrictRedis(self):
        # Raw client; None when disconnected.
        return self.__strictredis
    def getPipe(self):
        # NOTE(review): raises AttributeError if called while disconnected
        # (__strictredis is None) — verify callers guard against this.
        return self.__strictredis.pipeline()
    def putCmd(self,cmd):
        # Append then wake the worker. Appending without __lock relies on
        # list.append being atomic in CPython — presumably intentional.
        self.__queue.append(cmd)
        self.__event.set()
class RedisConnectionPool(object):
    """A fixed set of RedisConnection workers.

    Commands are routed to a worker by ``RedisCommand.getIndex()``, so the
    caller controls the mapping of commands to connections/threads.
    """
    def __init__(self, ip, port, db, password, linkcount=5):
        self.__ip = ip
        self.__port = port
        self.__db = db
        self.__passwd = password
        # Worker connections, one dedicated thread each (see
        # RedisConnection); started lazily in start().
        self.__cons = [
            RedisConnection(ip, port, db, password) for _ in range(linkcount)
        ]
    def start(self):
        """Start every worker connection/thread."""
        for con in self.__cons:
            con.start()
    def stop(self):
        """Stop all workers and drop them."""
        for con in self.__cons:
            con.stop()
        # BUG FIX: this was ``self.__cons = {}`` (a dict), inconsistent
        # with the list used everywhere else in this class.
        self.__cons = []
    def getCharset(self):
        # FIXME: ``self.__charset`` is never assigned anywhere in this
        # class, so this method has always raised AttributeError. Kept
        # unchanged for API compatibility pending removal or a real
        # charset attribute.
        return self.__charset
    def putCmd(self, cmd):
        """Dispatch ``cmd`` to the worker selected by its index."""
        self.__cons[cmd.getIndex()].putCmd(cmd)
if __name__ == "__main__":
    # Manual smoke test: push a single command through a 10-connection
    # pool and let the Twisted reactor deliver the completion callback.
    redispool = RedisConnectionPool(ip="192.168.1.6",
                                    port=6379,
                                    db=4,
                                    password="zhanghe",
                                    linkcount=10)
    redispool.start()

    def func(rediscon, ip, prot):
        # Executed on a worker thread with the live redis client.
        print (ip, prot)
        rediscon.sadd(u"testset", time.time())

    def finesh(error, ctx, rows):
        # Executed on the reactor thread once the command completes.
        print (ctx, error, rows)

    demo_cmd = RedisCommand(index=1,
                            func=func,
                            params=("127.0.0.1", 1000),
                            ctx=(100, 888, 999),
                            finish=finesh)
    redispool.putCmd(demo_cmd)
    reactor.run()
|
{
"content_hash": "8b5ea5952880a338c258078d218270b5",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 111,
"avg_line_length": 29.122093023255815,
"alnum_prop": 0.517069275304452,
"repo_name": "xiexiangwei/xGame",
"id": "497fd94aecfea5a53b2dd36999f92803502d22fc",
"size": "5063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/redispool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "124202"
}
],
"symlink_target": ""
}
|
import json
import os
import shutil
import tempfile
import time
import zipfile
import multiprocessing
import contextlib
from unittest import mock
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import ValidationError
from django.test.utils import override_settings
import pytest
from olympia import amo
from olympia.amo.tests import TestCase, user_factory
from olympia.amo.tests.test_helpers import get_addon_file
from olympia.applications.models import AppVersion
from olympia.files import utils
pytestmark = pytest.mark.django_db
def _touch(fname):
    """Create *fname* if missing and bump its access/modification times."""
    with open(fname, 'a'):
        pass
    os.utime(fname, None)
class AppVersionsMixin:
    """Pre-creates the AppVersion rows that manifest parsing resolves
    min/max compatibility versions against."""

    @classmethod
    def setUpTestData(cls):
        cls.create_webext_default_versions()

    @classmethod
    def create_appversion(cls, name, version):
        """Return (creating if needed) the AppVersion for app *name*."""
        application = amo.APPS[name].id
        return AppVersion.objects.get_or_create(
            application=application, version=version
        )[0]

    @classmethod
    def create_webext_default_versions(cls):
        # get_or_create() makes this idempotent; order preserved from the
        # original explicit call sequence.
        defaults = (
            ('firefox', '36.0'),  # Incompatible with webexts.
            ('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION),
            ('firefox', amo.DEFAULT_WEBEXT_MAX_VERSION),
            ('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID),
            ('android', amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID),
            ('android', amo.DEFAULT_WEBEXT_MAX_VERSION),
            ('firefox', amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX),
            ('android', amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID),
            ('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_FIREFOX),
            ('android', amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_ANDROID),
        )
        for app_name, version in defaults:
            cls.create_appversion(app_name, version)
class TestManifestJSONExtractor(AppVersionsMixin, TestCase):
    """Tests for ManifestJSONExtractor parsing of WebExtension manifests.

    Fix applied: the three ``test_install_origins*`` methods contained
    bare ``==`` comparisons with no ``assert`` (flake8-bugbear B015), so
    they verified nothing; real assertions restored below.
    """

    def test_parse_xpi_no_manifest(self):
        fake_zip = utils.make_xpi({'dummy': 'dummy'})

        with mock.patch(
            'olympia.files.utils.get_file'
        ) as get_file_mock, self.assertRaises(utils.NoManifestFound) as exc:
            get_file_mock.return_value = fake_zip
            utils.parse_xpi(None)
        assert isinstance(exc.exception, forms.ValidationError)
        assert exc.exception.message == ('No manifest.json found')

    def test_static_theme_max_size(self):
        xpi_file = mock.Mock(size=settings.MAX_STATICTHEME_SIZE - 1)
        manifest = utils.ManifestJSONExtractor('{"theme": {}}').parse()

        # Calling to check it doesn't raise.
        assert utils.check_xpi_info(manifest, xpi_file=xpi_file)

        # Increase the size though and it should raise an error.
        xpi_file.size = settings.MAX_STATICTHEME_SIZE + 1
        with pytest.raises(forms.ValidationError) as exc:
            utils.check_xpi_info(manifest, xpi_file=xpi_file)

        assert exc.value.message == 'Maximum size for WebExtension themes is 7.0 MB.'

        # double check only static themes are limited
        manifest = utils.ManifestJSONExtractor('{}').parse()
        assert utils.check_xpi_info(manifest, xpi_file=xpi_file)

    def parse(self, base_data):
        return utils.ManifestJSONExtractor(json.dumps(base_data)).parse()

    def test_guid_from_applications(self):
        """Use applications>gecko>id for the guid."""
        assert (
            self.parse({'applications': {'gecko': {'id': 'some-id'}}})['guid']
            == 'some-id'
        )

    def test_guid_from_browser_specific_settings(self):
        """Use browser_specific_settings>gecko>id for the guid."""
        assert (
            self.parse({'browser_specific_settings': {'gecko': {'id': 'some-id'}}})[
                'guid'
            ]
            == 'some-id'
        )

    def test_non_string_guid(self):
        """Test that guid is converted to a string (or None)"""
        assert (
            self.parse({'browser_specific_settings': {'gecko': {'id': 12345}}})['guid']
            == '12345'
        )
        assert (
            self.parse({'browser_specific_settings': {'gecko': {'id': None}}})['guid']
            is None
        )

    def test_name_for_guid_if_no_id(self):
        """Don't use the name for the guid if there is no id."""
        assert self.parse({'name': 'addon-name'})['guid'] is None

    def test_type(self):
        """manifest.json addons with no specific properties present are extensions."""
        assert self.parse({})['type'] == amo.ADDON_EXTENSION

    def test_name(self):
        """Use name for the name."""
        assert self.parse({'name': 'addon-name'})['name'] == 'addon-name'

    def test_version(self):
        """Use version for the version."""
        assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'

    def test_homepage(self):
        """Use homepage_url for the homepage."""
        expected_homepage = 'http://my-addon.org'
        assert (
            self.parse({'homepage_url': expected_homepage})['homepage']
            == expected_homepage
        )

    def test_homepage_with_developer_url(self):
        # developer.url wins over homepage_url when both are present.
        expected_homepage = 'http://my-addon.org'
        assert (
            self.parse(
                {
                    'homepage_url': 'http://should-be-overridden',
                    'developer': {'url': expected_homepage},
                }
            )['homepage']
            == expected_homepage
        )

    def test_homepage_with_developer_and_no_url(self):
        # A developer block without a url falls back to homepage_url.
        expected_homepage = 'http://my-addon.org'
        assert (
            self.parse(
                {
                    'homepage_url': expected_homepage,
                    'developer': {'name': 'some name'},
                }
            )['homepage']
            == expected_homepage
        )

    def test_summary(self):
        """Use description for the summary."""
        assert self.parse({'description': 'An addon.'})['summary'] == 'An addon.'

    def test_invalid_strict_min_version(self):
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': 'A',
                    'id': '@invalid_strict_min_version',
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert exc.value.message == 'Lowest supported "strict_min_version" is 42.0.'

    def test_unknown_strict_min_version(self):
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '76.0',
                    'id': '@unknown_strict_min_version',
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert exc.value.message == ('Unknown "strict_min_version" 76.0 for Firefox')

    def test_unknown_strict_max_version(self):
        data = {
            'applications': {
                'gecko': {
                    'strict_max_version': '76.0',
                    'id': '@unknown_strict_max_version',
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert exc.value.message == ('Unknown "strict_max_version" 76.0 for Firefox')

    def test_strict_min_version_needs_to_be_higher_than_42_if_specified(self):
        """strict_min_version needs to be higher than 42.0 if specified."""
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '36.0',
                    'id': '@too_old_strict_min_version',
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert exc.value.message == 'Lowest supported "strict_min_version" is 42.0.'

    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '47.0')
        firefox_max_version = self.create_appversion('firefox', '47.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=47.0',
                    'strict_max_version': '=47.*',
                    'id': '@random',
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min == firefox_min_version
        assert app.max == firefox_max_version

        # We have no way of specifying a different version for Android when an
        # explicit version number is provided... That being said, we know that
        # 47.0 is too low for Android, so we silently cap it at 48.0. That
        # forces us to also change the max version for android.
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert app.max.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID

    def test_strict_min_version_100(self):
        firefox_min_version = self.create_appversion('firefox', '100.0')
        firefox_max_version = self.create_appversion('firefox', '100.*')
        android_min_version = self.create_appversion('android', '100.0')
        android_max_version = self.create_appversion('android', '100.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=100.0',
                    'strict_max_version': '=100.*',
                    'id': '@radioactive',
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min == firefox_min_version
        assert apps[0].max == firefox_max_version
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min == android_min_version
        assert apps[1].max == android_max_version

    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

        # But if 'browser_specific_settings' is used, it's higher min version.
        data = {'browser_specific_settings': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

        # And if mv3 then a higher min version again
        data['manifest_version'] = 3
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_FIREFOX)
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_ANDROID)
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_static_theme(self):
        manifest = utils.ManifestJSONExtractor('{"theme": {}}').parse()
        utils.check_xpi_info(manifest)
        assert self.parse({'theme': {}})['type'] == amo.ADDON_STATICTHEME

    def test_extensions_dont_have_strict_compatibility(self):
        assert self.parse({})['strict_compatibility'] is False

    @mock.patch('olympia.addons.models.resolve_i18n_message')
    def test_mozilla_trademark_disallowed(self, resolve_message):
        resolve_message.return_value = 'Notify Mozilla'

        addon = amo.tests.addon_factory()
        file_obj = addon.current_version.file
        fixture = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'

        with amo.tests.copy_file(fixture, file_obj.file_path):
            with pytest.raises(forms.ValidationError) as exc:
                utils.parse_xpi(file_obj.file_path)
                assert dict(exc.value.messages)['en-us'].startswith(
                    'Add-on names cannot contain the Mozilla or'
                )

    @mock.patch('olympia.addons.models.resolve_i18n_message')
    def test_mozilla_trademark_for_prefix_allowed(self, resolve_message):
        resolve_message.return_value = 'Notify for Mozilla'

        addon = amo.tests.addon_factory()
        file_obj = addon.current_version.file
        fixture = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'

        with amo.tests.copy_file(fixture, file_obj.file_path):
            utils.parse_xpi(file_obj.file_path)

    def test_apps_use_default_versions_if_applications_is_omitted(self):
        """
        WebExtensions are allowed to omit `applications[/gecko]` and we
        previously skipped defaulting to any `AppVersion` once this is not
        defined. That resulted in none of our platforms being selectable.

        See https://github.com/mozilla/addons-server/issues/2586 and
        probably many others.
        """
        data = {}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_handle_utf_bom(self):
        manifest = b'\xef\xbb\xbf{"manifest_version": 2, "name": "..."}'
        parsed = utils.ManifestJSONExtractor(manifest).parse()
        assert parsed['name'] == '...'

    def test_raise_error_if_no_optional_id_support(self):
        """
        We only support optional ids in Firefox 48+ and will throw an error
        otherwise.
        """
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '42.0',
                    'strict_max_version': '49.0',
                }
            }
        }

        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)['apps']

        assert exc.value.message == 'Add-on ID is required for Firefox 47 and below.'

    def test_comments_are_allowed(self):
        json_string = """
        {
            // Required
            "manifest_version": 2,
            "name": "My Extension",
            "version": "versionString",

            // Recommended
            "default_locale": "en",
            "description": "A plain text description"
        }
        """
        manifest = utils.ManifestJSONExtractor(json_string).parse()

        assert manifest.get('name') == 'My Extension'

    def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
        # We shouldn't skip adding specific apps to the WebExtension
        # no matter any potential incompatibility, e.g
        # browser_specific_settings is only supported from Firefox 48.0
        # onwards, now if the user specifies strict_min_compat as 42.0
        # we shouldn't skip the app because of that. Instead we override the
        # value with the known min version that started supporting that.
        data = {
            'browser_specific_settings': {
                'gecko': {'strict_min_version': '42.0', 'id': '@random'}
            }
        }

        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)

    def test_devtools_page(self):
        json_string = """
        {
            // Required
            "manifest_version": 2,
            "name": "My Extension",
            "version": "versionString",

            // Recommended
            "default_locale": "en",
            "description": "A plain text description",

            "devtools_page": "devtools/my-page.html"
        }
        """
        parsed_data = utils.ManifestJSONExtractor(json_string).parse()

        assert parsed_data['devtools_page'] == 'devtools/my-page.html'

    def test_version_not_string(self):
        """Test parsing doesn't fail if version is not a string - that error
        should be handled downstream by the linter."""
        data = {'version': 42}
        assert self.parse(data)['version'] == '42'
        data = {'version': 42.0}
        assert self.parse(data)['version'] == '42.0'
        # These are even worse, but what matters is that version stays a string
        # in the result.
        data = {'version': {}}
        assert self.parse(data)['version'] == '{}'
        data = {'version': []}
        assert self.parse(data)['version'] == '[]'
        data = {'version': None}
        assert self.parse(data)['version'] == 'None'

    def test_install_origins(self):
        # BUG FIX: these comparisons previously lacked ``assert`` and
        # therefore verified nothing.
        assert self.parse({})['install_origins'] == []
        assert self.parse({'install_origins': ['https://fôo.com']})[
            'install_origins'
        ] == ['https://fôo.com']
        assert self.parse(
            {'install_origins': ['https://bâr.net', 'https://alice.org']}
        )['install_origins'] == ['https://bâr.net', 'https://alice.org']

    def test_install_origins_wrong_type_ignored(self):
        # BUG FIX: assertions restored (were bare comparisons).
        assert self.parse({'install_origins': 42})['install_origins'] == []
        assert self.parse({'install_origins': None})['install_origins'] == []
        assert self.parse({'install_origins': {}})['install_origins'] == []

    def test_install_origins_wrong_type_inside_list_ignored(self):
        # BUG FIX: assertions restored (were bare comparisons).
        assert self.parse({'install_origins': [42]})['install_origins'] == []
        assert self.parse({'install_origins': [None]})['install_origins'] == []
        assert self.parse({'install_origins': [{}]})['install_origins'] == []
        assert self.parse({'install_origins': [['https://inception.com']]})[
            'install_origins'
        ] == []
        assert self.parse({'install_origins': [42, 'https://goo.com']})[
            'install_origins'
        ] == ['https://goo.com']
        # 'flop' is not a valid origin, but the linter is responsible for that
        # validation. We just care about it being a string so that we don't
        # raise a TypeError later in the process.
        assert self.parse({'install_origins': [42, 'flop']})[
            'install_origins'
        ] == ['flop']
class TestLanguagePackAndDictionaries(AppVersionsMixin, TestCase):
    """Parsing and submission rules specific to language packs and
    dictionaries (add-on types ADDON_LPAPP and ADDON_DICT)."""

    def test_parse_langpack(self):
        # Langpacks pin exact app versions, so create them first.
        self.create_appversion('firefox', '60.0')
        self.create_appversion('firefox', '60.*')
        self.create_appversion('android', '60.0')
        self.create_appversion('android', '60.*')

        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=60.0',
                    'strict_max_version': '=60.*',
                    'id': '@langp',
                }
            },
            'langpack_id': 'foo',
        }

        parsed_data = utils.ManifestJSONExtractor(json.dumps(data)).parse()
        assert parsed_data['type'] == amo.ADDON_LPAPP
        # Langpacks are strictly tied to the app version they target.
        assert parsed_data['strict_compatibility'] is True

        apps = parsed_data['apps']
        assert len(apps) == 1  # Langpacks are not compatible with android.
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == '60.0'
        assert apps[0].max.version == '60.*'

    def test_parse_langpack_not_targeting_versions_explicitly(self):
        data = {'applications': {'gecko': {'id': '@langp'}}, 'langpack_id': 'foo'}

        parsed_data = utils.ManifestJSONExtractor(json.dumps(data)).parse()
        assert parsed_data['type'] == amo.ADDON_LPAPP
        assert parsed_data['strict_compatibility'] is True

        apps = parsed_data['apps']
        assert len(apps) == 1  # Langpacks are not compatible with android.
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == '42.0'
        # The linter should force the langpack to have a strict_max_version,
        # so the value here doesn't matter much.
        assert apps[0].max.version == '*'

    def test_parse_dictionary(self):
        self.create_appversion('firefox', '61.0')
        data = {
            'applications': {'gecko': {'id': '@dict'}},
            'dictionaries': {'en-US': '/path/to/en-US.dic'},
        }

        parsed_data = utils.ManifestJSONExtractor(json.dumps(data)).parse()
        assert parsed_data['type'] == amo.ADDON_DICT
        assert parsed_data['strict_compatibility'] is False
        # The dictionary's locale is taken from the 'dictionaries' key.
        assert parsed_data['target_locale'] == 'en-US'

        apps = parsed_data['apps']
        assert len(apps) == 1  # Dictionaries are not compatible with android.
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == '61.0'
        assert apps[0].max.version == '*'

    def test_parse_broken_dictionary(self):
        # An empty 'dictionaries' dict is invalid.
        data = {'dictionaries': {}}
        with self.assertRaises(forms.ValidationError):
            utils.ManifestJSONExtractor(json.dumps(data)).parse()

    def test_check_xpi_info_langpack_submission_restrictions(self):
        user = user_factory()
        self.create_appversion('firefox', '60.0')
        self.create_appversion('firefox', '60.*')

        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=60.0',
                    'strict_max_version': '=60.*',
                    'id': '@langp',
                }
            },
            'langpack_id': 'foo',
        }
        parsed_data = utils.ManifestJSONExtractor(json.dumps(data)).parse()

        with self.assertRaises(ValidationError):
            # Regular users aren't allowed to submit langpacks.
            utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)

        # Shouldn't raise for users with proper permissions
        self.grant_permission(user, ':'.join(amo.permissions.LANGPACK_SUBMIT))

        utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
class TestSitePermission(AppVersionsMixin, TestCase):
    """Submission rules for "site permission" add-ons (e.g. webmidi)."""

    def parse(self):
        manifest = '{"site_permissions": ["webmidi"]}'
        return utils.ManifestJSONExtractor(manifest).parse()

    def test_allow_regular_submission_of_site_permissions_addons_with_permission(self):
        submitter = user_factory()
        self.grant_permission(submitter, 'Addons:SubmitSitePermission')
        parsed = self.parse()
        assert parsed['type'] == amo.ADDON_SITE_PERMISSION
        assert parsed['site_permissions'] == ['webmidi']
        assert utils.check_xpi_info(parsed, user=submitter)

    def test_allow_submission_of_site_permissions_addons_from_task_user(self):
        # The task user bypasses the permission requirement.
        task_user = user_factory(pk=settings.TASK_USER_ID)
        parsed = self.parse()
        assert parsed['type'] == amo.ADDON_SITE_PERMISSION
        assert parsed['site_permissions'] == ['webmidi']
        assert utils.check_xpi_info(parsed, user=task_user)

    def test_disallow_regular_submission_of_site_permission_addons_no_user(self):
        parsed = self.parse()
        with self.assertRaises(ValidationError):
            utils.check_xpi_info(parsed)

    def test_disallow_regular_submission_of_site_permission_addons_normal_user(self):
        regular_user = user_factory()
        parsed = self.parse()
        with self.assertRaises(ValidationError):
            utils.check_xpi_info(parsed, user=regular_user)
class TestManifestJSONExtractorStaticTheme(TestManifestJSONExtractor):
    """Re-runs the extension manifest tests with a "theme" key injected,
    overriding the expectations that differ for static themes.

    Cleanup: a byte-identical override of ``test_unknown_strict_max_version``
    was removed — it duplicated the inherited method exactly, so pytest now
    simply runs the parent's version with identical behavior.
    """

    def parse(self, base_data):
        # Force every parsed manifest to be a static theme.
        if 'theme' not in base_data.keys():
            base_data.update(theme={})
        return super().parse(base_data)

    def test_type(self):
        assert self.parse({})['type'] == amo.ADDON_STATICTHEME

    def test_apps_use_default_versions_if_applications_is_omitted(self):
        """
        Override this because static themes have a higher default version.
        """
        data = {}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '66.0')
        firefox_max_version = self.create_appversion('firefox', '66.*')
        android_min_version = self.create_appversion('android', '66.0')
        android_max_version = self.create_appversion('android', '66.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=66.0',
                    'strict_max_version': '=66.*',
                    'id': '@random',
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min == firefox_min_version
        assert apps[0].max == firefox_max_version
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min == android_min_version
        assert apps[1].max == android_max_version

    def test_theme_json_extracted(self):
        # Check theme data is extracted from the manifest and returned.
        data = {'theme': {'colors': {'tab_background_text': '#3deb60'}}}
        assert self.parse(data)['theme'] == data['theme']

    def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
        # In the parent class this method would bump the min_version to 48.0
        # because that's the first version to support
        # browser_specific_settings, but in static themes we bump it even
        # higher because of the minimum version when we started supporting
        # static themes themselves.
        data = {
            'browser_specific_settings': {
                'gecko': {'strict_min_version': '42.0', 'id': '@random'}
            }
        }

        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
@pytest.mark.parametrize(
    'filename, expected_files',
    [
        # Every supported archive type must extract to the same file tree.
        (name, [
            'README.md',
            'beasts',
            'button',
            'content_scripts',
            'manifest.json',
            'popup',
        ])
        for name in [
            'webextension_no_id.xpi',
            'webextension_no_id.zip',
            'webextension_no_id.tar.gz',
            'webextension_no_id.tar.bz2',
        ]
    ],
)
def test_extract_extension_to_dest(filename, expected_files):
    """Archives of every supported type extract to the expected file tree."""
    # BUG FIX: the f-string previously contained no placeholder (the
    # parametrized ``filename`` was never used), so every case pointed at
    # a non-existent path.
    extension_file = f'src/olympia/files/fixtures/files/{filename}'
    with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
        temp_folder = utils.extract_extension_to_dest(extension_file)

    assert sorted(os.listdir(temp_folder)) == expected_files

    # fsync isn't called by default
    assert not fsync_mock.called
@pytest.mark.parametrize(
    'filename',
    [
        'webextension_no_id.xpi',
        'webextension_no_id.zip',
        'webextension_no_id.tar.bz2',
        'webextension_no_id.tar.gz',
    ],
)
def test_extract_extension_to_dest_call_fsync(filename):
    """``force_fsync=True`` must fsync every extracted file."""
    # BUG FIX: interpolate ``filename`` — the f-string had no placeholder,
    # so the parametrized value was never used.
    extension_file = f'src/olympia/files/fixtures/files/{filename}'
    with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
        utils.extract_extension_to_dest(extension_file, force_fsync=True)

    # With force_fsync=True, fsync must have been called. (The copied-in
    # comment previously said the opposite of what is asserted here.)
    assert fsync_mock.called
def test_extract_extension_to_dest_non_existing_archive():
    """A missing archive raises and still removes the temp directory."""
    missing_archive = 'src/olympia/files/fixtures/files/doesntexist.zip'
    with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
        with pytest.raises(FileNotFoundError):
            utils.extract_extension_to_dest(missing_archive)
    # The temporary extraction directory must be cleaned up on failure.
    assert mock_rmtree.called
def test_extract_extension_to_dest_invalid_archive():
extension_file = 'src/olympia/files/fixtures/files/invalid-cp437-encoding.xpi'
with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
with pytest.raises(forms.ValidationError):
utils.extract_extension_to_dest(extension_file)
# Make sure we are cleaning up our temporary directory if possible
assert mock_rmtree.called
@pytest.fixture
def file_obj():
    """Return the File of a freshly created add-on with a fixed guid."""
    addon = amo.tests.addon_factory()
    addon.update(guid='xxxxx')
    version = addon.current_version
    return version.file
@pytestmark
def test_bump_version_in_manifest_json(file_obj):
    # AppVersion rows must exist so parse_xpi() can validate the add-on's
    # declared compatibility range.
    AppVersion.objects.create(
        application=amo.FIREFOX.id, version=amo.DEFAULT_WEBEXT_MIN_VERSION
    )
    AppVersion.objects.create(
        application=amo.FIREFOX.id, version=amo.DEFAULT_WEBEXT_MAX_VERSION
    )
    AppVersion.objects.create(
        application=amo.ANDROID.id, version=amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
    )
    AppVersion.objects.create(
        application=amo.ANDROID.id, version=amo.DEFAULT_WEBEXT_MAX_VERSION
    )
    with amo.tests.copy_file(
        'src/olympia/files/fixtures/files/webextension.xpi', file_obj.file_path
    ):
        # Rewrite the version inside the xpi, then re-parse to confirm the
        # manifest was actually updated on disk.
        utils.update_version_number(file_obj, '0.0.1.1-signed')
        parsed = utils.parse_xpi(file_obj.file_path)
        assert parsed['version'] == '0.0.1.1-signed'
def test_extract_translations_simple(file_obj):
    # The i18n fixture ships _locales/ subfolders for these languages.
    extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
    with amo.tests.copy_file(extension, file_obj.file_path):
        messages = utils.extract_translations(file_obj)
        assert list(sorted(messages.keys())) == [
            'de',
            'en-US',
            'ja',
            'nb-NO',
            'nl',
            'ru',
            'sv-SE',
        ]
@mock.patch('olympia.files.utils.zipfile.ZipFile.read')
def test_extract_translations_fail_silent_invalid_file(read_mock, file_obj):
    # extract_translations() must swallow expected archive/JSON errors but
    # still propagate anything unexpected.
    extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
    with amo.tests.copy_file(extension, file_obj.file_path):
        read_mock.side_effect = KeyError
        # Does not raise an exception
        utils.extract_translations(file_obj)
        read_mock.side_effect = IOError
        # Does not raise an exception too
        utils.extract_translations(file_obj)
        # We don't fail on invalid JSON too, this is addons-linter domain
        read_mock.side_effect = ValueError
        utils.extract_translations(file_obj)
        # But everything else...
        read_mock.side_effect = TypeError
        with pytest.raises(TypeError):
            utils.extract_translations(file_obj)
def test_get_all_files():
    # get_all_files() walks recursively and returns a sorted flat listing
    # that includes directories themselves.
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    assert utils.get_all_files(tempdir) == [
        os.path.join(tempdir, 'dir1'),
        os.path.join(tempdir, 'dir1', 'foo2'),
        os.path.join(tempdir, 'foo1'),
    ]
    shutil.rmtree(tempdir)
    assert not os.path.exists(tempdir)
def test_get_all_files_strip_prefix_no_prefix_silent():
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # strip_prefix alone doesn't do anything.
    assert utils.get_all_files(tempdir, strip_prefix=tempdir) == [
        os.path.join(tempdir, 'dir1'),
        os.path.join(tempdir, 'dir1', 'foo2'),
        os.path.join(tempdir, 'foo1'),
    ]
def test_get_all_files_prefix():
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # prefix alone is prepended verbatim to every returned path.
    assert utils.get_all_files(tempdir, prefix='/foo/bar') == [
        '/foo/bar' + os.path.join(tempdir, 'dir1'),
        '/foo/bar' + os.path.join(tempdir, 'dir1', 'foo2'),
        '/foo/bar' + os.path.join(tempdir, 'foo1'),
    ]
def test_get_all_files_prefix_with_strip_prefix():
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # strip_prefix combined with prefix replaces the tempdir portion.
    result = utils.get_all_files(tempdir, strip_prefix=tempdir, prefix='/foo/bar')
    assert result == [
        os.path.join('/foo', 'bar', 'dir1'),
        os.path.join('/foo', 'bar', 'dir1', 'foo2'),
        os.path.join('/foo', 'bar', 'foo1'),
    ]
def test_lock_with_lock_attained():
    # An uncontended lock is acquired immediately.
    with utils.lock(settings.TMP_PATH, 'test-lock-lock2') as lock_attained:
        assert lock_attained
@contextlib.contextmanager
def _run_lock_holding_process(lock_name, sleep):
    """Spawn a child process that holds `lock_name` for `sleep` seconds."""
    def _other_process_holding_lock():
        with utils.lock(settings.TMP_PATH, lock_name) as lock_attained:
            assert lock_attained
            time.sleep(sleep)
    other_process = multiprocessing.Process(target=_other_process_holding_lock)
    other_process.start()
    # Give the process some time to acquire the lock
    time.sleep(0.2)
    yield other_process
    other_process.join()
def test_lock_timeout():
    with _run_lock_holding_process('test-lock-lock3', sleep=2):
        # Waiting for 3 seconds allows us to attain the lock from the parent
        # process.
        lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=3)
        with lock as lock_attained:
            assert lock_attained
    with _run_lock_holding_process('test-lock-lock3', sleep=2):
        # Waiting only 1 second fails to acquire the lock
        lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=1)
        with lock as lock_attained:
            assert not lock_attained
class TestResolvei18nMessage:
    """Tests for utils.resolve_i18n_message (__MSG_<key>__ substitution)."""
    def test_no_match(self):
        # Plain strings without the __MSG_..__ marker pass through untouched.
        assert utils.resolve_i18n_message('foo', {}, '') == 'foo'
    def test_locale_found(self):
        messages = {'de': {'foo': {'message': 'bar'}}}
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de')
        assert result == 'bar'
    def test_uses_default_locale(self):
        messages = {'en-US': {'foo': {'message': 'bar'}}}
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de', 'en')
        assert result == 'bar'
    def test_no_locale_match(self):
        # Neither `locale` nor the default locale are found, "message" is
        # returned unchanged
        messages = {'fr': {'foo': {'message': 'bar'}}}
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de', 'en')
        assert result == '__MSG_foo__'
    def test_field_not_set(self):
        """Make sure we don't fail on messages that are `None`
        Fixes https://github.com/mozilla/addons-server/issues/3067
        """
        result = utils.resolve_i18n_message(None, {}, 'de', 'en')
        assert result is None
    def test_field_no_string(self):
        """Make sure we don't fail on messages that are no strings"""
        result = utils.resolve_i18n_message([], {}, 'de', 'en')
        assert result == []
    def test_corrects_locales(self):
        # 'en' is normalized to the available 'en-US' catalog.
        messages = {'en-US': {'foo': {'message': 'bar'}}}
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
        assert result == 'bar'
    def test_ignore_wrong_format(self):
        # Entries must be dicts with a 'message' key; malformed entries are
        # ignored and the placeholder is kept.
        messages = {'en-US': {'foo': 'bar'}}
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
        assert result == '__MSG_foo__'
class TestGetBackgroundImages(TestCase):
    """Tests for utils.get_background_images on static theme archives."""
    # Theme using the current `theme_frame` manifest key.
    file_obj = os.path.join(
        settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip'
    )
    # Theme using the deprecated `headerURL` manifest key.
    file_obj_dep = os.path.join(
        settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme_deprecated.zip'
    )
    def test_get_background_images(self):
        data = {'images': {'theme_frame': 'weta.png'}}
        images = utils.get_background_images(self.file_obj, data)
        assert 'weta.png' in images
        assert len(images.items()) == 1
        assert len(images['weta.png']) == 126447
    def test_get_background_deprecated(self):
        # The deprecated headerURL key must still be honored.
        data = {'images': {'headerURL': 'weta.png'}}
        images = utils.get_background_images(self.file_obj_dep, data)
        assert 'weta.png' in images
        assert len(images.items()) == 1
        assert len(images['weta.png']) == 126447
    @mock.patch('olympia.amo.utils.SafeStorage.base_location', '/')
    def test_get_background_images_no_theme_data_provided(self):
        # Without theme_data the manifest is parsed from the archive itself.
        images = utils.get_background_images(self.file_obj, theme_data=None)
        assert 'weta.png' in images
        assert len(images.items()) == 1
        assert len(images['weta.png']) == 126447
    def test_get_background_images_missing(self):
        # A theme_frame pointing at a file absent from the archive yields
        # an empty result rather than an error.
        data = {'images': {'theme_frame': 'missing_file.png'}}
        images = utils.get_background_images(self.file_obj, data)
        assert not images
    def test_get_background_images_not_image(self):
        self.file_obj = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme_non_image.zip'
        )
        data = {'images': {'theme_frame': 'not_an_image.js'}}
        images = utils.get_background_images(self.file_obj, data)
        assert not images
    def test_get_background_images_with_additional_imgs(self):
        self.file_obj = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme_tiled.zip'
        )
        data = {
            'images': {
                'theme_frame': 'empty.png',
                'additional_backgrounds': [
                    'transparent.gif',
                    'missing_&_ignored.png',
                    'weta_for_tiling.png',
                ],
            }
        }
        images = utils.get_background_images(self.file_obj, data)
        assert len(images.items()) == 3
        assert len(images['empty.png']) == 332
        assert len(images['transparent.gif']) == 42
        assert len(images['weta_for_tiling.png']) == 93371
        # And again but only with the header image
        images = utils.get_background_images(self.file_obj, data, header_only=True)
        assert len(images.items()) == 1
        assert len(images['empty.png']) == 332
@pytest.mark.parametrize(
    'value, expected',
    [
        (1, '1/01/1'),
        (12, '2/12/12'),
        (123, '3/23/123'),
        (1234, '4/34/1234'),
        (123456789, '9/89/123456789'),
    ],
)
def test_id_to_path(value, expected):
    # Default breadth: path segments are the last 1 and 2 digits of the id.
    assert utils.id_to_path(value) == expected
@pytest.mark.parametrize(
    'value, expected',
    [
        (1, '01/0001/1'),
        (12, '12/0012/12'),
        (123, '23/0123/123'),
        (1234, '34/1234/1234'),
        (123456, '56/3456/123456'),
        (123456789, '89/6789/123456789'),
    ],
)
def test_id_to_path_depth(value, expected):
    # breadth=2 doubles segment width: last 2 and 4 digits, zero-padded.
    assert utils.id_to_path(value, breadth=2) == expected
class TestSafeZip(TestCase):
    """Tests for utils.SafeZip validation behavior on hostile archives."""
    def test_raises_error_for_invalid_webextension_xpi(self):
        with pytest.raises(zipfile.BadZipFile):
            utils.SafeZip(get_addon_file('invalid_webextension.xpi'))
    def test_raises_error_for_archive_with_backslashes_in_filenames(self):
        filename = (
            'src/olympia/files/'
            'fixtures/files/archive-with-invalid-chars-in-filenames.zip'
        )
        with pytest.raises(utils.InvalidZipFile):
            utils.SafeZip(filename)
    def test_ignores_error_for_archive_with_backslashes_in_filenames_with_argument(
        self,
    ):
        # ignore_filename_errors=True allows otherwise rejected names.
        filename = (
            'src/olympia/files/'
            'fixtures/files/archive-with-invalid-chars-in-filenames.zip'
        )
        utils.SafeZip(filename, ignore_filename_errors=True)
    def test_raises_validation_error_when_uncompressed_size_is_too_large(self):
        with override_settings(MAX_ZIP_UNCOMPRESSED_SIZE=1000):
            with pytest.raises(utils.InvalidZipFile):
                # total uncompressed size of this xpi is 126kb
                utils.SafeZip(get_addon_file('mozilla_static_theme.zip'))
class TestArchiveMemberValidator(TestCase):
    # We cannot easily test `archive_member_validator` so let's test
    # `_validate_archive_member_name_and_size` instead.
    def test_raises_when_filename_is_none(self):
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size(None, 123)
    def test_raises_when_filesize_is_none(self):
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size('filename', None)
    def test_raises_when_filename_is_dot_dot_slash(self):
        # Path-traversal style names must be rejected.
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size('../', 123)
    def test_raises_when_filename_starts_with_slash(self):
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size('/..', 123)
    def test_raises_when_filename_contains_backslashes(self):
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size('path\\to\\file.txt', 123)
    def test_raises_when_filename_is_dot_dot(self):
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size('..', 123)
    def test_ignores_when_filename_is_dot_dot_slash_with_argument(self):
        # ignore_filename_errors=True disables the name checks entirely.
        utils._validate_archive_member_name_and_size(
            '../', 123, ignore_filename_errors=True
        )
    def test_ignores_when_filename_starts_with_slash_with_argument(self):
        utils._validate_archive_member_name_and_size(
            '/..', 123, ignore_filename_errors=True
        )
    def test_ignores_when_filename_contains_backslashes_with_argument(self):
        utils._validate_archive_member_name_and_size(
            'path\\to\\file.txt', 123, ignore_filename_errors=True
        )
    def test_ignores_when_filename_is_dot_dot_with_argument(self):
        utils._validate_archive_member_name_and_size(
            '..', 123, ignore_filename_errors=True
        )
    def test_does_not_raise_when_filename_is_dot_dot_extension(self):
        # '..' embedded inside a name (not as a path component) is fine.
        utils._validate_archive_member_name_and_size('foo..svg', 123)
    @override_settings(FILE_UNZIP_SIZE_LIMIT=100)
    def test_raises_when_filesize_is_above_limit(self):
        with pytest.raises(utils.InvalidZipFile):
            utils._validate_archive_member_name_and_size(
                'filename', settings.FILE_UNZIP_SIZE_LIMIT + 100
            )
class TestWriteCrxAsXpi(TestCase):
    """Tests for utils.write_crx_as_xpi (Chrome CRX to zip conversion)."""
    def setUp(self):
        self.tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
        self.target = os.path.join(self.tempdir, 'target')
        self.prefix = 'src/olympia/files/fixtures/files'
    def tearDown(self):
        storage.delete(self.target)
        storage.delete(self.tempdir)
    # Note: those tests are also performed in test_models.py using
    # FileUpload.from_post() to ensure the relevant exception is caught if they
    # are raised and the add-on is then fully processed correctly. These just
    # test the underlying function that does the conversion from crx to xpi.
    def test_webextension_crx(self):
        path = os.path.join(self.prefix, 'webextension.crx')
        with open(path, 'rb') as source:
            utils.write_crx_as_xpi(source, self.target)
        assert zipfile.is_zipfile(self.target)
    def test_webextension_crx_large(self):
        path = os.path.join(self.prefix, 'https-everywhere.crx')
        with open(path, 'rb') as source:
            utils.write_crx_as_xpi(source, self.target)
        assert zipfile.is_zipfile(self.target)
    def test_webextension_crx_version_3(self):
        path = os.path.join(self.prefix, 'webextension_crx3.crx')
        with open(path, 'rb') as source:
            utils.write_crx_as_xpi(source, self.target)
        assert zipfile.is_zipfile(self.target)
    def test_webextension_crx_not_a_crx(self):
        # Magic bytes 'Cr42' instead of the expected 'Cr24'.
        file_ = SimpleUploadedFile(
            'foo.crx', b'Cr42\x02\x00\x00\x00&\x01\x00\x00\x00\x01\x00\x00'
        )
        with self.assertRaises(utils.InvalidOrUnsupportedCrx) as exc:
            utils.write_crx_as_xpi(file_, self.target)
        assert str(exc.exception) == 'CRX file does not start with Cr24'
        # It's the caller responsibility to move the original file there, as if
        # it was a regular zip, since we couldn't convert it.
        assert not storage.exists(self.target)
    def test_webextension_crx_version_unsupported(self):
        # Version field is 4; only versions 2 and 3 are supported.
        file_ = SimpleUploadedFile(
            'foo.crx', b'Cr24\x04\x00\x00\x00&\x01\x00\x00\x00\x01\x00\x00'
        )
        with self.assertRaises(utils.InvalidOrUnsupportedCrx) as exc:
            utils.write_crx_as_xpi(file_, self.target)
        assert str(exc.exception) == 'Unsupported CRX version'
        # It's the caller responsibility to move the original file there, as if
        # it was a regular zip, since we couldn't convert it.
        assert not storage.exists(self.target)
    def test_webextension_crx_version_cant_unpack(self):
        # Truncated header: valid magic and version but corrupt payload.
        file_ = SimpleUploadedFile(
            'foo.crx', b'Cr24\x02\x00\x00\x00&\x00\x00\x00\x01\x00\x00'
        )
        with self.assertRaises(utils.InvalidOrUnsupportedCrx) as exc:
            utils.write_crx_as_xpi(file_, self.target)
        assert str(exc.exception) == 'Invalid or corrupt CRX file'
        # It's the caller responsibility to move the original file there, as if
        # it was a regular zip, since we couldn't convert it.
        assert not storage.exists(self.target)
|
{
"content_hash": "84ed1c2c5baa14b6081e7e8e141c7488",
"timestamp": "",
"source": "github",
"line_count": 1319,
"max_line_length": 88,
"avg_line_length": 37.19636087945413,
"alnum_prop": 0.5985487750193632,
"repo_name": "mozilla/olympia",
"id": "686b4e6f53a830aecb0f57e6afb638f6e0ce8103",
"size": "49067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/files/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
from snmpsim.error import SnmpsimError
from snmpsim.grammar import abstract
class AbstractRecord(object):
    """Base class for snmpsim data-file records.

    Concrete record formats subclass this and override the
    evaluate_*/format_* hooks; every base implementation raises
    `SnmpsimError` to flag the missing override.
    """
    grammar = abstract.AbstractGrammar()
    ext = ''

    def _unimplemented(self):
        # Shared raise path so each hook reports its concrete class name.
        raise SnmpsimError(
            'Method not implemented at '
            '%s' % self.__class__.__name__)

    def evaluate_oid(self, oid):
        self._unimplemented()

    def evaluate_value(self, oid, tag, value, **context):
        self._unimplemented()

    def evaluate(self, line, **context):
        self._unimplemented()

    def format_oid(self, oid):
        self._unimplemented()

    def format_value(self, oid, value, **context):
        self._unimplemented()

    def format(self, oid, value, **context):
        self._unimplemented()

    @staticmethod
    def open(path, flags='rb'):
        # Thin indirection over the builtin so subclasses/tests can swap it.
        return open(path, flags)
|
{
"content_hash": "d374be5b1023ba7af8a18352b6ced957",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 57,
"avg_line_length": 29,
"alnum_prop": 0.5517241379310345,
"repo_name": "etingof/snmpsim",
"id": "3c5f8f7ba2e99e8a8dd165d8cddfc7ff01479dd5",
"size": "1348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snmpsim/record/abstract.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "287683"
},
{
"name": "Shell",
"bytes": "4190"
}
],
"symlink_target": ""
}
|
"""Samyro learner parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from samyro import read, model
class Trainer(object):
    """A training regime."""
    def __init__(self, model_instance, sampler, batch_size, reuse):
        # NOTE(review): batch_size is accepted but never used here —
        # presumably consumed by the caller's batch iterator; confirm.
        assert isinstance(model_instance, model.Model)
        assert isinstance(sampler, read.Sampler)
        # Separate placeholders for inputs and expected outputs.
        input_placeholder = sampler.placeholder()
        output_placeholder = sampler.placeholder()
        labels = sampler.as_labels(output_placeholder)
        self.feed_vars = (input_placeholder, output_placeholder)
        # Build the train graph with the given `reuse`, then the eval graph
        # with reuse=True so both share the same variables.
        self.train_op, self.train_loss = model_instance.train_op_loss(
            input_placeholder,
            labels, reuse=reuse)
        self.eval_accuracy = model_instance.eval_accuracy(
            input_placeholder,
            labels, reuse=True)
    def train_pass(self, runner, batch_iterator, num_batches=300,
                   print_every=25):
        # One training sweep of `num_batches` steps, logging loss periodically.
        runner.train_model(train_op=self.train_op,
                           cost_to_log=self.train_loss,
                           num_steps=num_batches,
                           feed_vars=self.feed_vars,
                           feed_data=batch_iterator,
                           print_every=print_every)
    def eval_pass(self, runner, batch_iterator, num_batches=300):
        # Evaluation sweep; returns the accuracy computed by the runner.
        return runner.evaluate_model(accuracy=self.eval_accuracy,
                                     num_steps=num_batches,
                                     feed_vars=self.feed_vars,
                                     feed_data=batch_iterator)
|
{
"content_hash": "4492e6b0835cf8e148931c8ef603928b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 70,
"avg_line_length": 39.46341463414634,
"alnum_prop": 0.5784919653893696,
"repo_name": "jkahn/samyro",
"id": "c01071e91851fe575585f636c0334ade1437e5db",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samyro/learn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38917"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
}
|
"""
Unit tests for some APIs with conditional logic in adb_wrapper.py
"""
import unittest
from devil import devil_env
from devil.android import device_errors
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class AdbWrapperTest(unittest.TestCase):
  """Tests for conditional logic in AdbWrapper (verity toggles, errors)."""
  def setUp(self):
    self.device_serial = 'ABC12345678'
    self.adb = adb_wrapper.AdbWrapper(self.device_serial)
  def _MockRunDeviceAdbCmd(self, return_value):
    # Patch the low-level command runner so no real device is needed.
    return mock.patch.object(
        self.adb, '_RunDeviceAdbCmd',
        mock.Mock(side_effect=None, return_value=return_value))
  def testDisableVerityWhenDisabled(self):
    with self._MockRunDeviceAdbCmd('Verity already disabled on /system'):
      self.adb.DisableVerity()
  def testDisableVerityWhenEnabled(self):
    with self._MockRunDeviceAdbCmd(
        'Verity disabled on /system\nNow reboot your device for settings to '
        'take effect'):
      self.adb.DisableVerity()
  def testEnableVerityWhenEnabled(self):
    with self._MockRunDeviceAdbCmd('Verity already enabled on /system'):
      self.adb.EnableVerity()
  def testEnableVerityWhenDisabled(self):
    with self._MockRunDeviceAdbCmd(
        'Verity enabled on /system\nNow reboot your device for settings to '
        'take effect'):
      self.adb.EnableVerity()
  def testFailEnableVerity(self):
    # Unexpected adb output must surface as AdbCommandFailedError.
    with self._MockRunDeviceAdbCmd('error: closed'):
      self.assertRaises(device_errors.AdbCommandFailedError,
                        self.adb.EnableVerity)
  def testFailDisableVerity(self):
    with self._MockRunDeviceAdbCmd('error: closed'):
      self.assertRaises(device_errors.AdbCommandFailedError,
                        self.adb.DisableVerity)
  @mock.patch('devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout')
  def testDeviceUnreachable(self, get_cmd_mock):
    # "device not found" output maps to DeviceUnreachableError.
    get_cmd_mock.return_value = (
        1, "error: device '%s' not found" % self.device_serial)
    self.assertRaises(device_errors.DeviceUnreachableError, self.adb.Shell,
                      '/bin/true')
  @mock.patch('devil.utils.cmd_helper.GetCmdStatusAndOutputWithTimeout')
  def testWaitingForDevice(self, get_cmd_mock):
    # "waiting for device" output also maps to DeviceUnreachableError.
    get_cmd_mock.return_value = (1, '- waiting for device - ')
    self.assertRaises(device_errors.DeviceUnreachableError, self.adb.Shell,
                      '/bin/true')
|
{
"content_hash": "9037710273fa343cbcf461af1d275797",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 35.96969696969697,
"alnum_prop": 0.7080876158382476,
"repo_name": "endlessm/chromium-browser",
"id": "f30ab152de3daedda16864e5100bb7cd8e9e46a7",
"size": "2558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/devil/devil/android/sdk/adb_wrapper_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from api import views
from api.views import FacebookLogin
# API v1 router wiring: viewsets registered here get standard CRUD routes.
router = SimpleRouter()
# users route
router.register(r'users', views.UserViewSet)
# tourpoint route
router.register(r'tourpoints', views.TourPointViewSet)
# NOTE(review): `base_name` was renamed `basename` in DRF 3.9+; kept as-is
# for the DRF version this project pins.
router.register(r'search', views.TourPointLocationGeoSearchViewSet,
                base_name='tourpoint-search')
urlpatterns = [
    url(r'^api/v1/', include([
        url(r'^', include(router.urls)),
        url(r'^auth/', include('rest_auth.urls', namespace='auth')),
        url(r'^auth/facebook/$', FacebookLogin.as_view(), name='facebook-login'),
        url(r'^$', views.APIRootView.as_view(), name='api-root'),
    ]))
]
|
{
"content_hash": "fe46beffea9c7874c95c8e99cd157590",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 30.75,
"alnum_prop": 0.6883468834688347,
"repo_name": "johnnywell/snowman",
"id": "d5ab349b86ed39b3b28184bee14623c38b800837",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/router.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34519"
}
],
"symlink_target": ""
}
|
from .scale import scale
from copy import deepcopy
import brewer2mpl
def _number_to_palette(ctype, n):
    """Return the n-th (1-based) brewer palette name for color map `ctype`.

    Falls through and returns None when `n` exceeds the number of
    palettes available for that type.
    """
    n -= 1
    palettes = sorted(brewer2mpl.COLOR_MAPS[ctype].keys())
    if n < len(palettes):
        return palettes[n]
def _handle_shorthand(text):
abbrevs = {
"seq": "Sequential",
"qual": "Qualitative",
"div": "Diverging"
}
text = abbrevs.get(text, text)
text = text.title()
return text
class scale_colour_brewer(scale):
    """
    Parameters
    ----------
    type: string
        One of seq (sequential), div (diverging) or qual (qualitative)
    palette: string
        If a string, will use that named palette. If a number, will index into
        the list of palettes of appropriate type
    Examples
    --------
    >>> from ggplot import *
    >>> p = ggplot(aes(x='carat', y='price', colour='clarity'), data=diamonds)
    >>> p += geom_point()
    >>> print(p + scale_color_brewer(palette=4))
    >>> print(p + scale_color_brewer(type='diverging'))
    >>> print(p + scale_color_brewer(type='div'))
    >>> print(p + scale_color_brewer(type='seq'))
    >>> print(p + scale_color_brewer(type='seq', palette='Blues'))
    """
    VALID_SCALES = ['type', 'palette']
    def __radd__(self, gg):
        # Work on a copy so `p + scale` does not mutate the original plot.
        gg = deepcopy(gg)
        if self.type:
            ctype = self.type
        else:
            ctype = "Sequential"
        ctype = _handle_shorthand(ctype)
        if self.palette:
            palette = self.palette
        else:
            # Default to the first palette of the chosen type.
            palette = _number_to_palette(ctype, 1)
        if isinstance(palette, int):
            palette = _number_to_palette(ctype, palette)
        # Try to get colors
        try:
            # 'color' takes precedence; fall back to 'fill'. brewer2mpl
            # requires at least 3 colors per map, hence the max().
            color_col = gg.aesthetics.get('color', gg.aesthetics['fill'])
            n_colors = max(gg.data[color_col].nunique(),3)
        except KeyError :
            # If we are neither using 'color' nor 'fill' then assume there is
            # only one color used
            n_colors = 3
        bmap = brewer2mpl.get_map(palette, ctype, n_colors)
        gg.manual_color_list = bmap.hex_colors
        return gg
|
{
"content_hash": "fc3868db68efafe3fe5690e4e90794b4",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 28.186666666666667,
"alnum_prop": 0.5666982024597919,
"repo_name": "mizzao/ggplot",
"id": "6f34d4d34ae8cec84c1047f4b991e94b84da04d8",
"size": "2114",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ggplot/scales/scale_colour_brewer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "450130"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import logging
from bmc.bmc_spec import BMCSpec, InvarStatus
from . import sal_op_parser
from . import pwa2salconverter as pwa2sal
import fileops as fops
import utils as U
import err
logger = logging.getLogger(__name__)
SAL_PATH = 'SAL_PATH'
SAL_INF_BMC = '''/bin/sal-inf-bmc'''
class SALBMCError(Exception):
    """Raised for SAL-BMC specific failures (e.g. unknown model type)."""
class SalOpts():
    """Options controlling how sal-inf-bmc is invoked."""

    def __init__(self):
        # Default to the yices2 SMT backend, verbose output, single-shot
        # (non-iterative) checking, and keeping SAL's temp files around.
        self.yices = 2
        self.verbosity = 3
        self.iterative = False
        self.preserve_tmp_files = True
# Must separate the arguments. i.e., -v 3 should be given as ['-v', '3']
# This can be avoided by using shell=True, but that is a security risk
def sal_run_cmd(sal_path, depth, sal_file, prop_name, opts=None):
    """Build the sal-inf-bmc command line as an argument list.

    Parameters
    ----------
    sal_path : path to the sal-inf-bmc executable
    depth : BMC unrolling depth (passed via -d)
    sal_file : path to the .sal model file
    prop_name : name of the property to check
    opts : SalOpts instance; a fresh default is created when None.
        (The original signature used `opts=SalOpts()`, which is evaluated
        once at import time and shared across all calls — the classic
        mutable-default pitfall.)

    Returns
    -------
    list of str suitable for subprocess execution without a shell.
    """
    if opts is None:
        opts = SalOpts()
    cmd = [
        sal_path,
        '-v', str(opts.verbosity),
        '-d', str(depth),
        sal_file,
        prop_name
    ]
    if opts.yices == 2:
        cmd.extend(['-s', 'yices2'])
    if opts.preserve_tmp_files:
        cmd.append('--preserve-tmp-files')
    if opts.iterative:
        cmd.append('-it')
    print(' '.join(cmd))
    return cmd
class BMC(BMCSpec):
    """Bounded model checker backend driving SAL's sal-inf-bmc tool."""
    def __init__(self, vs, pwa_graph, init_cons, final_cons,
                 init_ps, final_ps, fname_constructor, module_name, model_type,
                 smt_engine):
        """__init__
        Parameters
        ----------
        vs : list of variables. Order is important.
        pwa_graph :
        init_cons :
        final_cons :
        module_name :
        model_type : one of 'dft', 'dmt', 'ct', 'rel'; only 'dft' is
            implemented.
        Returns
        -------
        Notes
        ------
        smt_engine is currently unused here.
        """
        self.prop_name = 'safety'
        self.fname_constructor = fname_constructor
        self.module_name = module_name
        fname = module_name + '.sal'
        self.sal_file = fname_constructor(fname)
        self.trace = None
        self.vs = vs
        self.init_ps = init_ps
        self.final_ps = final_ps
        if model_type == 'dft':
            # Discrete-time fixed-step model: translate the PWA graph to a
            # SAL transition system.
            self.pwa2sal = pwa2sal.Pwa2Sal(
                module_name, init_cons,
                final_cons, pwa_graph, vs,
                init_ps, final_ps)
            self.sal_trans_sys = self.pwa2sal.trans_sys()
        elif model_type == 'dmt':
            # NOTE(review): dead code after the raise, kept for reference.
            raise NotImplementedError
            dts = pwa_graph.keys()
            self.sal_trans_sys = BMC.sal_module_dmt(
                dts, vs, pwa_graph, init_cons, final_cons, module_name)
        elif model_type == 'ct':
            raise NotImplementedError
        elif model_type == 'rel':
            raise NotImplementedError
        else:
            raise SALBMCError('unknown model type')
        return
    def trace_generator(self, depth):
        # Yields at most one (sal_trace, pwa_trace) pair per call.
        for i in range(1):
            status = self.check(depth)
            if status == InvarStatus.Unsafe:
                yield self.trace, self.get_pwa_trace()
        return
    def check(self, depth):
        """Run sal-inf-bmc up to `depth`; return InvarStatus.Unsafe when a
        counter-example is found, InvarStatus.Unknown otherwise."""
        yices2_not_found = 'yices2: not found'
        self.dump()
        try:
            sal_path_ = os.environ[SAL_PATH] + SAL_INF_BMC
        except KeyError:
            raise err.Fatal("SAL environment variable is not defined. It\n"
                            "should point to sal's top-level directory")
            #raise KeyError
        sal_path = fops.sanitize_path(sal_path_)
        sal_cmd = sal_run_cmd(
            sal_path,
            depth,
            self.sal_file,
            self.prop_name,
        )
        try:
            sal_op = U.strict_call_get_op(sal_cmd)
        except U.CallError as e:
            # Fall back from yices2 to yices if SAL can't find the former.
            if yices2_not_found in e.message:
                print('SAL can not find yices2. Trying with yices...')
                opts = SalOpts()
                opts.yices = 1
                sal_cmd = sal_run_cmd(
                    sal_path,
                    depth,
                    self.sal_file,
                    self.prop_name,
                    opts)
                sal_op = U.strict_call_get_op(sal_cmd)
            else:
                raise err.Fatal('unknown SAL error!')
        print(sal_op)
        self.trace = sal_op_parser.parse_trace(sal_op, self.vs)
        if self.trace is None:
            print('BMC failed to find a CE')
            return InvarStatus.Unknown
        else:
            #self.trace.set_vars(self.vs)
            print('#'*40)
            print('# Cleaned up trace')
            print('#'*40)
            print(self.trace)
            print('#'*40)
            return InvarStatus.Unsafe
    def dump(self):
        # Serialize the SAL transition system to the target .sal file.
        fops.write_data(self.sal_file, str(self.sal_trans_sys).encode())
        return
    def get_trace(self):
        # NOTE(review): intentionally disabled; dead code kept below.
        raise NotImplementedError
        """Returns the last trace found or None if no trace exists."""
        return self.trace
    def get_last_traces(self):
        raise NotImplementedError
        # Code works, but should be removed due to change in
        # interfaces
        raise NotImplementedError
        if self.trace is not None:
            return self.trace.to_array(), self.get_last_pwa_trace()
        else:
            return None, None
    def get_pwa_trace(self):
        """Converts a bmc trace to a sequence of sub_models in the original pwa.
        Parameters
        ----------
        Returns
        -------
        pwa_trace = [sub_model_0, sub_model_1, ... ,sub_model_n]
        pwa_trace =
        models = [m01, m12, ... , m(n-1)n]
        partitions = [p0, p1, p2, ..., pn]
        Notes
        ------
        For now, pwa_trace is only a list of sub_models, as relational
        modeling is being done with KMIN = 1. Hence, there is no
        ambiguity.
        """
        # # each step, but the last, corresponds to a transition
        # for step in steps[:-1]:
        # part_id = self.sal2pwa_map[step.assignments['cell']]
        # sub_model = self.sal2pwa_map[step.tid]
        # # Assumption of trace building is that each submodel only
        # # has 1 unique next location. If this violated, we need to
        # # add cell ids/part ids to resolve the ambiguity.
        # assert(len(sub_model.pnexts) == 1)
        # assert(sub_model.p.ID == part_id)
        # # this is still untested, so in case assert is off...
        # assert(sub_model.p.ID == part_id)
        # #err.warn('gone case')
        # #pwa_trace.extend((part_id, sub_model))
        # pwa_trace.append(sub_model)
        if self.trace is None:
            return None
        steps = self.trace
        # Every step but the last carries the transition id taken from it.
        transitions = [step.tid for step in steps[:-1]]
        return self.pwa2sal.trace(transitions)
    def gen_new_disc_trace(self):
        raise NotImplementedError
        """makes trace = None, signifying no more traces..."""
        self.trace = None
        return
################################################
# ############# CEMETERY #######################
################################################
# @staticmethod
# def sal_module_dmt(dts, vs, pwa_models, init_set, final_cons, module_name):
# sal_trans_sys = slt_dmt.SALTransSysDMT(dts, module_name, vs, init_set, final_cons)
# for dt, pwa_model in pwa_models.iteritems():
# # replace decimal point with _ else SAL will throw an
# # error due to incorrect identifier
# dt_str = str(dt).replace('.', '_')
# for idx, sub_model in enumerate(pwa_model):
# g = slt_dmt.Guard(sub_model.p.C, sub_model.p.d)
# r = slt_dmt.Reset(sub_model.m.A, sub_model.m.b)
# t = slt_dmt.Transition(
# dt, dts, 'C_{}_{}'.format(idx, dt_str), g, r)
# sal_trans_sys.add_transition(t)
# return sal_trans_sys
|
{
"content_hash": "d796d318d6f3f862e3b2901a526fd34a",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 92,
"avg_line_length": 31.023346303501945,
"alnum_prop": 0.5133575818387056,
"repo_name": "zutshi/S3CAMR",
"id": "82f3ebccc7ae13d8741a5ac970359d1208d2a2e1",
"size": "7973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bmc/sal_bmc/salbmc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2970"
},
{
"name": "MATLAB",
"bytes": "14618"
},
{
"name": "Makefile",
"bytes": "232"
},
{
"name": "Python",
"bytes": "528716"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
import os
import time
import sys
import argparse
import serial.tools.list_ports
from colorama import init as coloramaInit
from colorama import Fore, Back
from lib import pyCham
# Module-level Chameleon Mini session shared by all the menu helpers below.
cham = pyCham()
def showBanner(showFooter = False):
print "██████╗ ██╗ ██╗ ██████╗██╗ ██╗ █████╗ ███╗ ███╗"
print "██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║██╔══██╗████╗ ████║ by Chema Garcia"
print "██████╔╝ ╚████╔╝ ██║ ███████║███████║██╔████╔██║ @sch3m4"
print "██╔═══╝ ╚██╔╝ ██║ ██╔══██║██╔══██║██║╚██╔╝██║ chema@safetybits.net"
print "██║ ██║ ╚██████╗██║ ██║██║ ██║██║ ╚═╝ ██║ https://github.com/sch3m4/pycham"
print "╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ v0.2b"
if showFooter is True:
print "a Python interface to Chameleon Mini"
print ""
def locateDevice(devid):
    '''
    Return the serial port path of the first connected port whose hardware
    id (third field of the comports() entry) starts with devid, or None if
    no such device is attached.
    '''
    for port in serial.tools.list_ports.comports():
        # port is a (device, description, hwid) triple; match on hwid prefix.
        # startswith replaces the original manual length-check + slice compare.
        if port[2].startswith(devid):
            return port[0]
    return None
def auxMenu(options):
os.system('clear')
showBanner()
ret = None
while True:
cont = 1
for opt in options:
print Fore.RED + "%s" % cont + Fore.RESET + ")\t" + "%s" % opt
cont += 1
print "0) Exit"
try:
print ""
opt = int(raw_input ( "Value: " ).strip())
except:
continue
if opt > len ( options ):
continue
if int(opt) == 0:
ret = None
else:
ret = options[opt-1]
break
return ret
def showMenu():
    # Interactive main menu: queries the device state, renders it, reads one
    # action from the user, executes it, and loops until the user exits.
    cmds = cham.getCommands()
    instantcmds = cham.getInstantCommands()
    map = {}  # menu number (offset by one, see below) -> command string
    finish = False
    while finish is False:
        # Refresh the full device state before redrawing the menu.
        print "Getting device version..."
        version = cham.getVersion()
        print "Getting available configurations..."
        configurations = cham.getConfigurations()
        print "Getting the currently activated setting..."
        settings = cham.getCurSettings()
        print "Getting current config..."
        config = cham.getCurConfig()
        print "Getting button actions..."
        bactions = cham.getButtonActions()
        print "Getting all available button actions..."
        bcuractions = cham.getCurButtonActions()
        print "Getting readonly status..."
        rostatus = cham.getReadOnly()
        print "Getting current UID..."
        curuid = cham.getCurrentUID()
        print "Getting current UID size..."
        curuidsize = cham.getCurrentUIDSize()
        print "Getting current memory size..."
        curmemsize = cham.getCurrentMemSize()
        os.system('clear')
        showBanner()
        # Status summary.
        print "-==== Global Configurations ====-"
        print Fore.RESET + " = " + Fore.MAGENTA + "+ Version: " + Fore.GREEN + "%s" % version
        print Fore.RESET + " = " + Fore.MAGENTA + "+ Configs: " + Fore.GREEN + "%s" % configurations
        print Fore.RESET + " = " + Fore.MAGENTA + "+ Button actions: " + Fore.GREEN + "%s" % bactions
        print Fore.RESET + " = " + Fore.MAGENTA + "+ ReadOnly status: " + Fore.GREEN + "%s" % rostatus
        print Fore.RESET + " = " + Fore.YELLOW + "+ Current button action: " + Fore.GREEN + "%s" % bcuractions
        print Fore.RESET + " = " + Fore.YELLOW + "+ Currrent config: " + Fore.GREEN + "%s" % config
        print Fore.RESET + " = " + Fore.YELLOW + "+ Current setting: " + Fore.GREEN + "%s" % settings
        print Fore.RESET + " = " + Fore.YELLOW + "+ Current UID: " + Fore.GREEN + "%s" % curuid
        print Fore.RESET + " = " + Fore.YELLOW + "+ Current UID size: " + Fore.GREEN + "%s" % curuidsize
        print Fore.RESET + " = " + Fore.YELLOW + "+ Current memory size: " + Fore.GREEN + "%s" % curmemsize
        print Fore.RESET + "-===============================-\n"
        options = cmds.keys()
        cont = 1
        for opt in options:
            print Fore.RED + "%s" % cont + Fore.RESET + ")\t%s" % cmds[opt]
            cont += 1
            # Build the number->command map on the first pass only. cont has
            # already been incremented, hence the map[sel+1] lookup below.
            if len(map.keys()) != len(options):
                map[cont] = opt
        print "0)\tExit"
        correct = False
        reload = False
        while correct is False and reload is False:
            try:
                print ""
                print Fore.BLUE + "NOTE:" + Fore.RESET + " Press enter to reload the menu"
                sel = raw_input("Action: ")
                sel = int(sel)
            except ValueError:
                # Plain ENTER (empty input) means "reload the menu".
                if len(sel.strip()) == 0:
                    reload = True
            except:
                continue
            if reload is True or sel <= len ( options ):
                correct = True
        if reload is True:
            continue
        if sel == 0:
            return
        invalid = False
        unknown = False
        cmd = map[sel+1]
        if cmd in instantcmds:
            # Argument-less commands are executed immediately.
            # NOTE(review): cmd is executed again at the bottom of the loop
            # (unknown/finish are both False here) - confirm the double
            # execution is intended.
            cham.execute ( cmd )
        elif cmd == pyCham.COMMAND_SET_UID:
            uid = ''
            while len ( uid ) == 0:
                uid = raw_input ( "Enter the new UID value: " ).strip()
            if uid is None:
                unknown = True
            else:
                cmd += uid
        elif cmd == pyCham.COMMAND_SET_CONFIG:
            print "TODO"
        elif cmd == pyCham.COMMAND_SET_RO:
            # Toggle the read-only flag based on the current status.
            if int(rostatus) == 0:
                cmd += '1'
            elif int(rostatus) == 1:
                cmd += '0'
            else:
                invalid = True
        elif cmd == pyCham.COMMAND_SET_BUTTON:
            opt = auxMenu ( bactions.split(',') )
            if opt is None:
                unknown = True
            else:
                cmd += opt
        elif cmd == pyCham.COMMAND_SET_SETTING:
            opt = auxMenu ( ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16'] )
            if opt is None:
                unknown = True
            else:
                cmd += opt
        elif cmd == pyCham.COMMAND_DOWNLOAD:
            # Ask until the user enters a path that is not an existing dir.
            path = '.'
            while os.path.isdir(path) is True and os.path.exists(path):
                path = raw_input ( "Enter destination file path: " ).strip()
            cham.execute ( cmd )
            time.sleep(1)
            cham.downloadToFile ( path , int(curmemsize) )
            print "Restarting..."
            # finish = True
        elif cmd == pyCham.COMMAND_UPLOAD:
            # Ask until the user enters an existing source file.
            path = ''
            while os.path.isfile(path) is False:
                path = raw_input ( "Enter source file path: " ).strip()
            cham.execute ( cmd )
            time.sleep(1)
            cham.uploadFromFile ( path )
            # finish = True
            print "Restarting..."
        else:
            unknown = True
            print "Unknown option!"
            # this should not happen
        if invalid is True:
            print "Invalid value!"
        elif unknown is False and finish is False:
            cham.execute ( cmd )
        raw_input("Press ENTER to continue")
    return finish
def main():
    # Entry point: locate the Chameleon (autodetect or CLI argument), then
    # loop opening the port and showing the menu until showMenu() signals
    # completion.
    showBanner(True)
    if len(sys.argv) < 2:
        print "[i] Trying to autodetect Chameleon-Mini"
        time.sleep(0.5)
        args = {'serial': locateDevice ( pyCham.DEVICE_ID ) }
        if args['serial'] is not None:
            print "[i] Chameleon-Mini found at " + Fore.GREEN + "%s" % args['serial'] + Fore.RESET
            time.sleep(1)
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument('serial', metavar='tty', type=str , help='Serial port of Chameleon' )
        args = vars(parser.parse_args())
    if args['serial'] is None or not os.path.exists ( args['serial' ] ):
        print "[e] Error: Chameleon serial port not found. Rerun this script with -h parameter to view help."
        sys.exit(-1)
    coloramaInit()
    finish = False
    while finish is False:
        cham.setSerial ( args['serial'] )
        if cham.openSerial() is None:
            print "[e] Error opening serial port"
            sys.exit(-2)
        finish = showMenu()
        cham.close()
    # NOTE(review): redundant second close - the port was already closed on
    # the last loop iteration; harmless only if close() is idempotent.
    cham.close()
    return finish
if __name__ == "__main__":
    # Keep the UI alive across serial disconnects: on SerialException, show
    # a RECON_DELAY-second countdown and retry main().
    finish = False
    while finish is False:
        try:
            finish = main()
        except serial.SerialException:
            print "[e] Connection closed"
            for i in range(0,pyCham.RECON_DELAY):
                val = pyCham.RECON_DELAY - i
                # Trailing comma + \r keeps the countdown on a single line.
                print '[+] Reconnection in %d seconds\r' % val,
                sys.stdout.flush()
                time.sleep(1)
|
{
"content_hash": "b65faf5abf18af799b9e82c71552ca47",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 109,
"avg_line_length": 28.20703125,
"alnum_prop": 0.5934081152194987,
"repo_name": "sch3m4/pycham",
"id": "07c1e26ccdd95bc55cc17a25f4bc5b8ba9198657",
"size": "7846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pycham.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16545"
}
],
"symlink_target": ""
}
|
import collections
import copy
import sling
import sling.log as log
import string
from collections import defaultdict
from sling.task.workflow import register_task
from util import load_kb
# A span is a [begin, end) range of tokens in the category title string.
# It is resolved to a list of PIDs and a QID along with the QID's prior
# from the phrase table. This resolution also comes with a count of how
# many members of the category have that (PID-chain, QID) as a fact.
# 'pids' is stored as a tuple (see qid_pid_counts) so spans stay hashable.
Span = collections.namedtuple('Span', 'begin end qid prior pids count')
# Generates an exhaustive list of parses for a category string.
# Only processes categories that pass some basic checks.
class CategoryParseGenerator:
  """Generates an exhaustive list of parses for a category title string.

  Only processes categories that pass some basic checks (see reject()).
  """

  # Minimum count below which a (pid, qid) span annotation will be ignored.
  MIN_PID_QID_COUNT = 2

  def lookup(self, name):
    """Returns the KB handle for 'name', asserting that it is present."""
    handle = self.kb[name]
    assert handle is not None, "%s not in KB" % name
    return handle

  def init(self, task):
    """Loads the KB and phrase table, and caches frequently used handles."""
    self.kb = load_kb(task)
    self.names = sling.PhraseTable(self.kb, task.input("phrase-table").name)
    self.min_members = int(task.param("min_members"))
    # Bin boundaries for the histogram over number of parses per category.
    self.num_parses_bins = [1, 2, 3, 5, 10, 20, 50, 100, 200]

    # Lookup some handles in advance.
    self.h_language = self.lookup("/lang/" + task.param("language"))
    self.h_lang = self.lookup("lang")
    self.main_topic = self.lookup("P301")  # present in topical categories
    self.h_member = self.lookup("/w/item/member")
    self.h_instanceof = self.lookup('P31')
    self.h_subclassof = self.lookup('P279')
    self.h_category = self.lookup('Q4167836')
    self.h_category_contains = self.lookup('P4224')
    self.english = task.param("language") == "en"

    # The following kinds of categories won't be processed.
    self.uninteresting_categories = set([
        self.lookup('Q20769287'),   # disambiguation category
        self.lookup('Q15407973'),   # list category
        #self.lookup('Q56428020'),  # template category
        self.lookup('Q23894233'),   # stub category
        self.lookup('Q24046192'),   # admin category
        self.lookup('Q15647814'),   # user category
        self.lookup('Q20010800'),   # user language category
        self.lookup('Q30432511'),   # meta category
        self.lookup('Q13331174')    # navbox category
    ])

    # These pids will not be considered as resolution for spans.
    self.pids_to_ignore = set([
        self.h_instanceof,          # P31 = instance of
        self.lookup('P279'),        # P279 = subclass of
        self.lookup('P971'),        # P971 = category combines topics
        self.lookup('P4224'),       # P4224 = category contains
    ])

    # These QIDs will not be considered as resolutions for spans.
    self.base_qids = set([
        self.lookup('Q5'),          # human
        self.lookup('Q215627'),     # person
        self.lookup('Q17334923'),   # location
        self.lookup('Q811430'),     # construction
        self.lookup('Q43229'),      # organization
        self.lookup('Q2385804'),    # educational institution
        self.lookup('Q294163'),     # public institution
        self.lookup('Q15401930'),   # product
        self.lookup('Q12737077'),   # occupation
        self.lookup('Q192581'),     # job
        self.lookup('Q4164871'),    # position
        self.lookup('Q216353')      # title
    ])
    self.extractor = sling.api.FactExtractor(self.kb)

  # Returns whether the item is a category.
  def is_category(self, frame):
    return self.h_category in frame(self.h_instanceof)

  # Returns the category title in the specified language.
  def get_title(self, frame):
    for alias in frame("alias"):
      if alias[self.h_lang] == self.h_language:
        return alias.name
    return None

  # Returns true if the category with QID 'category_qid' and members
  # 'category_members' should be rejected from processing or not.
  # If true, also returns a corresponding reason.
  def reject(self, category_qid, category_frame, category_members):
    if self.main_topic in category_frame:
      return (True, "topical")
    if self.min_members >= 0 and len(category_members) < self.min_members:
      return (True, "very_few_members")
    for category_type in category_frame(self.h_instanceof):
      if category_type in self.uninteresting_categories:
        return (True, "uninteresting_category_type")
    title = self.get_title(category_frame)
    if title is None:
      return (True, "no_title_in_language")
    if self.english and title.find("stub") != -1:
      return (True, "stub")
    return (False, "")

  # Returns counts of (QID, PID-chain) for all (PID-chain, QID) facts across
  # all members. (PID-chain, QID) facts that occur multiple times in a single
  # member are counted only once.
  def qid_pid_counts(self, store, members):
    qp_counts = defaultdict(lambda: defaultdict(int))
    seen = set()  # (PID, QID) seen in one member
    for member in members:
      facts = self.extractor.facts(store, member)
      seen.clear()
      for fact in facts:
        if fact in seen:
          continue
        seen.add(fact)
        qid = fact[-1]  # fact = sequence of PIDs followed by a QID
        pids = tuple(fact[:-1])
        qp_counts[qid][pids] += 1
    return qp_counts

  # Computes and returns all spans in the tokenized category title document,
  # that can be resolved to (QID, PID-chain) entries in 'qp_counts'.
  #
  # Spans are reported as a map: token i -> all spans that start at token i.
  def compute_spans(self, document, qp_counts):
    tokens = document.tokens
    size = len(tokens)
    begin_to_spans = [[] for _ in range(size)]
    for begin in range(size):
      # Ignore spans starting in punctuation.
      begin_word = tokens[begin].word
      if begin_word in string.punctuation and begin_word != "(":
        continue
      for end in range(begin + 1, size + 1):
        # Ignore spans ending in punctuation.
        # BUG FIX: this previously read tokens[begin].word, so both this
        # end-of-span punctuation check and the balanced-parenthesis check
        # below were examining the span's FIRST token instead of its last.
        end_word = tokens[end - 1].word
        if end_word in string.punctuation and end_word != ")":
          continue

        # Special case: allow balanced parentheses.
        if begin_word == "(" and \
            all([t.word != ")" for t in tokens[begin + 1: end]]):
          continue
        if end_word == ")" and \
            all([t.word != "(" for t in tokens[begin: end - 1]]):
          continue

        phrase = document.phrase(begin, end)
        matches = self.names.query(phrase)

        # Also lemmatize plurals.
        if self.english:
          original = phrase
          if phrase.endswith("ies"): phrase = phrase[0:-3] + "y"
          elif phrase.endswith("es"): phrase = phrase[0:-2]
          elif phrase.endswith("s"): phrase = phrase[0:-1]
          if original != phrase:
            # Add more matches.
            existing = set([m.item() for m in matches])
            more_matches = self.names.query(phrase)
            for m in more_matches:
              if m.item() not in existing:
                matches.append(m)

        if len(matches) == 0:
          continue
        # Normalize phrase-table counts into priors over the matches.
        total_denom = 1.0 / sum([m.count() for m in matches])
        for match in matches:
          qid = match.item()
          prior = match.count() * total_denom
          if qid in qp_counts:
            for pids, count in qp_counts[qid].items():
              # Ignore low frequency (pid, qid) pairs.
              if count < CategoryParseGenerator.MIN_PID_QID_COUNT:
                continue
              span = Span(begin, end, match.item(), prior, pids, count)
              begin_to_spans[begin].append(span)
    return begin_to_spans

  # Takes all spans and constructs maximal parses from them. Each parse only
  # contains non-overlapping spans.
  #
  # This is computed recursively from right to left.
  # Base case: No parses can start at the end token.
  # Case 1: If no parses start at 'begin' then return all parses
  #         starting at begin + 1.
  # Case 2: If spans s_1, ..., s_k start at 'begin', then return
  #         \Union_i ({s_i} \union ParsesStartingAt(s_i.end))
  def construct_parses(self, begin_to_spans):
    end = len(begin_to_spans)
    parses = {}         # i -> parses starting at token i
    parses[end] = [[]]  # no parses can start after the last token
    for begin in range(end - 1, -1, -1):
      if len(begin_to_spans[begin]) == 0:
        # No spans start at 'begin', so report parses starting at begin + 1.
        parses[begin] = copy.copy(parses[begin + 1])
      else:
        parses[begin] = []
        for span in begin_to_spans[begin]:
          for parse in parses[span.end]:
            parse = copy.copy(parse)
            parse.append(span)
            parses[begin].append(parse)
    # Reverse the spans in each full parse. This will order them by tokens.
    parses[0] = [list(reversed(p)) for p in parses[0]]
    return parses[0]

  # Returns a string representation of a parse.
  def parse_to_str(self, parse):
    output = []
    for span in parse:
      output.append(str(span.pids) + ':' + str(span.qid))
    return ' '.join(output)

  # Returns true if the given span should be dropped.
  def skip_span(self, span):
    if span.qid in self.base_qids:
      return True
    for pid in span.pids:
      if pid in self.pids_to_ignore:
        return True
    return False

  # Post-processes parses by dropping some spans.
  def post_process(self, parses):
    output = []
    seen = set()
    for parse in parses:
      new_parse = []
      for span in parse:
        if not self.skip_span(span):
          new_parse.append(span)
      if len(new_parse) == 0:
        continue
      if len(new_parse) != len(parse):
        # Dropping spans might lead to duplicate parses, so dedup them.
        s = self.parse_to_str(new_parse)
        if s not in seen:
          output.append(new_parse)
          seen.add(s)
      else:
        output.append(parse)
    return output

  # Returns category members that (a) are not categories themselves,
  # (b) satisfy any category_contains property of the category.
  def get_members(self, frame):
    members = [m for m in frame(self.h_member) if not self.is_category(m)]
    if self.h_category_contains not in frame:
      # No other constraint to check.
      return members

    store = frame.store()
    allowed = set([store.resolve(c) for c in frame(self.h_category_contains)])
    output = []
    for member in members:
      valid = False
      for value in member(self.h_instanceof):
        # Fast-check for whether 'value' satisfies category_contains.
        if value in allowed:
          valid = True
          break
        # Slow-check for whether 'value' satisfies category_contains.
        for a in allowed:
          if self.extractor.in_closure(self.h_subclassof, a, value):
            valid = True
            break
        if valid:
          break
      if valid:
        output.append(member)
    return output

  # Runs the parse generation task.
  def run(self, task):
    self.init(task)
    writer = sling.RecordWriter(task.output("output").name)
    rejected = sling.RecordWriter(task.output("rejected").name)
    inputs = [t.name for t in task.inputs("items")]
    for filename in inputs:
      reader = sling.RecordReader(filename)
      for index, (key, value) in enumerate(reader):
        store = sling.Store(self.kb)
        frame = store.parse(value)

        # Only process category items.
        if not self.is_category(frame):
          rejected.write(key, "not_category")
          continue

        # See if the category should be skipped.
        members = self.get_members(frame)
        reject, reason = self.reject(key, frame, members)
        if reject:
          task.increment("skipped_categories/" + reason)
          rejected.write(key, reason)
          continue

        # First, collect the targets of all facts of all category members.
        qp_counts = self.qid_pid_counts(store, members)

        # Next, tokenize the category title (dropping any namespace prefix
        # before the first colon).
        title = self.get_title(frame)
        colon = title.find(':')
        title = title[colon + 1:]
        document = sling.tokenize(title, store)

        # Next, find matches for all spans. These are reported as a list,
        # where ith item = spans that begin at token i (possibly an empty list).
        begin_to_spans = self.compute_spans(document, qp_counts)

        # Construct maximal parses with non-overlapping spans.
        parses = self.construct_parses(begin_to_spans)

        # Post-process parses.
        parses = self.post_process(parses)
        if len(parses) == 0 or len(parses) == 1 and len(parses[0]) == 0:
          task.increment("skipped_categories/no_parses")
          rejected.write(key, "no_parses")
          continue

        # Write parses as frames.
        frame = store.frame({"name": title, "members": members})
        frame["document"] = document.frame
        for parse in parses:
          span_array = store.array(len(parse))
          for i, span in enumerate(parse):
            span_array[i] = store.frame({
                "begin": span.begin, "end": span.end, "qid": span.qid,
                "prior": span.prior, "pids": list(span.pids),
                "count": span.count
            })
          parse_frame = store.frame({"spans": span_array})
          frame.append("parse", parse_frame)
        writer.write(key, frame.data(binary=True))
        task.increment("categories_accepted")

        # Compute histogram over number of parses.
        for b in self.num_parses_bins:
          if len(parses) <= b:
            task.increment("#parses <= %d" % b)
        if self.num_parses_bins[-1] < len(parses):
          task.increment("#parses > %d" % self.num_parses_bins[-1])
      reader.close()
    writer.close()
    rejected.close()
# Register this processor under the name used by workflow definitions.
register_task("category-parse-generator", CategoryParseGenerator)
|
{
"content_hash": "eef87869e421df9c6ba5eb3c7d62c4f2",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 80,
"avg_line_length": 35.76883116883117,
"alnum_prop": 0.6147701691961368,
"repo_name": "google/sling",
"id": "5ac23f8684f153595bb2b098dfd01b5c51b4af04",
"size": "14560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sling/nlp/wikicat/generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "765"
},
{
"name": "C++",
"bytes": "4793787"
},
{
"name": "CSS",
"bytes": "10049"
},
{
"name": "HTML",
"bytes": "37253"
},
{
"name": "JavaScript",
"bytes": "59134"
},
{
"name": "Python",
"bytes": "577781"
},
{
"name": "Shell",
"bytes": "10326"
},
{
"name": "Starlark",
"bytes": "50958"
}
],
"symlink_target": ""
}
|
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class BackupProtectableItemsOperations(object):
    """BackupProtectableItemsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-12-01".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version, sent as the 'api-version' query param.
        self.api_version = "2016-12-01"

        self.config = config

    def list(
            self, vault_name, resource_group_name, filter=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
        """Provides a pageable list of protectable objects within your
        subscription according to the query filter and the pagination
        parameters.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param filter: OData filter options.
        :type filter: str
        :param skip_token: skipToken Filter.
        :type skip_token: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`WorkloadProtectableItemResourcePaged
         <azure.mgmt.recoveryservicesbackup.models.WorkloadProtectableItemResourcePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetches one page. On the first call (next_link is None) the
            # full URL and query string are built; later calls follow the
            # continuation link returned by the service verbatim.
            if not next_link:
                # Construct URL
                url = '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupProtectableItems'
                path_format_arguments = {
                    'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if skip_token is not None:
                    query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            # Anything other than 200 is surfaced as a CloudError.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.WorkloadProtectableItemResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            # Raw mode: return the pager wrapped without extra header data.
            header_dict = {}
            client_raw_response = models.WorkloadProtectableItemResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
|
{
"content_hash": "977c822e38c6da2a4ed5430effcb9938",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 169,
"avg_line_length": 45.17142857142857,
"alnum_prop": 0.635673624288425,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "ea8ab20dbe21089c0cdd2c3331f15f545fdb7945",
"size": "5217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/backup_protectable_items_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
"""initial migration
Revision ID: 413244c14ee
Revises: None
Create Date: 2015-09-15 00:50:22.590027
"""
# revision identifiers, used by Alembic.
revision = '413244c14ee'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``articles`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('articles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.Unicode(length=255), nullable=True),
    sa.Column('text', sa.UnicodeText(), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('publish_time', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``articles`` table (reverts upgrade())."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('articles')
    ### end Alembic commands ###
|
{
"content_hash": "81104bfe09d1e8e441cd90cc3ca85fb8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 63,
"avg_line_length": 26.21212121212121,
"alnum_prop": 0.6739884393063584,
"repo_name": "JXNU-ACS/learn-blog",
"id": "70fa95010a24fe9a11fa6cc239592a0575be8014",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/413244c14ee_initial_migration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4323"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "8421"
}
],
"symlink_target": ""
}
|
"""
Copyright Google Inc. 2019
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import apache_beam as beam
import numpy as np
import argparse, logging
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
def find_locations(wire):
    """Expand a comma-separated wire path ('R75,D30,...') into the ordered
    list of (x, y) grid cells the wire passes through, excluding the origin.
    """
    # Per-direction unit steps; anything other than R/U/L is treated as down,
    # matching the original if/elif chain's final else branch.
    deltas = {'R': (1, 0), 'U': (0, 1), 'L': (-1, 0)}
    x, y = 0, 0
    visited = []
    for leg in wire.split(','):
        dx, dy = deltas.get(leg[0], (0, -1))
        for _ in range(int(leg[1:])):
            x += dx
            y += dy
            visited.append((x, y))
    return visited
def find_intersection(kv):
    """Yield (row, col) for every column present in both wires' grouped
    column lists for this row; yields nothing when either wire is absent.
    """
    row, grouped = kv
    first, second = grouped['wire1'], grouped['wire2']
    if not first or not second:
        return
    cols_a = first[0]
    cols_b = second[0]
    for col in cols_a:
        if col in cols_b:
            yield (row, col)
def manhattan(rc):
    """Return the L1 (Manhattan) distance of grid cell *rc* from the origin."""
    return sum(abs(coord) for coord in rc)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Solutions to https://adventofcode.com/2019/ using Apache Beam')
    parser.add_argument('--input', required=True, help='Specify input file')
    parser.add_argument('--output', required=True, help='Specify output file')
    options = parser.parse_args()
    runner = 'DirectRunner' # run Beam on local machine, but write outputs to cloud
    logging.basicConfig(level=getattr(logging, 'INFO', None))
    # Sample wires from the puzzle description, kept for quick manual checks:
    #wires = ('R75,D30,R83,U83,L12,D49,R71,U7,L72', 'U62,R66,U55,R34,D71,R55,D58,R83')
    #wires = ('R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51', 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7')
    wires = [line.rstrip() for line in open(options.input)]
    print(wires)
    opts = beam.pipeline.PipelineOptions(flags=[])
    p = beam.Pipeline(runner, options=opts)
    # Each wire's (x, y) cells become key-value pairs grouped by the first
    # coordinate, so the two wires can be joined with CoGroupByKey below.
    locations = {'wire1':
                 (p | 'create1' >> beam.Create(find_locations(wires[0]))
                  | 'group1' >> beam.GroupByKey()),
                 'wire2':
                 (p | 'create2' >> beam.Create(find_locations(wires[1]))
                  | 'group2' >> beam.GroupByKey())
                 }
    # Join the wires, emit crossing points, and keep the smallest Manhattan
    # distance (TopCombineFn with reverse=True picks the minimum).
    (locations
     | 'cogroup' >> beam.CoGroupByKey()
     | 'intersect' >> beam.FlatMap(find_intersection)
     | 'distance' >> beam.Map(manhattan)
     | 'mindist' >> beam.CombineGlobally(beam.transforms.combiners.TopCombineFn(1, reverse=True))
     | 'output' >> beam.io.textio.WriteToText(options.output)
    )
    job = p.run()
    if runner == 'DirectRunner':
        job.wait_until_finish()
|
{
"content_hash": "f913633b4e15d642ad141800316b907f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 112,
"avg_line_length": 34.505494505494504,
"alnum_prop": 0.6111464968152867,
"repo_name": "GoogleCloudPlatform/training-data-analyst",
"id": "668abae91e5b7e2b7657a967c05204273adb0963",
"size": "3164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "blogs/beamadvent/day3a.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39536"
},
{
"name": "C#",
"bytes": "23445"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "53087"
},
{
"name": "Dockerfile",
"bytes": "90856"
},
{
"name": "Go",
"bytes": "93755"
},
{
"name": "HCL",
"bytes": "73891"
},
{
"name": "HTML",
"bytes": "2342167"
},
{
"name": "Java",
"bytes": "2441030"
},
{
"name": "JavaScript",
"bytes": "3957504"
},
{
"name": "Jinja",
"bytes": "257585"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "242016061"
},
{
"name": "Makefile",
"bytes": "12642"
},
{
"name": "PigLatin",
"bytes": "11558"
},
{
"name": "Pug",
"bytes": "457977"
},
{
"name": "Python",
"bytes": "18543833"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Scala",
"bytes": "27161"
},
{
"name": "Shell",
"bytes": "763259"
},
{
"name": "TypeScript",
"bytes": "66858"
}
],
"symlink_target": ""
}
|
from autosar.element import Element
class SwcImplementation(Element):
    """Swc implementation.

    Holds a reference to the internal behavior it implements plus optional
    descriptive fields that remain None until explicitly populated.
    """
    def __init__(self, name, behaviorRef, parent=None):
        super().__init__(name, parent)
        # Reference to the internal behavior this implementation realizes.
        self.behaviorRef = behaviorRef
        # Optional fields; None means "not set".
        self.codeDescriptors = None
        self.programmingLanguage = None
        self.resourceConsumption = None
        self.swVersion = None
        self.useCodeGenerator = None
        self.vendorId = None
class SwcImplementationCodeDescriptor(Element):
    """Swc implementation code.

    Carries the code description for an implementation; which field is used
    depends on the AUTOSAR schema version (see comments below).
    """
    def __init__(self, name, parent=None):
        super().__init__(name, parent)
        # Autosar 4
        self.artifactDescriptors = None
        # Autosar 3
        self.type = None
class EngineeringObject(object):
    """Generic engineering-object metadata holder.

    All descriptive fields start as None and are filled in later when the
    corresponding data is available.
    """
    def __init__(self, parent=None):
        self.parent = parent
        # Optional metadata fields, all unset by default.
        for field in ('shortLabel', 'category', 'revisionLabels', 'domain'):
            setattr(self, field, None)
class ResourceConsumption(Element):
    """Swc implementation ResourceConsumption.

    Container for the implementation's memory sections.
    """
    def __init__(self, name, parent=None):
        super().__init__(name, parent)
        # List of MemorySection objects; None until populated.
        self.memorySections = None
class MemorySection(Element):
    """Swc implementation MemorySection.

    Describes one memory section of the implementation; all fields default
    to None until populated.
    """
    def __init__(self, name, parent=None):
        super().__init__(name, parent)
        # NOTE(review): "aligment" is misspelled (should be "alignment") but
        # is part of the public attribute surface; renaming would break
        # existing callers.
        self.aligment = None
        self.memClassSymbol = None
        self.options = None
        self.size = None
        self.swAddrmethodRef = None
        self.symbol = None
|
{
"content_hash": "d9e053dc4a5b49a5d5904870fd2c4804",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 54,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6028187059577194,
"repo_name": "cogu/autosar",
"id": "80924c2afe3572a37f966abd0477a50f765c1748",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autosar/_swc_implementation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "1039000"
},
{
"name": "Shell",
"bytes": "445"
}
],
"symlink_target": ""
}
|
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import test
class HostsAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests hosts API using admin privileges.
"""
@classmethod
def setup_clients(cls):
    # Use the admin hosts client for privileged calls and keep a plain
    # (non-admin) client around to verify permission enforcement.
    super(HostsAdminNegativeTestJSON, cls).setup_clients()
    cls.client = cls.os_adm.hosts_client
    cls.non_admin_client = cls.os.hosts_client
def _get_host_name(self):
    # Pick an arbitrary existing host to run negative updates against;
    # the deployment must expose at least one host.
    hosts = self.client.list_hosts()
    self.assertTrue(len(hosts) >= 1)
    hostname = hosts[0]['host_name']
    return hostname
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('dd032027-0210-4d9c-860e-69b1b8deed5f')
def test_list_hosts_with_non_admin_user(self):
    # Listing hosts is admin-only; a regular user must get 403 Forbidden.
    self.assertRaises(lib_exc.Forbidden,
                      self.non_admin_client.list_hosts)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f')
def test_show_host_detail_with_nonexistent_hostname(self):
    # A freshly randomized hostname is assumed not to exist, so the
    # detail lookup must fail with 404 Not Found.
    missing_hostname = data_utils.rand_name('rand_hostname')
    self.assertRaises(lib_exc.NotFound,
                      self.client.show_host_detail, missing_hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc')
def test_show_host_detail_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.show_host_detail,
hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('e40c72b1-0239-4ed6-ba21-81a184df1f7c')
def test_update_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.update_host,
hostname,
status='enable',
maintenance_mode='enable')
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('76e396fe-5418-4dd3-a186-5b301edc0721')
def test_update_host_with_extra_param(self):
# only 'status' and 'maintenance_mode' are the valid params.
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname,
status='enable',
maintenance_mode='enable',
param='XXX')
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
def test_update_host_with_invalid_status(self):
# 'status' can only be 'enable' or 'disable'
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname,
status='invalid',
maintenance_mode='enable')
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('ab1e230e-5e22-41a9-8699-82b9947915d4')
def test_update_host_with_invalid_maintenance_mode(self):
# 'maintenance_mode' can only be 'enable' or 'disable'
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname,
status='enable',
maintenance_mode='invalid')
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('0cd85f75-6992-4a4a-b1bd-d11e37fd0eee')
def test_update_host_without_param(self):
# 'status' or 'maintenance_mode' needed for host update
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('23c92146-2100-4d68-b2d6-c7ade970c9c1')
def test_update_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.update_host,
nonexitent_hostname,
status='enable',
maintenance_mode='enable')
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('0d981ac3-4320-4898-b674-82b61fbb60e4')
def test_startup_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.startup_host,
nonexitent_hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca')
def test_startup_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.startup_host,
hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('9e637444-29cf-4244-88c8-831ae82c31b6')
def test_shutdown_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.shutdown_host,
nonexitent_hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('a803529c-7e3f-4d3c-a7d6-8e1c203d27f6')
def test_shutdown_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.shutdown_host,
hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('f86bfd7b-0b13-4849-ae29-0322e83ee58b')
def test_reboot_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.reboot_host,
nonexitent_hostname)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('02d79bb9-eb57-4612-abf6-2cb38897d2f8')
def test_reboot_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reboot_host,
hostname)
|
{
"content_hash": "1eae68df72df82bceb996e7bcfbacc54",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 76,
"avg_line_length": 38.93491124260355,
"alnum_prop": 0.5861702127659575,
"repo_name": "rzarzynski/tempest",
"id": "95be59efca784675cf68baccc31f09edd8ef73f9",
"size": "7201",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/compute/admin/test_hosts_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2888467"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines Round.score as a
    # DecimalField with 4 total digits, 2 decimal places, default 0.

    dependencies = [
        ('round', '0013_plot_batch'),
    ]
    operations = [
        migrations.AlterField(
            model_name='round',
            name='score',
            field=models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=4),
        ),
    ]
|
{
"content_hash": "96aff138dadac678aa0d3b3e64e6573f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 92,
"avg_line_length": 22.94736842105263,
"alnum_prop": 0.6123853211009175,
"repo_name": "adminq80/Interactive_estimation",
"id": "4759382af32499e3b8fd3617fe3ebc22abe8f173",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/round/migrations/0014_auto_20161102_2154.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7033"
},
{
"name": "HTML",
"bytes": "56962"
},
{
"name": "JavaScript",
"bytes": "21105"
},
{
"name": "Nginx",
"bytes": "2324"
},
{
"name": "Python",
"bytes": "274453"
},
{
"name": "Shell",
"bytes": "8085"
}
],
"symlink_target": ""
}
|
from pkg_resources import parse_version
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.utils import six
from wagtail.core.models import Page, PageBase
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.search import index
from wagtail.core import __version__ as WAGTAIL_VERSION
from taggit.models import TaggedItemBase, Tag as TaggitTag
from modelcluster.fields import ParentalKey
from colorful.fields import RGBColorField
from .abstracts import EntryAbstract
from .utils import import_model, get_image_model_path
from .routes import BlogRoutes
from .managers import TagManager, CategoryManager, BlogManager
# Resolve the entry base model: honour the PUPUT_ENTRY_MODEL setting if
# present, otherwise fall back to the bundled EntryAbstract.
Entry = import_model(getattr(settings, 'PUPUT_ENTRY_MODEL', EntryAbstract))
class BlogPage(BlogRoutes, Page):
    """Wagtail page acting as the root of a Puput blog.

    Combines ``BlogRoutes`` (URL routing mixin) with a regular Wagtail
    ``Page`` and stores blog-wide appearance/behaviour settings.
    """
    # Presentation fields.
    description = models.CharField(
        verbose_name=_('Description'),
        max_length=255,
        blank=True,
        help_text=_("The blog description that will appear under the title.")
    )
    header_image = models.ForeignKey(
        get_image_model_path(),
        verbose_name=_('Header image'),
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    main_color = RGBColorField(_('Blog Main Color'), default="#4D6AE0")
    # Sidebar/widget visibility toggles.
    display_comments = models.BooleanField(default=False, verbose_name=_('Display comments'))
    display_categories = models.BooleanField(default=True, verbose_name=_('Display categories'))
    display_tags = models.BooleanField(default=True, verbose_name=_('Display tags'))
    display_popular_entries = models.BooleanField(default=True, verbose_name=_('Display popular entries'))
    display_last_entries = models.BooleanField(default=True, verbose_name=_('Display last entries'))
    display_archive = models.BooleanField(default=True, verbose_name=_('Display archive'))
    # Disqus comment integration.
    disqus_api_secret = models.TextField(blank=True)
    disqus_shortname = models.CharField(max_length=128, blank=True)
    # Listing limits.
    num_entries_page = models.IntegerField(default=5, verbose_name=_('Entries per page'))
    num_last_entries = models.IntegerField(default=3, verbose_name=_('Last entries limit'))
    num_popular_entries = models.IntegerField(default=3, verbose_name=_('Popular entries limit'))
    num_tags_entry_header = models.IntegerField(default=5, verbose_name=_('Tags limit entry header'))
    short_feed_description = models.BooleanField(default=True, verbose_name=_('Use short description in feeds'))
    # Custom manager (kept separate from Page's default manager).
    extra = BlogManager()
    content_panels = Page.content_panels + [
        FieldPanel('description', classname="full"),
        ImageChooserPanel('header_image'),
        FieldPanel('main_color')
    ]
    settings_panels = Page.settings_panels + [
        MultiFieldPanel(
            [
                FieldPanel('display_categories'),
                FieldPanel('display_tags'),
                FieldPanel('display_popular_entries'),
                FieldPanel('display_last_entries'),
                FieldPanel('display_archive'),
            ],
            heading=_("Widgets")
        ),
        MultiFieldPanel(
            [
                FieldPanel('num_entries_page'),
                FieldPanel('num_last_entries'),
                FieldPanel('num_popular_entries'),
                FieldPanel('num_tags_entry_header'),
            ],
            heading=_("Parameters")
        ),
        MultiFieldPanel(
            [
                FieldPanel('display_comments'),
                FieldPanel('disqus_api_secret'),
                FieldPanel('disqus_shortname'),
            ],
            heading=_("Comments")
        ),
        MultiFieldPanel(
            [
                FieldPanel('short_feed_description'),
            ],
            heading=_("Feeds")
        ),
    ]
    subpage_types = ['puput.EntryPage']
    def get_entries(self):
        """Return live descendant entries, newest first."""
        return EntryPage.objects.descendant_of(self).live().order_by('-date').select_related('owner')
    def get_context(self, request, *args, **kwargs):
        """Expose entries, blog page and search state to templates."""
        context = super(BlogPage, self).get_context(request, *args, **kwargs)
        # NOTE(review): ``self.entries`` is not set in this class --
        # presumably assigned by a BlogRoutes view before rendering; confirm.
        context['entries'] = self.entries
        context['blog_page'] = self
        context['search_type'] = getattr(self, 'search_type', "")
        context['search_term'] = getattr(self, 'search_term', "")
        return context
    @property
    def last_url_part(self):
        """
        Get the BlogPage url without the domain
        """
        return self.get_url_parts()[-1]
    class Meta:
        verbose_name = _('Blog')
@register_snippet
class Category(models.Model):
    """Hierarchical blog category, registered as a Wagtail snippet."""
    name = models.CharField(max_length=80, unique=True, verbose_name=_('Category name'))
    slug = models.SlugField(unique=True, max_length=80)
    parent = models.ForeignKey(
        'self',
        blank=True,
        null=True,
        related_name="children",
        verbose_name=_('Parent category'),
        on_delete=models.SET_NULL
    )
    description = models.CharField(max_length=500, blank=True, verbose_name=_('Description'))
    objects = CategoryManager()
    def __str__(self):
        return self.name
    def clean(self):
        """Reject self-parenting and two-node parent cycles.

        NOTE(review): only cycles of length <= 2 are detected here;
        deeper circular chains would pass validation.
        """
        if self.parent:
            parent = self.parent
            if self.parent == self:
                raise ValidationError(_('Parent category cannot be self.'))
            if parent.parent and parent.parent == self:
                raise ValidationError(_('Cannot have circular Parents.'))
    def save(self, *args, **kwargs):
        # Derive the slug from the name when none was provided.
        if not self.slug:
            self.slug = slugify(self.name)
        return super(Category, self).save(*args, **kwargs)
    class Meta:
        ordering = ['name']
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")
class CategoryEntryPage(models.Model):
    """Through-model linking an EntryPage to a Category."""
    category = models.ForeignKey(Category, related_name="+", verbose_name=_('Category'), on_delete=models.CASCADE)
    page = ParentalKey('EntryPage', related_name='entry_categories')
    panels = [
        FieldPanel('category')
    ]
    def __str__(self):
        return str(self.category)
class TagEntryPage(TaggedItemBase):
    """Taggit through-model attaching tags to an EntryPage."""
    content_object = ParentalKey('EntryPage', related_name='entry_tags')
@register_snippet
class Tag(TaggitTag):
    """Proxy over taggit's Tag, registered as a snippet with a custom manager."""
    objects = TagManager()
    class Meta:
        proxy = True
class EntryPageRelated(models.Model):
    """Directed link between two entries (the "related entries" feature)."""
    entrypage_from = ParentalKey('EntryPage', verbose_name=_("Entry"), related_name='related_entrypage_from')
    entrypage_to = ParentalKey('EntryPage', verbose_name=_("Entry"), related_name='related_entrypage_to')
    def __str__(self):
        return str(self.entrypage_to)
def _add_owner_panel():
    """Return the owner FieldPanel when the installed Wagtail supports it.

    Wagtail made ``Page.owner`` editable in 1.11 (see
    https://github.com/wagtail/wagtail/pull/3581), so the panel is only
    added on 1.11 or newer; older versions get an empty list.
    """
    owner_is_editable = parse_version(WAGTAIL_VERSION) >= parse_version('1.11')
    return [FieldPanel('owner')] if owner_is_editable else []
class EntryPage(six.with_metaclass(PageBase, Entry, Page)):
    """Blog entry page combining the configurable Entry model with Page."""
    # Search
    search_fields = Page.search_fields + [
        index.SearchField('body'),
        index.SearchField('excerpt'),
        index.FilterField('page_ptr_id')
    ]
    # Panels -- merged from the (possibly project-overridden) Entry model.
    content_panels = getattr(Entry, 'content_panels', [])
    promote_panels = Page.promote_panels + getattr(Entry, 'promote_panels', [])
    settings_panels = Page.settings_panels + [
        FieldPanel('date')
    ] + _add_owner_panel() + getattr(Entry, 'settings_panels', [])
    # Parent and child settings
    parent_page_types = ['puput.BlogPage']
    subpage_types = []
    @property
    def blog_page(self):
        """Parent BlogPage instance (specific subclass)."""
        return self.get_parent().specific
    @property
    def related(self):
        """Entries this one links to via EntryPageRelated."""
        return [related.entrypage_to for related in self.related_entrypage_from.all()]
    @property
    def has_related(self):
        # True when at least one outgoing related-entry link exists.
        return self.related_entrypage_from.count() > 0
    def get_context(self, request, *args, **kwargs):
        """Expose the parent blog page to templates."""
        context = super(EntryPage, self).get_context(request, *args, **kwargs)
        context['blog_page'] = self.blog_page
        return context
    class Meta:
        verbose_name = _('Entry')
        verbose_name_plural = _('Entries')
|
{
"content_hash": "f2aabf086d6487261f94584ee18e4d4e",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 114,
"avg_line_length": 33.55555555555556,
"alnum_prop": 0.6413197729422895,
"repo_name": "csalom/puput",
"id": "04c5db42bce50739f9a6339e1fecbe712ae09b34",
"size": "8456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puput/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11894"
},
{
"name": "HTML",
"bytes": "21461"
},
{
"name": "JavaScript",
"bytes": "4322"
},
{
"name": "Python",
"bytes": "51006"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add nullable Click.place FK to Place.

    The ``models`` dict below is South's frozen snapshot of the ORM state
    after this migration; it is generated data and must not be edited.
    """
    def forwards(self, orm):
        # Adding field 'Click.place'
        db.add_column('banner_rotator_click', 'place', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='clicks_list', null=True, to=orm['banner_rotator.Place']), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Click.place'
        db.delete_column('banner_rotator_click', 'place_id')
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'banner_rotator.banner': {
            'Meta': {'object_name': 'Banner'},
            'alt': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'banners'", 'null': 'True', 'blank': 'True', 'to': "orm['banner_rotator.Campaign']"}),
            'clicks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'finish_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'max_clicks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'max_views': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'places': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'banners'", 'symmetrical': 'False', 'to': "orm['banner_rotator.Place']"}),
            'start_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'url_target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'weight': ('django.db.models.fields.IntegerField', [], {})
        },
        'banner_rotator.campaign': {
            'Meta': {'object_name': 'Campaign'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'banner_rotator.click': {
            'Meta': {'object_name': 'Click'},
            'banner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clicks_list'", 'to': "orm['banner_rotator.Banner']"}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'place': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'clicks_list'", 'null': 'True', 'to': "orm['banner_rotator.Place']"}),
            'referrer': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'banner_clicks'", 'null': 'True', 'to': "orm['auth.User']"}),
            'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'banner_rotator.place': {
            'Meta': {'unique_together': "(('slug',),)", 'object_name': 'Place'},
            'height': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'width': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['banner_rotator']
|
{
"content_hash": "64911be5f2e1f7917bed8b4e0dde1631",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 214,
"avg_line_length": 76.21698113207547,
"alnum_prop": 0.5511820769897264,
"repo_name": "martinogden/django-banner-rotator",
"id": "ecf5cc76a1cddefef596a5e8cd74777bb9dd3c04",
"size": "8097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "banner_rotator/migrations/0010_auto__add_field_click_place.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82895"
}
],
"symlink_target": ""
}
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import json
from nose.tools import assert_equal
from streamalert.classifier.parsers import CSVParser
class TestCSVParser:
    """Test class for CSVParser"""
    # pylint: disable=no-self-use,protected-access

    @classmethod
    def _default_schema(cls):
        """Simple host/date/message schema shared by most tests."""
        return OrderedDict([('host', 'string'), ('date', 'string'), ('message', 'string')])

    def _assert_parse(self, options, data, expected_records):
        """Parse *data* with CSVParser(options) and assert a successful parse.

        Factors out the parse/assert boilerplate previously duplicated in
        every successful-parse test below.
        """
        parser = CSVParser(options)
        assert_equal(parser.parse(data), True)
        assert_equal(parser.parsed_records, expected_records)

    def test_basic_parsing(self):
        """CSVParser - Basic CSV data, str"""
        options = {
            'schema': self._default_schema(),
            'configuration': {
                'delimiter': ','
            }
        }
        data = 'test-01.stg.foo.net,01-01-2018,test message!!!!'
        expected_result = [
            {
                'date': '01-01-2018',
                'host': 'test-01.stg.foo.net',
                'message': 'test message!!!!'
            }
        ]
        self._assert_parse(options, data, expected_result)

    def test_basic_parsing_bytes(self):
        """CSVParser - Basic CSV data, bytes"""
        options = {
            'schema': self._default_schema(),
            'configuration': {
                'delimiter': ','
            }
        }
        data = b'test-01.stg.foo.net,01-01-2018,test message!!!!'
        expected_result = [
            {
                'date': '01-01-2018',
                'host': 'test-01.stg.foo.net',
                'message': 'test message!!!!'
            }
        ]
        self._assert_parse(options, data, expected_result)

    def test_csv_parsing_space_delimited(self):
        """CSVParser - Space separated data"""
        options = {
            'schema': self._default_schema(),
            'configuration': {
                'delimiter': ' '
            }
        }
        data = 'test-01.stg.foo.net 02-02-2018 "test message!!!!"'
        expected_result = [
            {
                'date': '02-02-2018',
                'host': 'test-01.stg.foo.net',
                'message': 'test message!!!!'
            }
        ]
        self._assert_parse(options, data, expected_result)

    def test_csv_parsing_alt_quoted(self):
        """CSVParser - Single Quoted Field"""
        options = {
            'schema': self._default_schema(),
            'configuration': {
                'quotechar': '\''
            }
        }
        data = ('test-host,datetime-value,\'CREATE TABLE test ( id '
                'INTEGER, type VARCHAR(64) NOT NULL)\'')
        expected_result = [
            {
                'host': 'test-host',
                'date': 'datetime-value',
                'message': 'CREATE TABLE test ( id INTEGER, type VARCHAR(64) NOT NULL)'
            }
        ]
        self._assert_parse(options, data, expected_result)

    def test_csv_parsing_from_json(self):
        """CSVParser - CSV within JSON"""
        options = {
            'schema': self._default_schema(),
            'configuration': {
                'envelope_keys': {
                    'env_key_01': 'string',
                    'env_key_02': 'string'
                },
                'json_path': 'logEvents[*].message'
            }
        }
        data = json.dumps({
            'env_key_01': 'DATA_MESSAGE',
            'env_key_02': '123456789012',
            'logEvents': [
                {
                    'uuid': '0F08CD2B-F21D-4F3A-9231-B527AD42AB91',
                    'message': 'host-name,01-01-2018,contents'
                },
                {
                    'uuid': '0F08CD2B-F21D-4F3A-9231-B527AD42AB91',
                    'message': 'host-name-02,02-02-2018,contents-02'
                }
            ]
        })
        expected_result = [
            {
                'host': 'host-name',
                'date': '01-01-2018',
                'message': 'contents',
                'streamalert:envelope_keys': {
                    'env_key_01': 'DATA_MESSAGE',
                    'env_key_02': '123456789012'
                }
            },
            {
                'host': 'host-name-02',
                'date': '02-02-2018',
                'message': 'contents-02',
                'streamalert:envelope_keys': {
                    'env_key_01': 'DATA_MESSAGE',
                    'env_key_02': '123456789012'
                }
            }
        ]
        self._assert_parse(options, data, expected_result)

    def test_nested_csv(self):
        """CSVParser - Nested CSV"""
        options = {
            'schema': OrderedDict([
                ('date', 'string'),
                ('time', 'integer'),
                ('host', 'string'),
                ('env', 'string'),
                ('message', OrderedDict([
                    ('application', 'string'),
                    ('role', 'string'),
                    ('cluster_host', 'string'),
                    ('cluster_size', 'string'),
                    ('result', 'string')
                ]))
            ])
        }
        data = ('"Jan 10, 2017","1485739910","host1.prod.test","Corp",'
                '"chef,web-server,1,100,fail"')
        expected_result = [
            {
                'date': 'Jan 10, 2017',
                'time': 1485739910,
                'host': 'host1.prod.test',
                'env': 'Corp',
                'message': {
                    'application': 'chef',
                    'role': 'web-server',
                    'cluster_host': '1',
                    'cluster_size': '100',
                    'result': 'fail'
                }
            }
        ]
        self._assert_parse(options, data, expected_result)

    def test_nested_csv_invalid(self):
        """CSVParser - Nested CSV, Invalid"""
        # Failure path: the helper is not used because the failed rows
        # land in parser.invalid_parses, not parser.parsed_records.
        options = {
            'schema': OrderedDict([
                ('date', 'string'),
                ('message', OrderedDict([
                    ('application', 'string'),
                    ('role', 'string')
                ]))
            ])
        }
        data = '"Jan 10, 2017","chef,web-server,1"'
        parser = CSVParser(options)
        assert_equal(parser.parse(data), False)
        expected_result = [
            [
                'Jan 10, 2017',
                'chef,web-server,1'
            ]
        ]
        assert_equal(parser.invalid_parses, expected_result)
|
{
"content_hash": "9e4df775b1b7736cc3a3dcb87e823f14",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 91,
"avg_line_length": 30.401574803149607,
"alnum_prop": 0.4693084693084693,
"repo_name": "airbnb/streamalert",
"id": "9b14255a86b394fc3e0aacb891573a48ad8fac6a",
"size": "7722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/streamalert/classifier/test_parsers_csv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "142275"
},
{
"name": "Python",
"bytes": "2209853"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.