| text (stringlengths 4 to 1.02M) | meta (dict) |
|---|---|
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource, Circle, HoverTool
from bokeh.sampledata.glucose import data
output_file("hover_glyph.html")
x, y = data.loc['2010-10-06'].index.to_series(), data.loc['2010-10-06']['glucose']
# Basic plot setup
p = figure(width=600, height=300, x_axis_type="datetime", tools="", toolbar_location=None, title='Hover over points')
p.line(x, y, line_dash="4 4", line_width=1, color='gray')
# Add a circle that is visible only when hovered
# source = ColumnDataSource({'x': x, 'y': y})
# invisible_circle = Circle(x='x', y='y', fill_color='gray', fill_alpha=0.5, line_color="white", size=20)
# visible_circle = Circle(x='x', y='y', fill_color='firebrick', fill_alpha=0.9, line_color="blue", size=20)
# cr = p.add_glyph(source, invisible_circle, hover_glyph=visible_circle)
cr = p.circle(x, y, fill_color="grey", alpha=0.1, line_color=None, size=20,
hover_fill_color="firebrick", hover_alpha=0.5, hover_line_color="white")
p.add_tools(HoverTool(tooltips=None, renderers=[cr], mode='hline'))
show(p)
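# Note on the mode used above: with mode='hline', hovering highlights every
# point that lies along the horizontal line through the cursor, not just the
# point directly under it.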
|
{
"content_hash": "3b5cbcacc0a3bc358cd76a0b01027900",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 117,
"avg_line_length": 45.166666666666664,
"alnum_prop": 0.7001845018450185,
"repo_name": "maxalbert/bokeh",
"id": "1954f147c7e9c9bd307e1bc91fb7df789ec0bda5",
"size": "1084",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/plotting/file/hover_glyph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413523"
},
{
"name": "CoffeeScript",
"bytes": "2166306"
},
{
"name": "HTML",
"bytes": "72855"
},
{
"name": "JavaScript",
"bytes": "7847"
},
{
"name": "Makefile",
"bytes": "5894"
},
{
"name": "Python",
"bytes": "1570619"
},
{
"name": "Shell",
"bytes": "18074"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'HostMaintenance.hostname'
db.add_column(u'maintenance_hostmaintenance', 'hostname',
self.gf('django.db.models.fields.CharField')(
default=u'', max_length=255),
keep_default=False)
# Changing field 'HostMaintenance.host'
db.alter_column(u'maintenance_hostmaintenance', 'host_id', self.gf(
'django.db.models.fields.related.ForeignKey')(null=True, on_delete=models.SET_NULL, to=orm['physical.Host']))
def backwards(self, orm):
# Deleting field 'HostMaintenance.hostname'
db.delete_column(u'maintenance_hostmaintenance', 'hostname')
# Changing field 'HostMaintenance.host'
db.alter_column(u'maintenance_hostmaintenance', 'host_id', self.gf(
'django.db.models.fields.related.ForeignKey')(default=None, to=orm['physical.Host']))
models = {
u'maintenance.hostmaintenance': {
'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}),
'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenance': {
'Meta': {'object_name': 'Maintenance'},
'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'host_query': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_script': ('django.db.models.fields.TextField', [], {}),
'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'query_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['maintenance']
|
{
"content_hash": "cf3db3e70fd8190fecfd6e8a293c5e8e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 191,
"avg_line_length": 68.65277777777777,
"alnum_prop": 0.5737406433340076,
"repo_name": "globocom/database-as-a-service",
"id": "ec237010e3a587722a15500dc4b2bc1021e65a44",
"size": "4967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/maintenance/migrations/0012_auto__add_field_hostmaintenance_hostname__chg_field_hostmaintenance_ho.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import re
import getopt
import sys
import arch_handler as ah
from my_regex import *
#==================================================================
# Global functions
def runCommand(command, arguments):
ret = subprocess.check_output([command] + arguments)
return ret
def makeWotan(wotanPath):
print("Making Wotan...")
os.chdir(wotanPath)
ret = runCommand("make", [])
return ret
def makeVPR(vprPath):
print("Making VPR...")
os.chdir(vprPath)
ret = runCommand("make", [])
return ret
# Copied from .../wotan/python/wotan_tester.py
def get_arch_to_path(arch_point):
assert isinstance(arch_point, Arch_Point_Info)
arch_path = ''
sb_pattern = arch_point.switchblock_pattern
wire_topology = arch_point.wire_topology
wirelengths = {}
wirelengths['semi-global'] = arch_point.s_wirelength
if arch_point.g_wirelength is not None:
wirelengths['global'] = arch_point.g_wirelength
global_via_repeat = 4
fc_in = arch_point.fcin
fc_out = arch_point.fcout
lut_size = str(arch_point.lut_size) + 'LUT'
arch_path = ah.get_path_to_arch(sb_pattern, wire_topology, wirelengths, global_via_repeat, \
fc_in, fc_out, lut_size)
return arch_path
#==================================================================
# Class copied from .../wotan/python/wotan_tester.py
class Arch_Point_Info:
def __init__(self, lut_size, # Size of the LUT (i.e. K)
s_wirelength, # Semi-global wirelength
g_wirelength, # Global-layer wirelength; Specify None if not used
switchblock_pattern, # wilton/universal/subset
wire_topology, # 'single-wirelength', 'on-cb-off-cb', 'on-cb-off-sb',
# 'on-cb-off-cbsb', 'on-cbsb-off-cbsb', 'on-sb-off-sb'
fcin, # cb input flexibility
fcout, # cb output flexibility
arch_string = None): # Optional string that describes this architecture
if lut_size not in [4, 6]:
raise BaseException('Unexpected LUT size: %d' % (lut_size))
if switchblock_pattern not in ['wilton', 'universal', 'subset']:
raise BaseException('Unexpected switch block pattern: %s' % (switchblock_pattern))
if wire_topology not in ['single-wirelength', 'on-cb-off-cb', 'on-cb-off-sb', \
'on-cb-off-cbsb', 'on-cbsb-off-cbsb', 'on-sb-off-sb']:
raise BaseException('Unexpected wire topology: %s' % (wire_topology))
self.lut_size = lut_size
self.s_wirelength = s_wirelength
self.g_wirelength = g_wirelength
self.switchblock_pattern = switchblock_pattern
self.wire_topology = wire_topology
self.fcin = fcin
self.fcout = fcout
self.arch_string = arch_string
# Overload constructor -- initialize based on a string. Expecting string to be in
# the format of this class' 'as_str' function.
@classmethod
def from_str(cls, s):
regex_list = {
's_wirelength' : r'.*_s(\d+)_.*',
'g_wirelength' : r'.*_g(\d+)_.*',
'K' : r'.*k(\d)_.*',
'wire_topology' : r'.*_topology-([-\w]+)_.*',
'fcin' : r'.*fcin(\d+\.*\d*)',
'fcout' : r'.*fcout(\d+\.*\d*)',
}
# Get wirelength, fcin, fcout
tmp_dict = {}
for key in regex_list:
try:
tmp_dict[key] = regex_last_token(s, regex_list[key])
except RegexException as exception:
if key == 'g_wirelength':
# OK if global wirelength wasn't specified
tmp_dict[key] = None
continue
else:
raise
s_wirelength = int(tmp_dict['s_wirelength'])
g_wirelength = tmp_dict['g_wirelength']
if g_wirelength is not None:
g_wirelength = int(g_wirelength)
lut_size = int(tmp_dict['K'])
wire_topology = tmp_dict['wire_topology']
fcin = float(tmp_dict['fcin'])
fcout = float(tmp_dict['fcout'])
# Get switchblock
switchblock = None
if 'subset' in s:
switchblock = 'subset'
elif 'universal' in s:
switchblock = 'universal'
elif 'wilton' in s:
switchblock = 'wilton'
else:
print('could not find a switchblock specification in string:\n\t' + s)
sys.exit()
return cls(lut_size, s_wirelength, g_wirelength, switchblock, wire_topology, fcin, fcout, s)
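# Example (sketch): parsing one of the architecture strings used later in this
# script, e.g. Arch_Point_Info.from_str('k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.4'),
# yields lut_size=4, s_wirelength=1, switchblock='subset', fcin=0.3, fcout=0.4.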
# Returns a string describing an object of this class
def as_str(self):
return self.arch_string
def __str__(self):
return self.arch_string
def __repr__(self):
return self.arch_string
#==================================================================
# Class for running architecture through Wotan
class Wotan:
def __init__(self, archPath, vtrPath, vprPath, wotanPath, wotanOpts, lut_size):
self.archPath = archPath
self.vtrPath = vtrPath
self.vprPath = vprPath
self.wotanPath = wotanPath
self.wotanOpts = wotanOpts
self.lut_size = lut_size
def runWotan(self):
benchmark = 'vtr_benchmarks_blif/sha.blif'
if self.lut_size == 4:
benchmark = '4LUT_DSP_vtr_benchmarks_blif/sha.pre-vpr.blif'
vprOpts = self.archPath + ' ' + self.vtrPath + '/vtr_flow/benchmarks/' + benchmark + \
' -dump_rr_structs_file ./dumped_rr_structs.txt ' + \
'-pack -place -route_chan_width ' + str(chanWidth)  # NOTE: chanWidth must be defined (e.g. at module level) before runWotan() is called
# Run VPR to get RRG
ret = self._runVPRGetRRG(vprOpts)
assert ret
# Run Wotan to get routability metric
ret = self._runWotan()
assert ret
def _runVPRGetRRG(self, vprOpts):
print("Running VPR to get RRG...")
os.chdir(self.vprPath)
argsList = vprOpts.split()
output = runCommand("./vpr", argsList)
return output
def _runWotan(self):
print("Running Wotan to get routability metric...")
os.chdir(self.wotanPath)
argsList = self.wotanOpts.split()
output = runCommand("./wotan", argsList)
return output
#==================================================================
# Generates the custom architecture file
class GenerateArch:
def __init__(self, arch_str):
self.arch_str = arch_str
def getArch(self):
#arch_str = 'k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.4'
arch = Arch_Point_Info.from_str(self.arch_str)
return arch
def getCustomArch(self, archPoint):
# Returns the path to the architecture path
assert isinstance(archPoint, Arch_Point_Info)
archPath = get_arch_to_path(archPoint)
print "Arch File Path: ", archPath
#==================================================================
# Main function
def main(arch_str):
base_path = "/nfs/ug/homes-4/k/kongnath/code"
vtrPath = base_path + "/vtr"
vprPath = vtrPath + "/vpr"
wotan_path = base_path + "/wotan"
arch_dir = wotan_path + "/arch"
ga = GenerateArch(arch_str)
arch = ga.getArch()
ga.getCustomArch(arch)
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "a:")
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
arch = ""
for o, a in opts:
if o == '-a':
arch = a
else:
sys.exit(2)
# arch = 'k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.2'
# arch = 'k4_s1_subset_topology-single-wirelength_fcin0.2_fcout0.1'
if not arch:
print "Need arch name."
sys.exit(2)
print(arch)
main(arch)
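# Example invocation (sketch), using one of the arch strings shown above:
# python generate_arch.py -a k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.4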
|
{
"content_hash": "c4d7875d5b41ea43245f2275a2be6a6f",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 94,
"avg_line_length": 28.497907949790793,
"alnum_prop": 0.6366172368227867,
"repo_name": "wotan-fpga/wotan",
"id": "e94159d6f7de70e960fae34a35165b63d3e5c4c3",
"size": "6865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/generate_arch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4770"
},
{
"name": "C++",
"bytes": "551250"
},
{
"name": "Makefile",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "124266"
},
{
"name": "Shell",
"bytes": "1069"
}
],
"symlink_target": ""
}
|
from django import forms
from cyder.base.eav.forms import get_eav_form
from cyder.cydhcp.workgroup.models import Workgroup, WorkgroupAV
class WorkgroupForm(forms.ModelForm):
class Meta:
model = Workgroup
WorkgroupAVForm = get_eav_form(WorkgroupAV, Workgroup)
|
{
"content_hash": "fc4a2cfaf49b65a2fe232558018045cc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 21.307692307692307,
"alnum_prop": 0.7725631768953068,
"repo_name": "murrown/cyder",
"id": "a9b9a871d28a4764dfb7693ea647326d7012450b",
"size": "277",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cyder/cydhcp/workgroup/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128021"
},
{
"name": "CoffeeScript",
"bytes": "4769"
},
{
"name": "HTML",
"bytes": "70622"
},
{
"name": "JavaScript",
"bytes": "293466"
},
{
"name": "Makefile",
"bytes": "11293"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "2452428"
},
{
"name": "Shell",
"bytes": "16784"
}
],
"symlink_target": ""
}
|
"""Loads datasets, dashboards and slices in a new superset instance"""
import json
import os
import zlib
from io import BytesIO
from typing import Any, Dict, List, Set
from urllib import request
from superset import app, db
from superset.connectors.connector_registry import ConnectorRegistry
from superset.models.slice import Slice
BASE_URL = "https://github.com/apache-superset/examples-data/blob/master/"
misc_dash_slices: Set[str] = set() # slices assembled in a 'Misc Chart' dashboard
def get_table_connector_registry() -> Any:
return ConnectorRegistry.sources["table"]
def get_examples_folder() -> str:
return os.path.join(app.config["BASE_DIR"], "examples")
def update_slice_ids(layout_dict: Dict[Any, Any], slices: List[Slice]) -> None:
charts = [
component
for component in layout_dict.values()
if isinstance(component, dict) and component["type"] == "CHART"
]
sorted_charts = sorted(charts, key=lambda k: k["meta"]["chartId"])
for i, chart_component in enumerate(sorted_charts):
if i < len(slices):
chart_component["meta"]["chartId"] = int(slices[i].id)
chart_component["meta"]["uuid"] = str(slices[i].uuid)
def merge_slice(slc: Slice) -> None:
o = db.session.query(Slice).filter_by(slice_name=slc.slice_name).first()
if o:
db.session.delete(o)
db.session.add(slc)
db.session.commit()
def get_slice_json(defaults: Dict[Any, Any], **kwargs: Any) -> str:
defaults_copy = defaults.copy()
defaults_copy.update(kwargs)
return json.dumps(defaults_copy, indent=4, sort_keys=True)
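# Example (sketch; 'viz_type' and 'metric' are illustrative keys):
# get_slice_json({"viz_type": "table"}, metric="count")
# returns the merged dict serialized via json.dumps(indent=4, sort_keys=True).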
def get_example_data(
filepath: str, is_gzip: bool = True, make_bytes: bool = False
) -> BytesIO:
content = request.urlopen( # pylint: disable=consider-using-with
f"{BASE_URL}{filepath}?raw=true"
).read()
if is_gzip:
content = zlib.decompress(content, zlib.MAX_WBITS | 16)
if make_bytes:
content = BytesIO(content)
return content
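# Example (sketch; the filename is illustrative):
# raw = get_example_data("birth_names.json.gz")                    # decompressed bytes
# buf = get_example_data("birth_names.json.gz", make_bytes=True)   # BytesIO wrapper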
|
{
"content_hash": "1ed868e0ce0ac573b764ee12f906df64",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 31.650793650793652,
"alnum_prop": 0.6740220661985958,
"repo_name": "apache/incubator-superset",
"id": "d8b2c59fb777d7e462a70e1d0d8f0643a676819a",
"size": "2779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/examples/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "156654"
},
{
"name": "Dockerfile",
"bytes": "4402"
},
{
"name": "HTML",
"bytes": "125147"
},
{
"name": "JavaScript",
"bytes": "2357868"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Python",
"bytes": "2295567"
},
{
"name": "Shell",
"bytes": "29230"
},
{
"name": "Smarty",
"bytes": "1826"
},
{
"name": "TypeScript",
"bytes": "94013"
}
],
"symlink_target": ""
}
|
'''
Script for building and uploading a STM8 project with dependency auto-detection
'''
# set general options
UPLOAD = 'BSL' # select 'BSL' or 'SWIM'
TERMINAL = False # set True to open terminal after upload
RESET = 1 # STM8 reset: 0=skip, 1=manual, 2=DTR line (RS232), 3=send 'Re5eT!' @ 115.2kBaud, 4=Arduino pin 8, 5=Raspi pin 12
OPTIONS = '' # e.g. device for SPL ('-DSTM8S105', see stm8s.h)
# set path to root of STM8 templates
ROOT_DIR = '../../../'
LIB_ROOT = ROOT_DIR + 'Library/'
TOOL_DIR = ROOT_DIR + 'Tools/'
OBJDIR = 'output'
TARGET = 'main.ihx'
# set OS specific
import platform
if platform.system() == 'Windows':
PORT = 'COM10'
SWIM_PATH = 'C:/Programme/STMicroelectronics/st_toolset/stvp/'
SWIM_TOOL = 'ST-LINK'
SWIM_NAME = 'STM8S105x6' # STM8 Discovery
#SWIM_NAME = 'STM8S208xB' # muBoard
MAKE_TOOL = 'mingw32-make.exe'
else:
PORT = '/dev/ttyUSB0'
SWIM_TOOL = 'stlink'
SWIM_NAME = 'stm8s105c6' # STM8 Discovery
#SWIM_NAME = 'stm8s208?b' # muBoard
MAKE_TOOL = 'make'
# import required modules
import sys
import os
import argparse
sys.path.insert(0,TOOL_DIR) # assert that TOOL_DIR is searched first
import misc
from buildProject import createMakefile, buildProject
from uploadHex import stm8gal, stm8flash, STVP
##################
# main program
##################
# commandline parameters with defaults
parser = argparse.ArgumentParser(description="compile and upload STM8 project")
parser.add_argument("--skipmakefile", default=False, action="store_true" , help="skip creating Makefile")
parser.add_argument("--skipbuild", default=False, action="store_true" , help="skip building project")
parser.add_argument("--skipupload", default=False, action="store_true" , help="skip uploading hexfile")
parser.add_argument("--skipterminal", default=False, action="store_true" , help="skip opening terminal")
parser.add_argument("--skippause", default=False, action="store_true" , help="skip pause before exit")
args = parser.parse_args()
# create Makefile
if args.skipmakefile == False:
createMakefile(workdir='.', libroot=LIB_ROOT, outdir=OBJDIR, target=TARGET, options=OPTIONS)
# build target
if args.skipbuild == False:
buildProject(workdir='.', make=MAKE_TOOL)
# upload code via UART bootloader
if args.skipupload == False:
if UPLOAD == 'BSL':
stm8gal(tooldir=TOOL_DIR, port=PORT, outdir=OBJDIR, target=TARGET, reset=RESET)
# upload code via SWIM. Use stm8flash on Linux, STVP on Windows (due to libusb issues)
if UPLOAD == 'SWIM':
if platform.system() == 'Windows':
STVP(tooldir=SWIM_PATH, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
else:
stm8flash(tooldir=TOOL_DIR, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
# if specified open serial console after upload
if args.skipterminal == False:
if TERMINAL == True:
cmd = 'python '+TOOL_DIR+'terminal.py -p '+PORT
exitcode = os.system(cmd)
if (exitcode != 0):
sys.stderr.write('error '+str(exitcode)+'\n\n')
misc.Exit(exitcode)
# wait for return, then close window
if args.skippause == False:
if (sys.version_info.major == 3):
input("\npress return to exit ... ")
else:
raw_input("\npress return to exit ... ")
sys.stdout.write('\n\n')
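# Example invocation (sketch): build only, skipping upload and terminal:
# python build_upload.py --skipupload --skipterminal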
# END OF MODULE
|
{
"content_hash": "6706e17f5a396116031373a8ded4c72b",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 137,
"avg_line_length": 34.683673469387756,
"alnum_prop": 0.6743159752868491,
"repo_name": "gicking/STM8_templates",
"id": "85485c77770ec2fd16cc435b26a76d30c287b74d",
"size": "3418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Projects/General_Examples/Basic_Project/build_upload.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2793475"
},
{
"name": "C++",
"bytes": "107128"
},
{
"name": "CSS",
"bytes": "7094"
},
{
"name": "HTML",
"bytes": "285869"
},
{
"name": "Python",
"bytes": "317044"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
XXXXXXXXX XXX XXXXXXXXXX XXXXXXXX X XXXXXXXXX XXXXXXXX XXX XXXXXX XXXXXXXXX
XXX XXXXXXXXXX XXX XXXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX XX
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX XX
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX X XXXXXXXX XX
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX X XXXXXXXX XX
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX XX XXXXXXXX X
XXXXX XXXXXX X XXXXXXXX XX
XXXXXXXX XXXXXX XX XXXXXXXX X
|
{
"content_hash": "53a5f5c119d65c4318687ce1809443c0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 75,
"avg_line_length": 33.76,
"alnum_prop": 0.726303317535545,
"repo_name": "dnaextrim/django_adminlte_x",
"id": "b5b1ab42d7c058e3ac4de5d5bac7f87cbe1f4c7f",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adminlte/static/plugins/ckeditor/plugins/a11yhelp/dialogs/lang/_translationstatus.txt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "487538"
},
{
"name": "HTML",
"bytes": "1939871"
},
{
"name": "JavaScript",
"bytes": "2949324"
},
{
"name": "PHP",
"bytes": "3841"
},
{
"name": "Python",
"bytes": "11030"
}
],
"symlink_target": ""
}
|
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.quantization.tools import quantize_graph
from tensorflow.python.framework import graph_util
flags = tf.app.flags
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(graph_def, input_map={}, name="")
with tf.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = tf.GraphDef()
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=a,
dtype=tf.float32,
shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=b,
dtype=tf.float32,
shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", tf.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=tf.float32,
shape=[
image_batch_count, image_height, image_width, depth
])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=tf.float32,
shape=[
filter_size, filter_size, depth, filter_count
])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node("Conv2D", conv_name,
[input_constant_name,
filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", tf.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
print("Tensors are different sizes: " + str(len(flat_a)) + " vs " +
str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
print("Tensors have {0} different values ({1}%), with mean difference"
" {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100, mean_difference,
mean_abs_difference))
return False
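# Example: are_tensors_near(np.array([1.0, 2.0]), np.array([1.0, 2.05]), 0.1)
# returns True, since no element differs by more than the tolerance.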
def get_top_value(input_values):
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
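# Example: get_top_value(np.array([[1, 5], [3, 2]])) returns (1, 5),
# the flattened index and value of the largest element.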
def test_graph(float_graph_def, input_map, output_names):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(float_graph_def, input_map,
[output_name + ":0"
for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit")
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(eightbit_graph_def, input_map,
[output_name + ":0"
for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded")
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(weights_rounded_graph_def, input_map,
[output_name + ":0"
for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(tf.test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=tf.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
# These tests are added to test the degenerate case where
# min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_quantize_array(self):
# Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([]), 2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = tf.GraphDef()
shape_constant = quantize_graph.create_constant_node(shape_constant_name,
value=0,
dtype=tf.int32,
shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12],
dtype=tf.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=[13, 14, 15, 16, 17,
18, 19, 20, 21, 22,
23, 24],
dtype=tf.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node("Concat", concat_name,
[shape_constant_name,
a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", tf.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(split_constant_name,
value=1,
dtype=tf.int32,
shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node("Split", split_name,
[split_constant_name,
input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", tf.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(concat_constant_name,
value=1,
dtype=tf.int32,
shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node("Concat", concat_name,
[concat_constant_name,
split_name + ":0",
split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", tf.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", tf.float32)
float_graph_def.node.extend([identity_node])
test_graph(float_graph_def, {}, [identity_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = tf.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=1,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node("Identity", a_identity_name,
[a_constant_name,
"^" + a_check_name,
"^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=1,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node("Identity", b_identity_name,
[b_constant_name,
"^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name,
b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", tf.float32)
graph_def.node.extend([add_node])
expected_output = tf.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=1,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node("Identity", a_identity_name,
[a_constant_name,
"^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=1,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name,
b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", tf.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 4, 2, 5, 3,
6, -1, -4, -2,
-5, -3, -6],
dtype=tf.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(mean_constant_name,
value=[10, 20],
dtype=tf.float32,
shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name, value=[0.25, 0.5], dtype=tf.float32, shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(beta_constant_name,
value=[0.1, 0.6],
dtype=tf.float32,
shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(gamma_constant_name,
value=[0, 0],
dtype=tf.float32,
shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name,
[input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name])
quantize_graph.set_attr_dtype(batch_norm_node, "T", tf.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", tf.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", tf.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", tf.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(offset_constant_name,
value=[1, 2, 3, 4, 5,
6],
dtype=tf.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node("BiasAdd", bias_add_name,
[input_constant_name,
offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", tf.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = tf.GraphDef()
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(a_constant_min_name,
value=2,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(a_constant_max_name,
value=2,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node("Dequantize",
a_dequantize_name,
[a_constant_name,
a_constant_min_name,
a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", tf.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node("QuantizeV2",
a_quantize_name,
[a_dequantize_name,
a_dequantize_name + ":1",
a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", tf.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(b_constant_min_name,
value=3,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(b_constant_max_name,
value=3,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node("Dequantize",
b_dequantize_name,
[b_constant_name,
b_constant_min_name,
b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", tf.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node("QuantizeV2",
b_quantize_name,
[b_dequantize_name,
b_dequantize_name + ":1",
b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", tf.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name,
[a_quantize_name,
b_quantize_name,
a_quantize_name + ":1",
a_quantize_name + ":2",
b_quantize_name + ":1",
b_quantize_name + ":2"])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", tf.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", tf.int32)
graph_def.node.extend([mat_mul_node])
expected_output = tf.GraphDef()
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(a_constant_min_name,
value=2,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(a_constant_max_name,
value=2,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(b_constant_min_name,
value=3,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(b_constant_max_name,
value=3,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name,
[a_constant_name,
b_constant_name,
a_constant_min_name,
a_constant_max_name,
b_constant_min_name,
b_constant_max_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", tf.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", tf.int32)
expected_output.node.extend([mat_mul_node])
rewriter = quantize_graph.GraphRewriter(graph_def, [mat_mul_name])
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "8048ac894adbd230ea8ad56c0ade1a7a",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 80,
"avg_line_length": 51.074561403508774,
"alnum_prop": 0.48226706741090597,
"repo_name": "neilhan/tensorflow",
"id": "4826ea26896c16a925b1a8407d1d7d6ffd057da0",
"size": "35624",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/quantization/tools/quantize_graph_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88078"
},
{
"name": "C++",
"bytes": "12868691"
},
{
"name": "CMake",
"bytes": "72170"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "42531"
},
{
"name": "HTML",
"bytes": "1171130"
},
{
"name": "Java",
"bytes": "51034"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1833434"
},
{
"name": "Makefile",
"bytes": "23390"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "136850"
},
{
"name": "Python",
"bytes": "11825434"
},
{
"name": "Shell",
"bytes": "265831"
},
{
"name": "TypeScript",
"bytes": "673426"
}
],
"symlink_target": ""
}
|
"""Basic functions for probability features: Transitions matrix, motif probability..."""
from . import utils
def permutations(items, n):
if n == 0:
yield ''
else:
for i in range(len(items)):
for base in permutations(items, n - 1):
yield str(items[i]) + str(base)
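# Example: list(permutations('AB', 2)) yields ['AA', 'AB', 'BA', 'BB'].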
def get_transitions(seq, alphabet, markov_order):
"""Computes transitions matrix"""
transitions = []
motifs = list(permutations(alphabet, markov_order+1))
counts = dict(zip(motifs, [0]*len(motifs)))
for i in range(len(seq)-markov_order):
s = seq[i:i+markov_order+1]
if all([i in alphabet for i in s]):
counts[s] += 1
for motif in motifs:
transitions.append(counts[motif])
transitions = list(utils.grouper(len(alphabet), transitions))
sums = list(map(sum, transitions))
for i in range(len(transitions)):
if sums[i] != 0:
transitions[i] = [transitions[i][j] / float(sums[i]) for j in range(len(transitions[i]))]
else:
transitions[i] = [0.] * len(transitions[i])
return transitions
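# Example (sketch): for a first-order model over DNA,
# get_transitions('ACGTACGT', 'ACGT', 1) returns a 4x4 matrix with one
# normalized row per preceding base, ordered as in permutations('ACGT', 2).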
def prob_motif(motif, alphabet, markov_order, transitions):
"""Computes the probability of a motif based on a transitions matrix"""
transitions = utils.flatten(transitions)
motifs = list(permutations(alphabet, markov_order+1))
motifs_index = dict([(motifs[i], i) for i in range(len(motifs))])
prob = 1.
for i in range(len(motif) - markov_order):
prob *= transitions[motifs_index[motif[i:i+markov_order+1]]]
return prob
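# Example (sketch):
# t = get_transitions('ACGTACGT', 'ACGT', 1)
# prob_motif('ACG', 'ACGT', 1, t)  # product of the 'AC' and 'CG' transition probabilities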
|
{
"content_hash": "1663681ef3b8f6f9663e6e5ef22253ae",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 101,
"avg_line_length": 38.1219512195122,
"alnum_prop": 0.6250799744081894,
"repo_name": "RNAEDITINGPLUS/main",
"id": "9e137eacb5d9147aa3309546e61a7d146da9a374",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node/mirmap/prob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26443"
},
{
"name": "C++",
"bytes": "65129"
},
{
"name": "CSS",
"bytes": "21418"
},
{
"name": "HTML",
"bytes": "212278"
},
{
"name": "JavaScript",
"bytes": "949717"
},
{
"name": "Makefile",
"bytes": "764"
},
{
"name": "PHP",
"bytes": "1411565"
},
{
"name": "Perl",
"bytes": "36123"
},
{
"name": "Python",
"bytes": "493394"
},
{
"name": "Shell",
"bytes": "4623"
},
{
"name": "Smarty",
"bytes": "8376"
}
],
"symlink_target": ""
}
|
"A Google Calendar Parser"
from datetime import datetime, date, timedelta
from time import strptime, mktime
from xml.sax.saxutils import unescape
from urllib2 import urlopen
# From Requirements.txt
from pytz import timezone
from icalendar.cal import Calendar, Event
from BeautifulSoup import BeautifulStoneSoup, Tag
TIME_FORMATS = (
"%a %b %d, %Y %I:%M%p",
"%a %b %d, %Y %I%p",
"%a %b %d, %Y",
"%Y-%m-%dT%H:%M:%S"
)
def _parse_time(time_str, reference_date=None):
"""\
Parses a calendar time string, and outputs a datetime object of the specified time.
Only compatible with the time formats listed in the TIME_FORMATS tuple.
'reference_date' is another time-string, used when the original time_str doesn't contain any date information.
"""
time_struct = None
if len(time_str.split()) == 1:
if "." in time_str:
time_str = time_str.rsplit('.', 1)[0]
else:
assert reference_date, "Hour-only time strings need a reference date string."
time_str = " ".join(reference_date.split()[:4]) + " " + time_str
for time_format in TIME_FORMATS:
try:
time_struct = strptime(time_str, time_format)
break
except ValueError:
pass
if time_struct is None:
raise ValueError("Unsupported time string format: %s" % (time_str))
return datetime.fromtimestamp(mktime(time_struct))
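# Example: _parse_time("Mon Oct 06, 2014 10:30AM") matches the first entry in
# TIME_FORMATS and returns datetime(2014, 10, 6, 10, 30).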
def _fix_timezone(datetime_obj, time_zone):
"""\
Adjusts time relative to the calendar's timezone,
then removes the datetime object's timezone property.
"""
if type(datetime_obj) is datetime and datetime_obj.tzinfo is not None:
return datetime_obj.astimezone(time_zone).replace(tzinfo=None)
elif type(datetime_obj) is date:
return datetime(datetime_obj.year, datetime_obj.month, datetime_obj.day)
return datetime_obj
def _multi_replace(string, replace_dict):
"Replaces multiple items in a string, where replace_dict consists of {value_to_be_removed: replced_by, etc...}"
for key, value in replace_dict.iteritems():
string = string.replace(str(key), str(value))
return string
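# Example: _multi_replace("a & b", {"&": "and"}) returns "a and b".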
def to_unicode_or_bust(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
def _normalize(data_string, convert_whitespace=False):
"Removes various markup artifacts and returns a normal python string."
new_string = unescape(to_unicode_or_bust(data_string))
new_string = _multi_replace(new_string, {
' ': ' ', '"': '"', '¦': '|', "'": "'", "\\": ""
})
new_string = new_string.strip()
if convert_whitespace:
return " ".join(new_string.split())
return new_string
class CalendarEvent(dict):
"""\
A modified dictionary that allows accessing and modifying the main properties of a calendar event
as both attributes, and dictionary keys; i.e. 'event["name"]' is the same as using 'event.name'
Only the following event-specific properties may be accessed/modified as attributes:
"name", "description", "location", "start_time", "end_time", "all_day",
"repeats", "repeat_freq", "repeat_day", "repeat_month", "repeat_until"
CalendarEvents may also be compared using the >, >=, <, <=, comparison operators, which compare
the starting times of the events.
"""
__slots__ = ( "name", "description", "location", "start_time", "end_time", "all_day",
"repeats", "repeat_freq", "repeat_day", "repeat_month", "repeat_until" )
def __getattr__(self, key):
if key in self.__slots__:
return self[key]
else:
return dict.__getattribute__(self, key)
def __setattr__(self, key, value):
if key in self.__slots__:
self[key] = value
else:
raise AttributeError("dict attributes are not modifiable.")
def __lt__(self, other):
assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
return self["start_time"] < other["start_time"]
def __le__(self, other):
assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
return self["start_time"] <= other["start_time"]
def __gt__(self, other):
assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
return self["start_time"] > other["start_time"]
def __ge__(self, other):
assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
return self["start_time"] >= other["start_time"]
class CalendarParser(object):
"""\
A practical calendar parser for Google Calendar's two output formats: XML, and iCal (.ics).
Stores events as a list of dictionaries with self-describing attributes.
Accepts url resources as well as local xml/ics files.
Certain fields/properties are not available when parsing ics resources.
"""
# TODO: Accept calendarIDs and support google's REST api
def __init__(self, ics_url=None, xml_url=None, ics_file=None, xml_file=None):
self.ics_file = ics_file
self.ics_url = ics_url
self.xml_file = xml_file
self.xml_url = xml_url
self.time_zone = None
self.calendar = None
self.title = ""
self.subtitle = ""
self.author = ""
self.email = ""
self.last_updated = None
self.date_published = None
self.events = []
def __len__(self):
return len(self.events)
def __iter__(self):
return self.events.__iter__()
def __reversed__(self):
return reversed(self.events)
def __contains__(self, item):
if type(item) is not str:
return item in self.events
for event in self.events:
if event["name"].lower() == item.lower():
return True
return False
def __getitem__(self, item):
if type(item) is str:
event_list = []
for event in self.events:
if event["name"].lower() == item.lower():
event_list.append(event)
if len(event_list) == 0:
raise LookupError("'%s' is not an event in this calendar." % (item))
if len(event_list) == 1:
return event_list[0]
else:
return event_list
else:
return self.events[item]
def keys(self):
"Returns the names of all the parsed events, which may be used as lookup-keys on the parser object."
return [event["name"] for event in self.events]
def sort_by_latest(self, sort_in_place=False):
"Returns a list of the parsed events, where the newest events are listed first."
sorted_events = sorted(self.events, reverse=True)
if sort_in_place:
self.events = sorted_events
return sorted_events
def sort_by_oldest(self, sort_in_place=False):
"Returns a list of the parsed events, where the oldest events are listed first."
sorted_events = sorted(self.events)
if sort_in_place:
self.events = sorted_events
return sorted_events
def fetch_calendar(self, force_xml=False, force_ics=False):
"Fetches the calendar data from an XML/.ics resource in preperation for parsing."
cal_data = None
if self.xml_url:
cal_data = urlopen(self.xml_url)
elif self.ics_url:
cal_data = urlopen(self.ics_url)
elif self.xml_file:
cal_data = open(self.xml_file, "rb")
elif self.ics_file:
cal_data = open(self.ics_file, "rb")
else:
raise UnboundLocalError("No calendar url or file path has been set.")
cal_str = cal_data.read()
cal_data.close()
if (self.xml_url or self.xml_file) and not force_ics:
self.calendar = BeautifulStoneSoup(_normalize(cal_str, True))
elif (self.ics_url or self.ics_file) and not force_xml:
self.calendar = Calendar.from_ical(cal_str)
return self.calendar
def parse_xml(self, overwrite_events=True):
"Returns a generator of Event dictionaries from an XML atom feed."
assert self.xml_url or self.xml_file, "No xml resource has been set."
self.calendar = self.fetch_calendar(force_xml=True).contents[1]
metadata = self.calendar.contents[1:3]
self.title = metadata[1].contents[0].contents[0]
self.subtitle = metadata[1].contents[1].next
self.author = metadata[1].contents[6].next.next.next
self.email = metadata[1].contents[6].next.contents[1].next
self.time_zone = timezone(metadata[1].contents[6].contents[5].attrs[0][1])
self.last_updated = _parse_time(metadata[0].next)
self.date_published = _parse_time(
metadata[1].contents[6].contents[5].next.next.contents[1].next)
raw_events = self.calendar.contents[3:]
if overwrite_events:
self.events = []
for event in raw_events:
event_dict = CalendarEvent()
event_dict["name"] = _normalize(event.next.next)
event_dict["repeats"] = False
for content in event.contents[2]:
if isinstance(content, Tag):
content = content.contents[0]
if "Recurring Event" in content:
event_dict["repeats"] = True
elif event_dict["repeats"]:
if "First start:" in content:
rep_info = content.split()[2:-1]
rep_date = rep_info[0].split('-')
# Not enough info to determine how often the event repeats...
#event_dict['repeat_month'] = rep_date[1] # "YEARLY"
#event_dict['repeat_day'] = rep_date[2] # "MONTHLY"
                        rep_date = list(map(int, rep_date))
if len(rep_info) == 2:
                            rep_time = list(map(int, rep_info[1].split(':')))
event_dict["start_time"] = datetime( *(rep_date + rep_time) )
else:
event_dict["start_time"] = datetime(*rep_date)
elif "Duration:" in content:
seconds = int(content.split()[-1])
event_dict["end_time"] = event_dict["start_time"] + timedelta(seconds=seconds)
elif "When: " in content:
when = event.contents[1].next.replace("When: ", "", 1)
if len(when.split()) > 4:
# Remove the timezone
when = when.rsplit(" ", 1)[0]
when = when.split(" to ")
if len(when) == 2:
start, end = when
event_dict["end_time"] = _parse_time(end, start)
else:
start = when[0]
event_dict["start_time"] = _parse_time(start)
if not "end_time" in event_dict \
and event_dict["start_time"].hour == 0 \
and event_dict["start_time"].minute == 0:
event_dict["all_day"] = True
event_dict["end_time"] = event_dict["start_time"] + timedelta(days=1)
else:
event_dict["all_day"] = False
elif "Where: " in content:
event_dict["location"] = _normalize(content).replace("Where: ", "")
elif "Event Description: " in content:
event_dict["description"] = _normalize(content).replace("Event Description: ", "")
if overwrite_events:
self.events.append(event_dict)
yield event_dict
def parse_ics(self, overwrite_events=True):
"Returns a generator of Event dictionaries from an iCal (.ics) file."
        assert self.ics_url or self.ics_file, "No ics resource has been set."
# Returns an icalendar.Calendar object.
self.fetch_calendar(force_ics=True)
self.time_zone = timezone(str(self.calendar["x-wr-timezone"]))
self.title = str(self.calendar["x-wr-calname"])
if overwrite_events:
self.events = []
for event in self.calendar.walk():
if isinstance(event, Event):
event_dict = CalendarEvent()
if "SUMMARY" in event:
event_dict["name"] = _normalize(event["summary"])
if "DESCRIPTION" in event:
event_dict["description"] = _normalize(event["description"])
if "LOCATION" in event and event["location"]:
event_dict["location"] = _normalize(event["location"])
if "DTSTART" in event:
event_dict["start_time"] = _fix_timezone(event["dtstart"].dt, self.time_zone)
if "DTEND" in event:
event_dict["end_time"] = _fix_timezone(event["dtend"].dt, self.time_zone)
if event_dict["start_time"].hour == 0 \
and event_dict["start_time"].minute == 0 \
and (event_dict["end_time"] - event_dict["start_time"]) == timedelta(days=1):
event_dict["all_day"] = True
else:
event_dict["all_day"] = False
event_dict["repeats"] = False
if "RRULE" in event:
rep_dict = event["RRULE"]
event_dict["repeats"] = True
event_dict["repeat_freq"] = rep_dict["FREQ"][0]
if event_dict["repeat_freq"] == "YEARLY":
event_dict["repeat_day"] = event_dict["start_time"].day
event_dict["repeat_month"] = event_dict["start_time"].month
if "BYDAY" in rep_dict:
event_dict["repeat_day"] = rep_dict["BYDAY"][0]
elif "BYMONTHDAY" in rep_dict:
event_dict["repeat_day"] = rep_dict["BYMONTHDAY"][0]
if "BYMONTH" in rep_dict:
event_dict["repeat_month"] = rep_dict["BYMONTH"][0]
if "UNTIL" in rep_dict:
event_dict["repeat_until"] = _fix_timezone(rep_dict["UNTIL"][0], self.time_zone)
if overwrite_events:
self.events.append(event_dict)
yield event_dict
def parse_calendar(self, force_list=False, use_xml=False, use_ics=False, overwrite_events=True):
"Parses the calendar at the specified resource path. Returns a generator of CalendarEvents."
generator = None
if (self.ics_url or self.ics_file) and (use_ics or not use_xml):
generator = self.parse_ics(overwrite_events)
elif (self.xml_url or self.xml_file) and (use_xml or not use_ics):
generator = self.parse_xml(overwrite_events)
if force_list:
return [event for event in generator]
else:
return generator
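# --- Usage sketch (editorial addition, not part of the original module) ---
# A minimal example of driving CalendarParser; the feed URL is hypothetical,
# and the available event fields depend on the resource type, as noted in the
# class docstring.
if __name__ == "__main__":
    parser = CalendarParser(ics_url="https://example.com/calendar/basic.ics")
    for event in parser.parse_calendar(force_list=True):
        print(event["name"], event["start_time"])
    # CalendarEvent defines its rich comparisons on "start_time", so the
    # sort helpers are thin wrappers around sorted():
    oldest_first = parser.sort_by_oldest()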
|
{
"content_hash": "a450baa547c3bd5619cd7ad56f363a35",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 115,
"avg_line_length": 38.82706766917293,
"alnum_prop": 0.5562225664859283,
"repo_name": "dominicmeroux/Reading-In-and-Analyzing-Calendar-Data-by-Interfacing-Between-MySQL-and-Python",
"id": "f44effae042aed1d65ed4b02456a179d8020fc3f",
"size": "15492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calendar_parser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "69357"
},
{
"name": "Python",
"bytes": "58947"
}
],
"symlink_target": ""
}
|
import warnings
import threading
from django.core.mail import get_connection
from django.core.mail.backends.base import BaseEmailBackend
from email_tracker.conf import settings
from email_tracker.compat import make_msgid
from email_tracker.models import TrackedEmail
class EmailBackendWrapper(BaseEmailBackend):
"""
    Email backend wrapper that wraps the backend configured in
settings.EMAIL_TRACKER_BACKEND
"""
def __init__(self, **kwargs):
super(EmailBackendWrapper, self).__init__(**kwargs)
self.connection = get_connection(settings.EMAIL_TRACKER_BACKEND, **kwargs)
self._lock = threading.RLock()
def open(self):
return self.connection.open()
def close(self):
return self.connection.close()
def send_messages(self, email_messages):
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, message):
return self.connection.send_messages([message])
class EmailTrackerBackend(EmailBackendWrapper):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
    Creates a TrackedEmail record (and an EmailCategory, if needed) for every
    message, ensuring each carries a Message-ID extra header.
"""
def _send(self, message):
        # Ensure the message has a Message-ID before sending so it can be logged
self._ensure_message_id(message)
sent = super(EmailTrackerBackend, self)._send(message)
self.track_message(message, bool(sent))
return sent
def _ensure_message_id(self, message):
if 'message-id' not in [key.lower() for key in message.extra_headers]:
message.extra_headers['Message-ID'] = make_msgid()
def track_message(self, message, is_sent):
TrackedEmail.objects.create_from_message(message, is_sent=is_sent)
def create_tracked_email(email_message, is_sent):
warnings.warn('create_tracked_email is deprecated. Use TrackedEmail.objects.create_from_message instead',
DeprecationWarning,
stacklevel=2)
return TrackedEmail.objects.create_from_message(email_message,
is_sent=is_sent)
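# --- Usage sketch (editorial addition) ---
# How the tracker is typically wired into a project. The settings below are
# illustrative of the intent rather than copied from the package docs:
#
#   # settings.py
#   EMAIL_BACKEND = 'email_tracker.backends.EmailTrackerBackend'
#   EMAIL_TRACKER_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#
#   # application code: messages are then tracked transparently
#   from django.core.mail import send_mail
#   send_mail('Subject', 'Body', 'noreply@example.com', ['user@example.com'])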
|
{
"content_hash": "0dec921db06de092837e88b98be332dd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 109,
"avg_line_length": 32.723684210526315,
"alnum_prop": 0.6437474869320466,
"repo_name": "IndustriaTech/django-email-tracker",
"id": "797cae53f3c7dd7c3161340d00a559fd2a157393",
"size": "2487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "email_tracker/backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53919"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import model_utils.fields
import colorfield.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('slug', models.SlugField()),
                ('text', models.TextField(help_text='Try to enter a few more lines', verbose_name='Activity description', blank=True)),
('archivo', models.FileField(upload_to='activity', null=True, verbose_name='Archivo', blank=True)),
('start', models.DateTimeField(null=True, verbose_name='Start', blank=True)),
('end', models.DateTimeField(null=True, verbose_name='End', blank=True)),
],
options={
'ordering': ['start', 'name'],
'verbose_name': 'Activity',
'verbose_name_plural': 'Activities',
},
),
migrations.CreateModel(
name='AttendeePayment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField(verbose_name='Date')),
('amount', models.DecimalField(verbose_name='Amount', max_digits=10, decimal_places=2)),
('reference', models.CharField(max_length=20, verbose_name='Reference')),
('note', models.CharField(max_length=200, null=True, verbose_name='Nota', blank=True)),
],
options={
'ordering': ['-date'],
'verbose_name': 'Attendee Payment',
'verbose_name_plural': 'Attendee Payments',
},
),
migrations.CreateModel(
name='AttendeeReceipt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField(verbose_name='Date')),
('reference', models.CharField(max_length=20, verbose_name='Reference')),
('note', models.CharField(max_length=200, null=True, verbose_name='Nota', blank=True)),
],
options={
'ordering': ['-date'],
'verbose_name': 'Attendee Receipt',
'verbose_name_plural': 'Attendee Receipts',
},
),
migrations.CreateModel(
name='AttendeeType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='Nombre')),
],
options={
'ordering': ['name'],
'verbose_name': 'Attendee Type',
'verbose_name_plural': 'Attendee Types',
},
),
migrations.CreateModel(
name='AttendeeTypeEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('price', models.DecimalField(verbose_name='Price', max_digits=8, decimal_places=2)),
('eb_price', models.DecimalField(verbose_name='Early Bird Price', max_digits=8, decimal_places=2)),
('extra_price', models.DecimalField(verbose_name='Extra Activity Price', max_digits=8, decimal_places=2)),
],
options={
'verbose_name': 'Attendee Type in Event',
'verbose_name_plural': 'Attendee Types in Events',
},
),
migrations.CreateModel(
name='Content',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('title', models.CharField(max_length=200, verbose_name='Title')),
('page', model_utils.fields.StatusField(default='about', max_length=100, no_check_for_status=True, choices=[('about', 'About'), ('contact', 'Contact'), ('info', 'Main description'), ('footer', 'Footer'), ('services', 'Services'), ('404', 'Not Found'), ('confirmation', 'Confirmation'), ('other', 'Other')])),
('text', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['name'],
'verbose_name': 'Content',
'verbose_name_plural': 'Contents',
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='Event')),
('title', models.CharField(max_length=200, null=True, verbose_name='Title', blank=True)),
('organizer', models.CharField(max_length=200, null=True, verbose_name='Organizer', blank=True)),
('slug', models.SlugField()),
('status', models.CharField(default='draft', max_length=20, verbose_name='Status', choices=[('inactive', 'inactive'), ('draft', 'draft'), ('published', 'published'), ('frontpage', 'frontpage')])),
('activities_label', models.CharField(max_length=50, null=True, verbose_name='Activities label', blank=True)),
                ('text', models.TextField(help_text='Try to enter a few more lines', verbose_name='Event description', blank=True)),
('pdfnote', models.TextField(null=True, verbose_name='PDF and Approval Note', blank=True)),
('reject_note', models.TextField(null=True, verbose_name='Reject Note', blank=True)),
('eb_start', models.DateField(null=True, verbose_name='Early Bird Start', blank=True)),
('eb_end', models.DateField(null=True, verbose_name='Early Bird End', blank=True)),
('start', models.DateTimeField(null=True, verbose_name='Start', blank=True)),
('end', models.DateTimeField(null=True, verbose_name='End', blank=True)),
('place', models.CharField(max_length=200, null=True, verbose_name='Place', blank=True)),
('badge_size_x', models.IntegerField(null=True, verbose_name='Badge size X', blank=True)),
('badge_size_y', models.IntegerField(null=True, verbose_name='Badge size Y', blank=True)),
('badge_color', colorfield.fields.ColorField(default='ffffff', max_length=10, verbose_name='Badge color')),
('template', models.CharField(max_length=200, null=True, blank=True)),
],
options={
'ordering': ['-start'],
'verbose_name': 'Event',
'verbose_name_plural': 'Events',
},
),
migrations.CreateModel(
name='EventBadge',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', models.CharField(max_length=50, verbose_name='Field', choices=[('event', 'Event'), ('name', 'Complete name'), ('first_name', 'First name'), ('last_name', 'Last name'), ('profession', 'Profession'), ('country', 'Country'), ('type', 'Tipo'), ('email', 'Correo electr\xf3nico'), ('text', 'Texto'), ('logo', 'Logo'), ('photo', 'Photo'), ('organization', 'Organization')])),
('color', colorfield.fields.ColorField(default='', max_length=10, null=True, verbose_name='Color', blank=True)),
('size', models.IntegerField(verbose_name='Size')),
('x', models.IntegerField(verbose_name='X')),
('y', models.IntegerField(verbose_name='Y')),
('format', models.CharField(max_length=50, null=True, verbose_name='Extra', blank=True)),
],
options={
'ordering': ['field'],
'verbose_name': 'Badge',
'verbose_name_plural': 'Badges',
},
),
migrations.CreateModel(
name='Field',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', model_utils.fields.StatusField(default='document', max_length=100, no_check_for_status=True, choices=[('document', 'Document'), ('phone', 'Telephone'), ('organization', 'Organization'), ('position', 'Position'), ('profession', 'Profession'), ('country', 'Country')])),
('label', models.CharField(max_length=100)),
('order', models.IntegerField(null=True)),
],
options={
'ordering': ['event', 'order'],
},
),
migrations.CreateModel(
name='Font',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=50, verbose_name='Nombre')),
('filename', models.CharField(unique=True, max_length=250, verbose_name='Filename')),
],
options={
'ordering': ['name'],
'verbose_name': 'Font',
'verbose_name_plural': 'Fonts',
},
),
migrations.CreateModel(
name='Invited',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=200, verbose_name='First name')),
('last_name', models.CharField(max_length=200, verbose_name='Last name')),
('organization', models.CharField(max_length=50, null=True, verbose_name='Organization', blank=True)),
('email', models.CharField(unique=True, max_length=100, verbose_name='E-Mail')),
],
options={
'ordering': ['first_name', 'last_name'],
'verbose_name': 'Invited',
'verbose_name_plural': 'Invited',
},
),
migrations.CreateModel(
name='Logo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('weight', models.IntegerField()),
('url', models.URLField(null=True, blank=True)),
],
options={
'ordering': ['weight'],
'verbose_name': 'Logos',
'verbose_name_plural': 'Logos',
},
),
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name='Title')),
('media', models.CharField(max_length=200, verbose_name='Media')),
('url', models.URLField()),
('date', models.DateField()),
],
options={
'ordering': ['-date', 'title'],
},
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='Organization')),
('email', models.CharField(max_length=100, verbose_name='E-Mail')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='Nombre')),
],
options={
'ordering': ['name'],
'verbose_name': 'Payment Method',
'verbose_name_plural': 'Payment Methods',
},
),
migrations.CreateModel(
name='Profession',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('slug', models.SlugField()),
                ('text', models.TextField(help_text='Try to enter a few more lines', verbose_name='Profession description', blank=True)),
],
options={
'ordering': ['name'],
'verbose_name': 'Profession',
'verbose_name_plural': 'Professions',
},
),
]
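# --- Editorial note ---
# As an initial migration with no dependencies, this schema would be applied
# with the standard Django command; the app label 'app' is inferred from the
# repository path, so treat it as an assumption:
#
#   python manage.py migrate app 0001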
|
{
"content_hash": "f1cc73cb50d8fe6050b59a6a7a872185",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 395,
"avg_line_length": 53.46153846153846,
"alnum_prop": 0.5351003407800076,
"repo_name": "javierwilson/resilienciacafe",
"id": "88ba7f12639a8e0443b7f0a4451b7c5052fa8ec4",
"size": "13229",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "forocacao/app/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "117761"
},
{
"name": "HTML",
"bytes": "91091"
},
{
"name": "JavaScript",
"bytes": "2394"
},
{
"name": "Python",
"bytes": "143840"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
"""Test the sma config flow."""
from unittest.mock import patch
from pysma.exceptions import (
SmaAuthenticationException,
SmaConnectionException,
SmaReadException,
)
from homeassistant.components.sma.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.data_entry_flow import FlowResultType
from . import MOCK_DEVICE, MOCK_USER_INPUT, _patch_async_setup_entry
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] == {}
with patch("pysma.SMA.new_session", return_value=True), patch(
"pysma.SMA.device_info", return_value=MOCK_DEVICE
), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
await hass.async_block_till_done()
assert result["type"] == FlowResultType.CREATE_ENTRY
assert result["title"] == MOCK_USER_INPUT["host"]
assert result["data"] == MOCK_USER_INPUT
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch(
"pysma.SMA.new_session", side_effect=SmaConnectionException
), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] == {"base": "cannot_connect"}
assert len(mock_setup_entry.mock_calls) == 0
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch(
"pysma.SMA.new_session", side_effect=SmaAuthenticationException
), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] == {"base": "invalid_auth"}
assert len(mock_setup_entry.mock_calls) == 0
async def test_form_cannot_retrieve_device_info(hass):
"""Test we handle cannot retrieve device info error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch("pysma.SMA.new_session", return_value=True), patch(
"pysma.SMA.read", side_effect=SmaReadException
), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] == {"base": "cannot_retrieve_device_info"}
assert len(mock_setup_entry.mock_calls) == 0
async def test_form_unexpected_exception(hass):
"""Test we handle unexpected exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch(
"pysma.SMA.new_session", side_effect=Exception
), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] == {"base": "unknown"}
assert len(mock_setup_entry.mock_calls) == 0
async def test_form_already_configured(hass, mock_config_entry):
"""Test starting a flow by user when already configured."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch("pysma.SMA.new_session", return_value=True), patch(
"pysma.SMA.device_info", return_value=MOCK_DEVICE
), patch(
"pysma.SMA.close_session", return_value=True
), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USER_INPUT,
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "already_configured"
assert len(mock_setup_entry.mock_calls) == 0
|
{
"content_hash": "b7602df65ea7ad95e07fefd47e0d3bbb",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 71,
"avg_line_length": 33.431654676258994,
"alnum_prop": 0.6511727996556919,
"repo_name": "mezz64/home-assistant",
"id": "eeaa0d75f074e954c60fdc0b70b034505a94726d",
"size": "4647",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/sma/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
See docstring class Validator below for more details on validation
"""
from abc import abstractmethod
from copy import deepcopy
from moto.dynamodb2.exceptions import (
AttributeIsReservedKeyword,
ExpressionAttributeValueNotDefined,
AttributeDoesNotExist,
ExpressionAttributeNameNotDefined,
IncorrectOperandType,
InvalidUpdateExpressionInvalidDocumentPath,
ProvidedKeyDoesNotExist,
EmptyKeyAttributeException,
)
from moto.dynamodb2.models import DynamoType
from moto.dynamodb2.parsing.ast_nodes import (
ExpressionAttribute,
UpdateExpressionPath,
UpdateExpressionSetAction,
UpdateExpressionAddAction,
UpdateExpressionDeleteAction,
UpdateExpressionRemoveAction,
DDBTypedValue,
ExpressionAttributeValue,
ExpressionAttributeName,
DepthFirstTraverser,
NoneExistingPath,
UpdateExpressionFunction,
ExpressionPathDescender,
UpdateExpressionValue,
ExpressionValueOperator,
ExpressionSelector,
)
from moto.dynamodb2.parsing.reserved_keywords import ReservedKeywords
class ExpressionAttributeValueProcessor(DepthFirstTraverser):
def __init__(self, expression_attribute_values):
self.expression_attribute_values = expression_attribute_values
def _processing_map(self):
return {
ExpressionAttributeValue: self.replace_expression_attribute_value_with_value
}
def replace_expression_attribute_value_with_value(self, node):
"""A node representing an Expression Attribute Value. Resolve and replace value"""
assert isinstance(node, ExpressionAttributeValue)
attribute_value_name = node.get_value_name()
try:
target = self.expression_attribute_values[attribute_value_name]
except KeyError:
raise ExpressionAttributeValueNotDefined(
attribute_value=attribute_value_name
)
return DDBTypedValue(DynamoType(target))
class ExpressionPathResolver(object):
def __init__(self, expression_attribute_names):
self.expression_attribute_names = expression_attribute_names
@classmethod
def raise_exception_if_keyword(cls, attribute):
if attribute.upper() in ReservedKeywords.get_reserved_keywords():
raise AttributeIsReservedKeyword(attribute)
def resolve_expression_path(self, item, update_expression_path):
assert isinstance(update_expression_path, UpdateExpressionPath)
return self.resolve_expression_path_nodes(item, update_expression_path.children)
def resolve_expression_path_nodes(self, item, update_expression_path_nodes):
target = item.attrs
for child in update_expression_path_nodes:
# First replace placeholder with attribute_name
attr_name = None
if isinstance(child, ExpressionAttributeName):
attr_placeholder = child.get_attribute_name_placeholder()
try:
attr_name = self.expression_attribute_names[attr_placeholder]
except KeyError:
raise ExpressionAttributeNameNotDefined(attr_placeholder)
elif isinstance(child, ExpressionAttribute):
attr_name = child.get_attribute_name()
self.raise_exception_if_keyword(attr_name)
if attr_name is not None:
                # Resolve the attribute name
try:
target = target[attr_name]
except (KeyError, TypeError):
if child == update_expression_path_nodes[-1]:
return NoneExistingPath(creatable=True)
return NoneExistingPath()
else:
if isinstance(child, ExpressionPathDescender):
continue
elif isinstance(child, ExpressionSelector):
index = child.get_index()
if target.is_list():
try:
target = target[index]
except IndexError:
                            # Going out of bounds on the assignment side is not
                            # a problem: the value is simply appended to the list.
if child == update_expression_path_nodes[-1]:
return NoneExistingPath(creatable=True)
return NoneExistingPath()
else:
raise InvalidUpdateExpressionInvalidDocumentPath
else:
raise NotImplementedError(
"Path resolution for {t}".format(t=type(child))
)
return DDBTypedValue(target)
def resolve_expression_path_nodes_to_dynamo_type(
self, item, update_expression_path_nodes
):
node = self.resolve_expression_path_nodes(item, update_expression_path_nodes)
if isinstance(node, NoneExistingPath):
raise ProvidedKeyDoesNotExist()
assert isinstance(node, DDBTypedValue)
return node.get_value()
class ExpressionAttributeResolvingProcessor(DepthFirstTraverser):
def _processing_map(self):
return {
UpdateExpressionSetAction: self.disable_resolving,
UpdateExpressionPath: self.process_expression_path_node,
}
def __init__(self, expression_attribute_names, item):
self.expression_attribute_names = expression_attribute_names
self.item = item
self.resolving = False
def pre_processing_of_child(self, parent_node, child_id):
"""
        Enable resolving when processing any child of an update action other than
        the first, because the first argument is the path to be set and the later
        arguments hold the value.
"""
if isinstance(
parent_node,
(
UpdateExpressionSetAction,
UpdateExpressionRemoveAction,
UpdateExpressionDeleteAction,
UpdateExpressionAddAction,
),
):
if child_id == 0:
self.resolving = False
else:
self.resolving = True
def disable_resolving(self, node=None):
self.resolving = False
return node
def process_expression_path_node(self, node):
"""Resolve ExpressionAttribute if not part of a path and resolving is enabled."""
if self.resolving:
return self.resolve_expression_path(node)
else:
            # Still resolve, but return the original node so the path stays intact; just make sure the nodes are creatable.
result_node = self.resolve_expression_path(node)
if (
isinstance(result_node, NoneExistingPath)
and not result_node.is_creatable()
):
raise InvalidUpdateExpressionInvalidDocumentPath()
return node
def resolve_expression_path(self, node):
return ExpressionPathResolver(
self.expression_attribute_names
).resolve_expression_path(self.item, node)
class UpdateExpressionFunctionEvaluator(DepthFirstTraverser):
"""
At time of writing there are only 2 functions for DDB UpdateExpressions. They both are specific to the SET
expression as per the official AWS docs:
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Expressions.UpdateExpressions.html#Expressions.UpdateExpressions.SET
"""
def _processing_map(self):
return {UpdateExpressionFunction: self.process_function}
def process_function(self, node):
assert isinstance(node, UpdateExpressionFunction)
function_name = node.get_function_name()
first_arg = node.get_nth_argument(1)
second_arg = node.get_nth_argument(2)
if function_name == "if_not_exists":
if isinstance(first_arg, NoneExistingPath):
result = second_arg
else:
result = first_arg
assert isinstance(result, (DDBTypedValue, NoneExistingPath))
return result
elif function_name == "list_append":
first_arg = deepcopy(
self.get_list_from_ddb_typed_value(first_arg, function_name)
)
second_arg = self.get_list_from_ddb_typed_value(second_arg, function_name)
for list_element in second_arg.value:
first_arg.value.append(list_element)
return DDBTypedValue(first_arg)
else:
raise NotImplementedError(
"Unsupported function for moto {name}".format(name=function_name)
)
@classmethod
def get_list_from_ddb_typed_value(cls, node, function_name):
assert isinstance(node, DDBTypedValue)
dynamo_value = node.get_value()
assert isinstance(dynamo_value, DynamoType)
if not dynamo_value.is_list():
raise IncorrectOperandType(function_name, dynamo_value.type)
return dynamo_value
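# --- Editorial note ---
# Concretely, the two supported SET functions correspond to UpdateExpressions
# such as the following (attribute names and values are hypothetical):
#   "SET #a = if_not_exists(#a, :default)"  -> keep #a if present, else :default
#   "SET #l = list_append(#l, :new_items)"  -> concatenate the two lists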
class NoneExistingPathChecker(DepthFirstTraverser):
"""
Pass through the AST and make sure there are no none-existing paths.
"""
def _processing_map(self):
return {NoneExistingPath: self.raise_none_existing_path}
def raise_none_existing_path(self, node):
raise AttributeDoesNotExist
class ExecuteOperations(DepthFirstTraverser):
def _processing_map(self):
return {UpdateExpressionValue: self.process_update_expression_value}
def process_update_expression_value(self, node):
"""
        If an UpdateExpressionValue has only a single child, the node is replaced with that child.
        Otherwise it has 3 children and the middle one is an ExpressionValueOperator which details how to combine them.
Args:
node(Node):
Returns:
Node: The resulting node of the operation if present or the child.
"""
assert isinstance(node, UpdateExpressionValue)
if len(node.children) == 1:
return node.children[0]
elif len(node.children) == 3:
operator_node = node.children[1]
assert isinstance(operator_node, ExpressionValueOperator)
operator = operator_node.get_operator()
left_operand = self.get_dynamo_value_from_ddb_typed_value(node.children[0])
right_operand = self.get_dynamo_value_from_ddb_typed_value(node.children[2])
if operator == "+":
return self.get_sum(left_operand, right_operand)
elif operator == "-":
return self.get_subtraction(left_operand, right_operand)
else:
raise NotImplementedError(
"Moto does not support operator {operator}".format(
operator=operator
)
)
else:
raise NotImplementedError(
"UpdateExpressionValue only has implementations for 1 or 3 children."
)
@classmethod
def get_dynamo_value_from_ddb_typed_value(cls, node):
assert isinstance(node, DDBTypedValue)
dynamo_value = node.get_value()
assert isinstance(dynamo_value, DynamoType)
return dynamo_value
@classmethod
def get_sum(cls, left_operand, right_operand):
"""
Args:
left_operand(DynamoType):
right_operand(DynamoType):
Returns:
DDBTypedValue:
"""
try:
return DDBTypedValue(left_operand + right_operand)
except TypeError:
raise IncorrectOperandType("+", left_operand.type)
@classmethod
def get_subtraction(cls, left_operand, right_operand):
"""
Args:
left_operand(DynamoType):
right_operand(DynamoType):
Returns:
DDBTypedValue:
"""
try:
return DDBTypedValue(left_operand - right_operand)
except TypeError:
raise IncorrectOperandType("-", left_operand.type)
class EmptyStringKeyValueValidator(DepthFirstTraverser):
def __init__(self, key_attributes):
self.key_attributes = key_attributes
def _processing_map(self):
return {UpdateExpressionSetAction: self.check_for_empty_string_key_value}
def check_for_empty_string_key_value(self, node):
"""A node representing a SET action. Check that keys are not being assigned empty strings"""
assert isinstance(node, UpdateExpressionSetAction)
assert len(node.children) == 2
key = node.children[0].children[0].children[0]
val_node = node.children[1].children[0]
if val_node.type in ["S", "B"] and key in self.key_attributes:
raise EmptyKeyAttributeException
return node
class Validator(object):
"""
A validator is used to validate expressions which are passed in as an AST.
"""
def __init__(
self,
expression,
expression_attribute_names,
expression_attribute_values,
item,
table,
):
"""
        Besides validating, the Validator also replaces referenced parts of the item,
        which is cheapest to do during validation.
Args:
expression(Node): The root node of the AST representing the expression to be validated
expression_attribute_names(ExpressionAttributeNames):
expression_attribute_values(ExpressionAttributeValues):
item(Item): The item which will be updated (pointed to by Key of update_item)
"""
self.expression_attribute_names = expression_attribute_names
self.expression_attribute_values = expression_attribute_values
self.item = item
self.table = table
self.processors = self.get_ast_processors()
self.node_to_validate = deepcopy(expression)
@abstractmethod
def get_ast_processors(self):
"""Get the different processors that go through the AST tree and processes the nodes."""
def validate(self):
n = self.node_to_validate
for processor in self.processors:
n = processor.traverse(n)
return n
class UpdateExpressionValidator(Validator):
def get_ast_processors(self):
"""Get the different processors that go through the AST tree and processes the nodes."""
processors = [
ExpressionAttributeValueProcessor(self.expression_attribute_values),
ExpressionAttributeResolvingProcessor(
self.expression_attribute_names, self.item
),
UpdateExpressionFunctionEvaluator(),
NoneExistingPathChecker(),
ExecuteOperations(),
EmptyStringKeyValueValidator(self.table.key_attributes),
]
return processors
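# --- Usage sketch (editorial addition) ---
# How the validator chain is typically driven. The parser import mirrors what
# moto's own models module does at time of writing; `item` and `table` are
# assumed to be moto Item/Table instances already in scope:
#
#   from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
#   ast = UpdateExpressionParser.make("SET #n = :v")
#   validated_ast = UpdateExpressionValidator(
#       ast,
#       expression_attribute_names={"#n": "name"},
#       expression_attribute_values={":v": {"S": "moto"}},
#       item=item,
#       table=table,
#   ).validate()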
|
{
"content_hash": "b0c9211e00f03645dbb1285b710dec61",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 118,
"avg_line_length": 37.93908629441624,
"alnum_prop": 0.6315226117206315,
"repo_name": "william-richard/moto",
"id": "79849e538c64febabf5026bfaf172aa8baa0d00e",
"size": "14948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/dynamodb2/parsing/validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
from . import Core
from .Core import Transform_Affine, Transform_Identity, Transform_6DOF, Transform_Scale, Transform_Translation, Transform_Rotation, Grid3D, Image3D, GridND, ImageND
from .Core import grid_from_box_and_affine
from .Core import nipy_to_occiput, nifti_to_occiput, occiput_from_array
from . import transformations
from . import NiftyCore_wrap
from . import Conversion
|
{
"content_hash": "aab1532971d7455b479239fef4b135b3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 164,
"avg_line_length": 48,
"alnum_prop": 0.8046875,
"repo_name": "kastman/occiput",
"id": "c43b9f5c2b6016e848a0a52a94d977d062b9f6f4",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "occiput/Core/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "264798"
}
],
"symlink_target": ""
}
|
"""Tests for HomematicIP Cloud config flow."""
from unittest.mock import patch
from homeassistant.components.homematicip_cloud.const import (
DOMAIN as HMIPC_DOMAIN,
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
HMIPC_PIN,
)
from tests.common import MockConfigEntry
DEFAULT_CONFIG = {HMIPC_HAPID: "ABC123", HMIPC_PIN: "123", HMIPC_NAME: "hmip"}
IMPORT_CONFIG = {HMIPC_HAPID: "ABC123", HMIPC_AUTHTOKEN: "123", HMIPC_NAME: "hmip"}
async def test_flow_works(hass, simple_mock_home):
"""Test config flow."""
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=False,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.get_auth",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "user"}, data=DEFAULT_CONFIG
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "press_the_button"}
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == "ABC123"
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_setup",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_register",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipHAP.async_connect",
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "create_entry"
assert result["title"] == "ABC123"
assert result["data"] == {"hapid": "ABC123", "authtoken": True, "name": "hmip"}
assert result["result"].unique_id == "ABC123"
async def test_flow_init_connection_error(hass):
"""Test config flow with accesspoint connection error."""
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_setup",
return_value=False,
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "user"}, data=DEFAULT_CONFIG
)
assert result["type"] == "form"
assert result["step_id"] == "init"
async def test_flow_link_connection_error(hass):
"""Test config flow client registration connection error."""
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_setup",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_register",
return_value=False,
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "user"}, data=DEFAULT_CONFIG
)
assert result["type"] == "abort"
assert result["reason"] == "connection_aborted"
async def test_flow_link_press_button(hass):
"""Test config flow ask for pressing the blue button."""
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=False,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_setup",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "user"}, data=DEFAULT_CONFIG
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "press_the_button"}
async def test_init_flow_show_form(hass):
"""Test config flow shows up with a form."""
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "init"
async def test_init_already_configured(hass):
"""Test accesspoint is already configured."""
MockConfigEntry(domain=HMIPC_DOMAIN, unique_id="ABC123").add_to_hass(hass)
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "user"}, data=DEFAULT_CONFIG
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_import_config(hass, simple_mock_home):
"""Test importing a host with an existing config file."""
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_setup",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_register",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipHAP.async_connect",
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "import"}, data=IMPORT_CONFIG
)
assert result["type"] == "create_entry"
assert result["title"] == "ABC123"
assert result["data"] == {"authtoken": "123", "hapid": "ABC123", "name": "hmip"}
assert result["result"].unique_id == "ABC123"
async def test_import_existing_config(hass):
"""Test abort of an existing accesspoint from config."""
MockConfigEntry(domain=HMIPC_DOMAIN, unique_id="ABC123").add_to_hass(hass)
with patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_checkbutton",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_setup",
return_value=True,
), patch(
"homeassistant.components.homematicip_cloud.hap.HomematicipAuth.async_register",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
HMIPC_DOMAIN, context={"source": "import"}, data=IMPORT_CONFIG
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
|
{
"content_hash": "a494af025c538435b69e648920a6b032",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 91,
"avg_line_length": 35.83783783783784,
"alnum_prop": 0.6559577677224736,
"repo_name": "turbokongen/home-assistant",
"id": "0b573e66b1da0458b2562acd0d15db28a350a528",
"size": "6630",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/homematicip_cloud/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import csv
import os
from furl import furl
from datetime import datetime, timedelta
from django.views.generic import FormView, DeleteView, ListView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from osf.models.user import OSFUser
from osf.models.node import Node, NodeLog
from osf.models.spam import SpamStatus
from osf.models.tag import Tag
from framework.auth import get_user
from framework.auth.utils import impute_names
from framework.auth.core import generate_verification_key
from website.mailchimp_utils import subscribe_on_confirm
from admin.base.views import GuidFormView, GuidView
from osf.models.admin_log_entry import (
update_admin_log,
USER_2_FACTOR,
USER_EMAILED,
USER_REMOVED,
USER_RESTORED,
CONFIRM_SPAM)
from admin.users.serializers import serialize_user
from admin.users.forms import EmailResetForm, WorkshopForm
from admin.users.templatetags.user_extras import reverse_user
from website.settings import DOMAIN, SUPPORT_EMAIL
class UserDeleteView(PermissionRequiredMixin, DeleteView):
""" Allow authorised admin user to remove/restore user
Interface with OSF database. No admin models.
"""
template_name = 'users/remove_user.html'
context_object_name = 'user'
object = None
permission_required = 'osf.change_user'
raise_exception = True
def delete(self, request, *args, **kwargs):
try:
user = self.get_object()
if user.date_disabled is None or kwargs.get('is_spam'):
user.disable_account()
user.is_registered = False
if 'spam_flagged' in user.system_tags or 'ham_confirmed' in user.system_tags:
if 'spam_flagged' in user.system_tags:
t = Tag.objects.get(name='spam_flagged', system=True)
user.tags.remove(t)
if 'ham_confirmed' in user.system_tags:
t = Tag.objects.get(name='ham_confirmed', system=True)
user.tags.remove(t)
if 'spam_confirmed' not in user.system_tags:
user.add_system_tag('spam_confirmed')
flag = USER_REMOVED
message = 'User account {} disabled'.format(user.pk)
else:
user.date_disabled = None
subscribe_on_confirm(user)
user.is_registered = True
if 'spam_flagged' in user.system_tags or 'spam_confirmed' in user.system_tags:
if 'spam_flagged' in user.system_tags:
t = Tag.objects.get(name='spam_flagged', system=True)
user.tags.remove(t)
if 'spam_confirmed' in user.system_tags:
t = Tag.objects.get(name='spam_confirmed', system=True)
user.tags.remove('spam_confirmed')
if 'ham_confirmed' not in user.system_tags:
user.add_system_tag('ham_confirmed')
flag = USER_RESTORED
message = 'User account {} reenabled'.format(user.pk)
user.save()
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
update_admin_log(
user_id=self.request.user.id,
object_id=user.pk,
object_repr='User',
message=message,
action_flag=flag
)
return redirect(reverse_user(self.kwargs.get('guid')))
def get_context_data(self, **kwargs):
context = {}
context.setdefault('guid', kwargs.get('object')._id)
return super(UserDeleteView, self).get_context_data(**context)
def get_object(self, queryset=None):
return OSFUser.load(self.kwargs.get('guid'))
class SpamUserDeleteView(UserDeleteView):
"""
Allow authorized admin user to delete a spam user and mark all their nodes as private
"""
template_name = 'users/remove_spam_user.html'
def delete(self, request, *args, **kwargs):
try:
user = self.get_object()
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
if user:
for node in user.contributor_to:
if not node.is_registration and not node.is_spam:
node.confirm_spam(save=True)
update_admin_log(
user_id=request.user.id,
object_id=node._id,
object_repr='Node',
message='Confirmed SPAM: {} when user {} marked as spam'.format(node._id, user._id),
action_flag=CONFIRM_SPAM
)
kwargs.update({'is_spam': True})
return super(SpamUserDeleteView, self).delete(request, *args, **kwargs)
class HamUserRestoreView(UserDeleteView):
"""
Allow authorized admin user to undelete a ham user
"""
template_name = 'users/restore_ham_user.html'
def delete(self, request, *args, **kwargs):
try:
user = self.get_object()
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
if user:
for node in user.contributor_to:
if node.is_spam:
node.confirm_ham(save=True)
update_admin_log(
user_id=request.user.id,
object_id=node._id,
object_repr='Node',
message='Confirmed HAM: {} when user {} marked as ham'.format(node._id, user._id),
action_flag=CONFIRM_SPAM
)
kwargs.update({'is_spam': False})
return super(HamUserRestoreView, self).delete(request, *args, **kwargs)
class UserSpamList(PermissionRequiredMixin, ListView):
SPAM_TAG = 'spam_flagged'
paginate_by = 25
paginate_orphans = 1
    ordering = ('-date_disabled')
    context_object_name = 'user'
permission_required = ('osf.view_spam', 'osf.view_user')
raise_exception = True
def get_queryset(self):
return OSFUser.objects.filter(tags__name=self.SPAM_TAG).order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'users': map(serialize_user, query_set),
'page': page,
}
class UserFlaggedSpamList(UserSpamList, DeleteView):
SPAM_TAG = 'spam_flagged'
template_name = 'users/flagged_spam_list.html'
def delete(self, request, *args, **kwargs):
        if not request.user.has_perm('osf.mark_spam'):
raise PermissionDenied("You don't have permission to update this user's spam status.")
user_ids = [
uid for uid in request.POST.keys()
if uid != 'csrfmiddlewaretoken'
]
for uid in user_ids:
user = OSFUser.load(uid)
if 'spam_flagged' in user.system_tags:
user.system_tags.remove('spam_flagged')
user.add_system_tag('spam_confirmed')
user.save()
update_admin_log(
user_id=self.request.user.id,
object_id=uid,
object_repr='User',
message='Confirmed SPAM: {}'.format(uid),
action_flag=CONFIRM_SPAM
)
return redirect('users:flagged-spam')
class UserKnownSpamList(UserSpamList):
SPAM_TAG = 'spam_confirmed'
template_name = 'users/known_spam_list.html'
class UserKnownHamList(UserSpamList):
SPAM_TAG = 'ham_confirmed'
template_name = 'users/known_spam_list.html'
class User2FactorDeleteView(UserDeleteView):
""" Allow authorised admin user to remove 2 factor authentication.
Interface with OSF database. No admin models.
"""
template_name = 'users/remove_2_factor.html'
def delete(self, request, *args, **kwargs):
user = self.get_object()
try:
user.delete_addon('twofactor')
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
update_admin_log(
user_id=self.request.user.id,
object_id=user.pk,
object_repr='User',
message='Removed 2 factor auth for user {}'.format(user.pk),
action_flag=USER_2_FACTOR
)
return redirect(reverse_user(self.kwargs.get('guid')))
class UserFormView(PermissionRequiredMixin, GuidFormView):
template_name = 'users/search.html'
object_type = 'user'
permission_required = 'osf.view_user'
raise_exception = True
@property
def success_url(self):
return reverse_user(self.guid)
class UserView(PermissionRequiredMixin, GuidView):
template_name = 'users/user.html'
context_object_name = 'user'
permission_required = 'osf.view_user'
raise_exception = True
def get_context_data(self, **kwargs):
kwargs = super(UserView, self).get_context_data(**kwargs)
kwargs.update({'SPAM_STATUS': SpamStatus}) # Pass spam status in to check against
return kwargs
def get_object(self, queryset=None):
return serialize_user(OSFUser.load(self.kwargs.get('guid')))
class UserWorkshopFormView(PermissionRequiredMixin, FormView):
form_class = WorkshopForm
object_type = 'user'
template_name = 'users/workshop.html'
permission_required = 'osf.view_user'
raise_exception = True
def form_valid(self, form):
csv_file = form.cleaned_data['document']
final = self.parse(csv_file)
file_name = csv_file.name
        results_file_name = '{}_user_stats.csv'.format(os.path.splitext(file_name.replace(' ', '_'))[0])
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="{}"'.format(results_file_name)
writer = csv.writer(response)
for row in final:
writer.writerow(row)
return response
@staticmethod
def find_user_by_email(email):
user_list = OSFUser.objects.filter(emails__contains=[email])
return user_list[0] if user_list else None
@staticmethod
def find_user_by_full_name(full_name):
user_list = OSFUser.objects.filter(fullname=full_name)
return user_list[0] if user_list.count() == 1 else None
@staticmethod
def find_user_by_family_name(family_name):
user_list = OSFUser.objects.filter(family_name=family_name)
return user_list[0] if user_list.count() == 1 else None
@staticmethod
def get_user_logs_since_workshop(user, workshop_date):
query_date = workshop_date + timedelta(days=1)
return NodeLog.objects.filter(user=user, date__gt=query_date)
@staticmethod
def get_user_nodes_since_workshop(user, workshop_date):
query_date = workshop_date + timedelta(days=1)
return Node.objects.filter(creator=user, date_created__gt=query_date)
def parse(self, csv_file):
""" Parse and add to csv file.
:param csv_file: Comma separated
:return: A list
"""
result = []
csv_reader = csv.reader(csv_file)
for index, row in enumerate(csv_reader):
if index == 0:
row.extend([
'OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Date'
])
result.append(row)
continue
email = row[5]
user_by_email = self.find_user_by_email(email)
if not user_by_email:
full_name = row[4]
try:
family_name = impute_names(full_name)['family']
except UnicodeDecodeError:
row.extend(['Unable to parse name'])
result.append(row)
continue
user_by_name = self.find_user_by_full_name(full_name) or self.find_user_by_family_name(family_name)
if not user_by_name:
row.extend(['', 0, 0, ''])
result.append(row)
continue
else:
user = user_by_name
else:
user = user_by_email
workshop_date = datetime.strptime(row[1], '%m/%d/%y')
nodes = self.get_user_nodes_since_workshop(user, workshop_date)
user_logs = self.get_user_logs_since_workshop(user, workshop_date)
last_log_date = user_logs.latest().date.strftime('%m/%d/%y') if user_logs else ''
row.extend([
user.pk, len(user_logs), len(nodes), last_log_date
])
result.append(row)
return result
    def form_invalid(self, form):
        return super(UserWorkshopFormView, self).form_invalid(form)
class ResetPasswordView(PermissionRequiredMixin, FormView):
form_class = EmailResetForm
template_name = 'users/reset.html'
context_object_name = 'user'
permission_required = 'osf.change_user'
raise_exception = True
def get_context_data(self, **kwargs):
user = OSFUser.load(self.kwargs.get('guid'))
try:
self.initial.setdefault('emails', [(r, r) for r in user.emails])
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
kwargs.setdefault('guid', user._id)
return super(ResetPasswordView, self).get_context_data(**kwargs)
def form_valid(self, form):
email = form.cleaned_data.get('emails')
user = get_user(email)
if user is None or user._id != self.kwargs.get('guid'):
return HttpResponse(
'{} with id "{}" and email "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid'),
email
),
status=409
)
reset_abs_url = furl(DOMAIN)
user.verification_key_v2 = generate_verification_key(verification_type='password')
user.save()
reset_abs_url.path.add(('resetpassword/{}/{}'.format(user._id, user.verification_key_v2['token'])))
send_mail(
subject='Reset OSF Password',
message='Follow this link to reset your password: {}'.format(
reset_abs_url.url
),
from_email=SUPPORT_EMAIL,
recipient_list=[email]
)
update_admin_log(
user_id=self.request.user.id,
object_id=user.pk,
object_repr='User',
message='Emailed user {} a reset link.'.format(user.pk),
action_flag=USER_EMAILED
)
return super(ResetPasswordView, self).form_valid(form)
@property
def success_url(self):
return reverse_user(self.kwargs.get('guid'))
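# --- Editorial note ---
# The furl construction in ResetPasswordView.form_valid produces a link of the
# form below; domain and token are illustrative:
#
#   https://<DOMAIN>/resetpassword/<user_id>/<verification_token>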
|
{
"content_hash": "2b3ec9e3d5a21e914ae5d55ccaea1f1e",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 115,
"avg_line_length": 36.142857142857146,
"alnum_prop": 0.5766359244620114,
"repo_name": "monikagrabowska/osf.io",
"id": "1c556eb4a97a8b8cd63461a190dad9ad94bf347e",
"size": "15939",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "admin/users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176566"
},
{
"name": "HTML",
"bytes": "183119"
},
{
"name": "JavaScript",
"bytes": "2017358"
},
{
"name": "Jupyter Notebook",
"bytes": "8510"
},
{
"name": "Makefile",
"bytes": "6905"
},
{
"name": "Mako",
"bytes": "755899"
},
{
"name": "PLpgSQL",
"bytes": "22144"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "9632033"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
}
|
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class AproposTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_apropos(self):
self.expect("apropos", error=True,
substrs=[' must be called with exactly one argument'])
self.expect("apropos a b", error=True,
substrs=[' must be called with exactly one argument'])
self.expect("apropos ''", error=True,
substrs=['\'\' is not a valid search word'])
@no_debug_info_test
def test_apropos_variable(self):
"""Test that 'apropos variable' prints the fully qualified command name"""
self.expect(
'apropos variable',
substrs=[
'frame variable',
'target variable',
'watchpoint set variable'])
|
{
"content_hash": "1be6eb89fcf57a65a29ac8d746cb50bd",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 82,
"avg_line_length": 34.88461538461539,
"alnum_prop": 0.5865490628445424,
"repo_name": "endlessm/chromium-browser",
"id": "920f0dd0b2455677b644a35b3480f265720b3f89",
"size": "907",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/commands/apropos/basic/TestApropos.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from photoshell.hash import hash_file
import os
def test_hash_file(tmpdir):
tmpdir.join('file.test').write("Test")
assert (hash_file(os.path.join(tmpdir.strpath, 'file.test')) ==
'640ab2bae07bedc4c163f679a746f7ab7fb5d1fa')
|
{
"content_hash": "1353d70438de5d31de77497abd61730e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7032520325203252,
"repo_name": "photoshell/photoshell",
"id": "6e5dbb56749d8b9339cb5cd0a1ad792378b37b20",
"size": "246",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/hash_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1442"
},
{
"name": "Python",
"bytes": "36072"
}
],
"symlink_target": ""
}
|
import torch
import torch.multiprocessing as mp
class TrafficLight:
"""used by chief to allow workers to run or not"""
    def __init__(self, val=False):
        # Honor the caller-supplied initial value (it was previously ignored).
        self.val = mp.Value("b", val)
self.lock = mp.Lock()
def get(self):
with self.lock:
return self.val.value
def switch(self):
with self.lock:
self.val.value = (not self.val.value)
class Counter:
"""enable the chief to access worker's total number of updates"""
    def __init__(self, val=0):
        # Honor the caller-supplied initial count (it was previously ignored).
        self.val = mp.Value("i", val)
self.lock = mp.Lock()
def get(self):
# used by chief
with self.lock:
return self.val.value
def increment(self):
# used by workers
with self.lock:
self.val.value += 1
def reset(self):
# used by chief
with self.lock:
self.val.value = 0
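# Minimal usage sketch (not part of the original module; assumes a fork-based
# start method, the Linux default): the chief gates hypothetical workers with
# TrafficLight and tallies their updates with Counter.
if __name__ == '__main__':
    def worker(light, counter):
        # Workers only contribute an update while the chief leaves the light on.
        if light.get():
            counter.increment()
    light, counter = TrafficLight(), Counter()
    light.switch()  # chief turns the light on so workers may run
    procs = [mp.Process(target=worker, args=(light, counter)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.get())  # expect 4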
|
{
"content_hash": "291029214fe730312a4bbe90c4129799",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 69,
"avg_line_length": 23.128205128205128,
"alnum_prop": 0.5532150776053215,
"repo_name": "alexis-jacq/Pytorch-DPPO",
"id": "7a2cd591c5960da2d25801595a2eec77d057107d",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22117"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Xtina Schelin <xtina.schelin@gmail.com>'
__docformat__ = 'restructuredtext en'
try:
from PyQt5 import Qt as QtGui
except ImportError:
from PyQt4 import QtGui
try:
from PyQt5.Qt import QLabel, QGridLayout, Qt, QGroupBox, QCheckBox
except ImportError:
from PyQt4.Qt import QLabel, QGridLayout, Qt, QGroupBox, QCheckBox
from calibre.gui2.metadata.config import ConfigWidget as DefaultConfigWidget
from calibre.utils.config import JSONConfig
STORE_NAME = 'Options'
KEY_MAX_DOWNLOADS = 'maxDownloads'
KEY_APPEND_CONTENTS = 'appendContents'
DEFAULT_STORE_VALUES = {
KEY_MAX_DOWNLOADS: 1,
KEY_APPEND_CONTENTS: False
}
# This is where all preferences for this plugin will be stored.
plugin_prefs = JSONConfig('plugins/ISFDB')
# Set defaults.
plugin_prefs.defaults[STORE_NAME] = DEFAULT_STORE_VALUES
class ConfigWidget(DefaultConfigWidget):
def __init__(self, plugin):
DefaultConfigWidget.__init__(self, plugin)
c = plugin_prefs[STORE_NAME]
other_group_box = QGroupBox('Other options', self)
self.l.addWidget(other_group_box, self.l.rowCount(), 0, 1, 2)
other_group_box_layout = QGridLayout()
other_group_box.setLayout(other_group_box_layout)
# Maximum # of title/author searches to review.
max_label = QLabel('Maximum title/author search matches to evaluate (1 = fastest):', self)
max_label.setToolTip('ISFDB doesn\'t always have links to large covers for every ISBN\n'
'of the same book. Increasing this value will take effect when doing\n'
'title/author searches to consider more ISBN editions.\n\n'
'This will increase the potential likelihood of getting a larger cover,\n'
'though does not guarantee it.')
other_group_box_layout.addWidget(max_label, 0, 0, 1, 1)
self.max_downloads_spin = QtGui.QSpinBox(self)
self.max_downloads_spin.setMinimum(1)
self.max_downloads_spin.setMaximum(5)
self.max_downloads_spin.setProperty('value', c.get(KEY_MAX_DOWNLOADS, DEFAULT_STORE_VALUES[KEY_MAX_DOWNLOADS]))
other_group_box_layout.addWidget(self.max_downloads_spin, 0, 1, 1, 1)
other_group_box_layout.setColumnStretch(2, 1)
# Contents field, if possible.
self.contents_checkbox = QCheckBox('Append Contents if available to comments', self)
self.contents_checkbox.setToolTip('Choosing this option will write the Contents section to the comments\n'
'field, if such a section exists.')
self.contents_checkbox.setChecked(c.get(KEY_APPEND_CONTENTS, DEFAULT_STORE_VALUES[KEY_APPEND_CONTENTS]))
other_group_box_layout.addWidget(self.contents_checkbox, 2, 0, 1, 3)
def commit(self):
DefaultConfigWidget.commit(self)
new_prefs = {}
new_prefs[KEY_MAX_DOWNLOADS] = int(unicode(self.max_downloads_spin.value()))
new_prefs[KEY_APPEND_CONTENTS] = self.contents_checkbox.checkState() == Qt.Checked
plugin_prefs[STORE_NAME] = new_prefs
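# Illustrative sketch (not part of the original plugin): other plugin modules
# can read the committed values back through the same JSONConfig.
def get_option(key):
    # Return a stored option, falling back to the shipped default.
    c = plugin_prefs[STORE_NAME]
    return c.get(key, DEFAULT_STORE_VALUES[key])
# e.g. get_option(KEY_MAX_DOWNLOADS) == 1 until the user changes it.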
|
{
"content_hash": "0016eb5ea6cb53020e129985d6fb74fb",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 113,
"avg_line_length": 41.66197183098591,
"alnum_prop": 0.742393509127789,
"repo_name": "XtinaSchelin/isfdb-calibre",
"id": "881b631fc4e09fc3011532618d04b44544d61e42",
"size": "3031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isfdb-plugin/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23045"
}
],
"symlink_target": ""
}
|
"""Daemon Component
Component to daemonize a system into the background and detach it from its
controlling PTY. Supports PID file writing, redirecting stdin, stdout and stderr,
and changing the current working directory.
"""
from __future__ import print_function
import os
import sys
import fcntl
from logging import getLogger
from circuits import handler, BaseComponent
UMASK = 0
WORKDIR = "/"
DEVNULL = getattr(os, "devnull", "/dev/null")
class Daemon(BaseComponent):
"""Daemon Component
:param pidfile: .pid filename
:type pidfile: str or unicode
:param path: path to change directory to
:type path: str
"""
def init(self, pidfile, path=WORKDIR):
self.pidfile = os.path.abspath(pidfile)
self.path = os.path.abspath(path)
self.logger = getLogger(__name__)
self.logger.debug("pidfile: {}".format(self.pidfile))
self.logger.debug("path: {}".format(self.path))
    def create_lockfile(self):
        # If the pidfile already exists, read the old pid first so that it can
        # be restored if locking fails (opening the file in "w" mode truncates
        # its contents).
        old_pid = ""
        if os.path.isfile(self.pidfile):
            with open(self.pidfile, "r") as old_pidfile:
                old_pid = old_pidfile.read()
# Create a lockfile so that only one instance of this daemon is running at any time.
try:
lockfile = open(self.pidfile, "w")
except IOError:
print("Unable to create the pidfile.", file=sys.stderr)
raise SystemExit(1)
try:
# Try to get an exclusive lock on the file.
# This will fail if another process has the file locked.
fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print("Unable to lock on the pidfile.", file=sys.stderr)
# We need to overwrite the pidfile if we got here.
with open(self.pidfile, "w") as pidfile:
pidfile.write(old_pid)
raise SystemExit(1)
return lockfile
def daemonize(self):
lockfile = self.create_lockfile()
try:
pid = os.fork()
if pid > 0:
# exit first parent
os._exit(0)
except OSError as e:
print("fork #1 failed: {0:d} ({0:s})".format(e.errno, str(e)), file=sys.stderr)
raise SystemExit(1)
# decouple from parent environment
os.chdir(self.path)
os.setsid()
os.umask(UMASK)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
os._exit(0)
except OSError as e:
sys.stderr.write(
"fork #2 failed: {0:d} ({0:s})\n".format(
e.errno, str(e)
)
)
raise SystemExit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
for fd in (0, 1, 2):
try:
os.close(fd)
except OSError:
pass
os.open(DEVNULL, os.O_RDWR) # standard input (0)
# Duplicate stdin to stdout and stderr.
os.dup2(0, 1) # stdout (1)
os.dup2(0, 2) # stderr (2)
try:
lockfile.write("%s" % (os.getpid()))
lockfile.flush()
except IOError:
print("Unable to write pid to the pidfile.", file=sys.stderr)
raise SystemExit(1)
@handler("ready", channel="*")
def on_ready(self, server, bind):
self.logger.debug("daemonizing ...")
self.daemonize()
self.unregister()
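if __name__ == "__main__":
    # Minimal sketch (assumes circuits.web is installed): daemonize a tiny
    # web application once its server fires the "ready" event.
    from circuits.web import Server, Controller
    class Root(Controller):
        def index(self):
            return "Hello World!"
    (Server(("0.0.0.0", 8000)) + Root() + Daemon("/tmp/example.pid")).run()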
|
{
"content_hash": "029fcfd946458f2d11fe522c2756245f",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 95,
"avg_line_length": 28.189393939393938,
"alnum_prop": 0.5614082235958076,
"repo_name": "prologic/cgod",
"id": "43692cb0e45a89da644ce67c2b82db4101f5b59e",
"size": "3834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgod/daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "305"
},
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Python",
"bytes": "66474"
}
],
"symlink_target": ""
}
|
"""
Relaxation methods
------------------
The multigrid cycle is formed by two complementary procedures: relaxation and
coarse-grid correction. The role of relaxation is to rapidly damp oscillatory
(high-frequency) errors out of the approximate solution. When the error is
smooth, it can then be accurately represented on the coarser grid, where a
solution, or approximate solution, can be computed.
Iterative methods for linear systems that have an error smoothing property
are valid relaxation methods. Since the purpose of a relaxation method is
to smooth oscillatory errors, its effectiveness on non-oscillatory errors
is not important. This point explains why simple iterative methods like
Gauss-Seidel iteration are effective relaxation methods while being very
slow to converge to the solution of Ax=b.
PyAMG implements relaxation methods of the following varieties:
1. Jacobi iteration
2. Gauss-Seidel iteration
3. Successive Over-Relaxation
4. Polynomial smoothing (e.g. Chebyshev)
5. Jacobi and Gauss-Seidel on the normal equations (A.H A and A A.H)
6. Krylov methods: gmres, cg, cgnr, cgne
7. No pre- or postsmoother
Refer to the docstrings of the individual methods for additional information.
"""
__docformat__ = "restructuredtext en"
#TODO: explain separation of basic methods from interface methods.
#TODO: explain why each class of methods exist (parallel vs. serial, SPD vs. indefinite)
postpone_import = 1
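if __name__ == '__main__':
    # Illustrative sketch (plain NumPy/SciPy, not PyAMG's API): weighted
    # Jacobi on a 1D Poisson problem damps the oscillatory components of a
    # random error much faster than it reduces the error overall.
    import numpy as np
    import scipy.sparse as sparse
    n = 64
    A = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
    b = np.zeros(n)
    x = np.random.rand(n)      # x is the error, since the exact solution is 0
    Dinv = 1.0 / A.diagonal()
    omega = 2.0 / 3.0          # classical damping weight for 1D Poisson
    for sweep in range(10):
        x += omega * Dinv * (b - A * x)
    print(np.linalg.norm(x))   # error is now smooth, ready for coarsening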
|
{
"content_hash": "ea1cdbd6b648133f99060acd8aa5da97",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 40.77777777777778,
"alnum_prop": 0.7704359673024523,
"repo_name": "pombreda/pyamg",
"id": "605c60ac63c82465f6a12fce91eb5735d0f8c58f",
"size": "1468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyamg/relaxation/info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1112880"
},
{
"name": "CSS",
"bytes": "9832"
},
{
"name": "Makefile",
"bytes": "3249"
},
{
"name": "Matlab",
"bytes": "2742"
},
{
"name": "Python",
"bytes": "1215339"
},
{
"name": "Shell",
"bytes": "558"
},
{
"name": "TeX",
"bytes": "232"
}
],
"symlink_target": ""
}
|
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class PciDevice(base.NovaPersistentObject, base.NovaObject):
"""Object to represent a PCI device on a compute node.
PCI devices are managed by the compute resource tracker, which discovers
the devices from the hardware platform, claims, allocates and frees
devices for instances.
The PCI device information is permanently maintained in a database.
This makes it convenient to get PCI device information, like physical
function for a VF device, adjacent switch IP address for a NIC,
hypervisor identification for a PCI device, etc. It also provides a
convenient way to check device allocation information for administrator
purposes.
A device can be in available/claimed/allocated/deleted/removed state.
    A device is available when it is discovered.
A device is claimed prior to being allocated to an instance. Normally the
transition from claimed to allocated is quick. However, during a resize
operation the transition can take longer, because devices are claimed in
prep_resize and allocated in finish_resize.
A device becomes removed when hot removed from a node (i.e. not found in
the next auto-discover) but not yet synced with the DB. A removed device
should not be allocated to any instance, and once deleted from the DB,
the device object is changed to deleted state and no longer synced with
the DB.
    Field notes::
| 'dev_id':
| Hypervisor's identification for the device, the string format
| is hypervisor specific
| 'extra_info':
| Device-specific properties like PF address, switch ip address etc.
"""
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
# Note(yjiang5): the compute_node_id may be None because the pci
# device objects are created before the compute node is created in DB
'compute_node_id': fields.IntegerField(nullable=True),
'address': fields.StringField(),
'vendor_id': fields.StringField(),
'product_id': fields.StringField(),
'dev_type': fields.StringField(),
'status': fields.StringField(),
'dev_id': fields.StringField(nullable=True),
'label': fields.StringField(nullable=True),
'instance_uuid': fields.StringField(nullable=True),
'extra_info': fields.DictOfStringsField(),
}
def update_device(self, dev_dict):
"""Sync the content from device dictionary to device object.
The resource tracker updates the available devices periodically.
To avoid meaningless syncs with the database, we update the device
object only if a value changed.
"""
# Note(yjiang5): status/instance_uuid should only be updated by
# functions like claim/allocate etc. The id is allocated by
# database. The extra_info is created by the object.
        no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        for key in no_changes:
            dev_dict.pop(key, None)
for k, v in dev_dict.items():
if k in self.fields.keys():
self[k] = v
else:
# Note (yjiang5) extra_info.update does not update
                # obj_what_changed, set it explicitly
extra_info = self.extra_info
extra_info.update({k: v})
self.extra_info = extra_info
def __init__(self, *args, **kwargs):
super(PciDevice, self).__init__(*args, **kwargs)
self.obj_reset_changes()
self.extra_info = {}
@staticmethod
def _from_db_object(context, pci_device, db_dev):
for key in pci_device.fields:
if key != 'extra_info':
pci_device[key] = db_dev[key]
else:
extra_info = db_dev.get("extra_info")
pci_device.extra_info = jsonutils.loads(extra_info)
pci_device._context = context
pci_device.obj_reset_changes()
return pci_device
@base.remotable_classmethod
def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
db_dev = db.pci_device_get_by_addr(
context, compute_node_id, dev_addr)
return cls._from_db_object(context, cls(), db_dev)
@base.remotable_classmethod
def get_by_dev_id(cls, context, id):
db_dev = db.pci_device_get_by_id(context, id)
return cls._from_db_object(context, cls(), db_dev)
@classmethod
def create(cls, dev_dict):
"""Create a PCI device based on hypervisor information.
As the device object is just created and is not synced with db yet
thus we should not reset changes here for fields from dict.
"""
pci_device = cls()
pci_device.update_device(dev_dict)
pci_device.status = 'available'
return pci_device
@base.remotable
def save(self, context):
if self.status == 'removed':
self.status = 'deleted'
db.pci_device_destroy(context, self.compute_node_id, self.address)
elif self.status != 'deleted':
updates = self.obj_get_changes()
if 'extra_info' in updates:
updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
if updates:
db_pci = db.pci_device_update(context, self.compute_node_id,
self.address, updates)
self._from_db_object(context, self, db_pci)
class PciDeviceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# PciDevice <= 1.1
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('PciDevice'),
}
child_versions = {
'1.0': '1.1',
# NOTE(danms): PciDevice was at 1.1 before we added this
}
def __init__(self, *args, **kwargs):
super(PciDeviceList, self).__init__(*args, **kwargs)
self.objects = []
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_compute_node(cls, context, node_id):
db_dev_list = db.pci_device_get_all_by_node(context, node_id)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, uuid):
db_dev_list = db.pci_device_get_all_by_instance_uuid(context, uuid)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
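# Illustrative sketch (not part of Nova; example values are hypothetical):
# the resource tracker builds a device object from hypervisor-discovered data
# and later syncs changed fields with update_device().
#
#     dev_dict = {'address': '0000:00:1f.2', 'vendor_id': '8086',
#                 'product_id': '1521', 'dev_type': 'type-PF',
#                 'dev_id': 'pci_0000_00_1f_2', 'label': 'label_8086_1521',
#                 'phys_function': None}      # unknown keys go to extra_info
#     dev = PciDevice.create(dev_dict)        # status == 'available'
#     dev.update_device({'label': 'label_8086_1521_v2'})  # only label changes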
|
{
"content_hash": "117094399c81084f73692f710a9c8b31",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 78,
"avg_line_length": 38.422222222222224,
"alnum_prop": 0.6278195488721805,
"repo_name": "virtualopensystems/nova",
"id": "32caadeebdfa73dcacd42618d157f71849e169b0",
"size": "7549",
"binary": false,
"copies": "5",
"ref": "refs/heads/bp/vif-vhostuser",
"path": "nova/objects/pci_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14939768"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
}
|
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
    print("Error importing BeautifulSoup Library")
    raise
import re
from util import formatter
import datetime
class Parser(object):
    def __init__(self, airline=None):
        # Build the response dict per instance; as a class attribute it was
        # shared (and mutated) by every parser instance.
        self.response = {
            "time_departure_s": [],
            "time_departure_e": [],
            "time_return_s": [],
            "time_return_e": [],
            "stops_departure": [],
            "stops_return": [],
            "price_departure": [],
            "price_return": [],
            "prices_around_departure": [],
            "prices_around_return": []
        }
    def feed(self, html):
        pass
class ParserAzul(Parser):
    def __init__(self):
        super(ParserAzul, self).__init__()
def feed(self, html):
soup_all = BeautifulSoup(html.encode("utf8"))
valid_html = soup_all.find("div", {"id": "selectMainBody"}) is not None
if not valid_html:
return False
departure_cell = html.find("<h2>")
return_cell = html.rfind("<h2>")
if return_cell != departure_cell:
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"),
html[departure_cell:return_cell - 1]],
[("price_return", "time_return_s", "time_return_e",
"stops_return", "prices_around_return"),
html[return_cell:]]]
else:
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"),
html[departure_cell:]]]
for keys, html_code in array:
soup = BeautifulSoup(html_code)
# Get cheapest prices around the searched date
around = soup.findAll("div", {"class": "carrossel"})[0]\
.div.ul.findAll("li")
for child in around:
if child.text == "" or child.div.text == "":
value = -1
else:
if child.div.div is None:
value = formatter.format_price(child.div.text)
else:
value = formatter.format_price(child.div.div.text)
self.response[keys[4]].append(value)
tickets_available =\
len(soup.findAll("div", {"class": "noFlightsAvailable"})) == 0
if not tickets_available:
continue
# For each flight option
for tag in soup.findAll(re.compile(r"^(tr)$"), {"class":
re.compile(r"^(flightInfo)$")}):
# Get the minimum price
lowest_fare = 999999
for child in tag.findAll("span", {"class": "farePrice"}):
lowest_fare = min(lowest_fare,
formatter.format_price(child.text))
self.response[keys[0]].append(lowest_fare)
# Get the day and the weekday (output)
for child in tag.findAll("div", {"class": "output"}):
if ":" in child.text:
self.response[keys[1]].append(
child.text.encode("utf8"))
# Get the day and the weekday (arrival)
for child in tag.findAll("div", {"class": "arrival"}):
if ":" in child.text:
self.response[keys[2]].append(
child.text.encode("utf8"))
for child in tag.findAll("p", {"class": "stopNumbers"}):
if child.text == "Voo Direto":
self.response[keys[3]].append(0)
else:
text = child.text[0]
self.response[keys[3]].append(int(text))
return self.response
class ParserGol(Parser):
    def __init__(self):
        super(ParserGol, self).__init__()
def feed(self, html):
soup_all = BeautifulSoup(html.encode("utf8"))
valid_html =\
soup_all.find("div", {"class": "ContentTable"}) is not None
if not valid_html:
return False
departure_cell = html.find("<h2>")
return_cell = html.rfind("<h2>")
if return_cell != departure_cell:
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"),
html[departure_cell:return_cell - 1]],
[("price_return", "time_return_s", "time_return_e",
"stops_return", "prices_around_return"),
html[return_cell:]]]
else:
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"),
html[departure_cell:]]]
for keys, html_code in array:
soup = BeautifulSoup(html_code)
# Get cheapest prices around the searched date
around = soup.findAll("ul", {"class": "listDates"})[0]\
.findAll("li")
for child in around:
c = child.a.findAll("span", {"class": "price"})[0]\
.findAll("span")
if len(c) > 0:
v = c[2].text
self.response[keys[4]].append(formatter.format_price(v))
else:
self.response[keys[4]].append(-1)
tickets_available =\
len(soup.findAll("div", {"class": "noFlightsAvailable"})) == 0
if not tickets_available:
continue
tickets_available =\
len(soup.findAll("div", {"class": "areaNotFound"})) == 0
if not tickets_available:
continue
# For each flight option
for tag in soup.findAll("div", {"class": "lineTable"}):
# Get the minimum price
lowest_fare = 999999
for child in tag.findAll("span", {"class": "fareValue"}):
lowest_fare = min(lowest_fare,
formatter.format_price(child.text[3:]))
self.response[keys[0]].append(lowest_fare)
d_s_time = tag.findAll("span", {"class": "timeGoing"})[0]\
.findAll("span", {"class": "hour"})[0].text
self.response[keys[1]].append(d_s_time)
d_e_time =\
tag.findAll("span", {"class": "timeoutGoing"})[0]\
.findAll("span", {"class": "hour"})[0].text
self.response[keys[2]].append(d_e_time)
airport = tag.findAll("span", {"class": "titleAirplane"})[0]\
.a.span.text
for child in tag.findAll("span",
{"class": "connectionScalesNumber"}):
self.response[keys[3]].append(int(child.strong.text))
return self.response
class ParserAvianca(Parser):
    def __init__(self):
        super(ParserAvianca, self).__init__()
def feed(self, html):
soup_all = BeautifulSoup(html.encode("utf8"))
valid_html = soup_all.find("table", {"class": "tableFPCUpsellPanel"})\
is not None
if not valid_html:
return False
cells = soup_all.findAll("table", {"class": "tableFPCUpsellPanel"})
departure_cell = cells[0]
return_cell = cells[-1]
departure_cell.append(
soup_all.find("table", {"id": "tableFPCTabs_out"}))
if return_cell != departure_cell:
return_cell.append(
soup_all.find("table", {"id": "tableFPCTabs_in"}))
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"), departure_cell],
[("price_return", "time_return_s", "time_return_e",
"stops_return", "prices_around_return"), return_cell]]
else:
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"), departure_cell]]
for keys, soup in array:
# Get cheapest prices around the searched date
around = soup.findAll("table", {"class": "tableFPCTabs"})[0]\
.tr.findAll("td", recursive=False)
for child in around:
if "elected" not in child.get("class"):
continue
c = child.findAll("span")[-1]
if c.text == "- N/D -":
self.response[keys[4]].append(-1)
else:
v = c.text.split(" ")[1]
self.response[keys[4]].append(formatter.format_price(v))
tickets_available =\
len(soup.findAll("td", {"class": "noFlightsAvail"})) == 0
if not tickets_available:
continue
flights = soup\
.findAll("div", {"class": "divFPCUpsellPanelScroll"})[0]\
.table.findAll("tr", recursive=False)
# For each flight option
for tag in flights:
fare_cells = tag.findAll("td", recursive=False)[1:]
# Get the minimum price
lowest_fare = 999999
for child in fare_cells:
if child.text.find("Esgotado") != -1:
continue
fare = re.search(u'([(R$ )])(\d+(?:\.\d{2})?)', child.text)
fare = fare.string.split(" ")[1][:-1]
lowest_fare = min(lowest_fare,
formatter.format_price(fare))
self.response[keys[0]].append(lowest_fare)
t = tag.findAll("table", {"class": "tableFPCFlightDetails"})[0]
td = t.findAll("td")
d_s_time = td[0].text
self.response[keys[1]].append(d_s_time)
d_e_time = td[3].text
self.response[keys[2]].append(d_e_time)
stops = td[5].findAll("li")[2].text[0]
self.response[keys[3]].append(int(stops))
return self.response
class ParserLatam(Parser):
    def __init__(self):
        super(ParserLatam, self).__init__()
def convert_latamdate_to_mysql(self, date):
day, month, year = date.split(" ")
month = formatter.fullmonth_to_number(month)
return year + "-" + month + "-" + day.encode("utf8")
def feed(self, html):
date_search = []
soup_all = BeautifulSoup(html.encode("utf8"))
main = soup_all.find("div", {"id": "mainInner"})
valid_html = main is not None
if not valid_html:
return False
departure_cell = soup_all.find("div", {"id": "sticky-wrap-out"})
return_cell = soup_all.find("div", {"id": "sticky-wrap-in"})
tab_cells = soup_all.findAll("section")
departure_cell.append(tab_cells[3])
# Getting departure date
date_departure = soup_all.find("p", {"id": "outbound-initDate"}).text\
.split(", ")[1].replace("de ", "")
date_departure = self.convert_latamdate_to_mysql(date_departure)
date_departure = datetime.datetime.strptime(
date_departure, '%Y-%m-%d').date()
date_search.append(date_departure)
tickets_available = departure_cell is not None
if not tickets_available:
return self.response
if return_cell is not None:
return_cell.append(tab_cells[4])
# Getting return date
date_return = soup_all.find("p", {"id": "inbound-initDate"}).text\
.split(", ")[1].replace("de ", "")
date_return = self.convert_latamdate_to_mysql(date_return)
date_return = datetime.datetime.strptime(
date_return, '%Y-%m-%d').date()
date_search.append(date_return)
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"), departure_cell,
date_search[0]],
[("price_return", "time_return_s", "time_return_e",
"stops_return", "prices_around_return"), return_cell,
date_search[1]]]
else:
array = [[("price_departure", "time_departure_s",
"time_departure_e", "stops_departure",
"prices_around_departure"), departure_cell,
date_search[0]]]
for keys, soup, d in array:
            # Fix LATAM's unusual behavior on the "prices around" table.
around = soup.findAll("section")[0].findAll("li", {"class": "tc"})
date_today = datetime.datetime.now().date()
date_diff_days = (d - date_today).days
if date_diff_days < 3:
for i in range(3 - date_diff_days):
self.response[keys[4]].append(-1)
around = around[:date_diff_days - 3]
# Get cheapest prices around the searched date
for child in around:
v = child.strong.text
self.response[keys[4]].append(formatter.format_price(v))
flights = soup.table.tbody.findAll("tr")
# For each flight option
for tag in flights:
if "flightNextSegment" in tag.get("class"):
if "stopDuration" not in tag.get("class") and\
"totalDurationRow" not in tag.get("class"):
td = tag.findAll("td", recursive=False)
self.response[keys[2]][-1] = td[1].strong.text
self.response[keys[3]][-1] += 1
continue
if "flight" not in tag.get("class"):
continue
td = tag.findAll("td", recursive=False)
# Get the minimum price
lowest_fare = 999999
for child in td:
if "ff" not in child.get("class"):
continue
if child.text.find("---") != -1:
continue
fare = child.div.strong or child.div.span
fare = fare.text.strip()
lowest_fare = min(lowest_fare,
formatter.format_price(fare))
self.response[keys[0]].append(lowest_fare)
d_s_time = td[0].strong.text
self.response[keys[1]].append(d_s_time)
d_e_time = td[1].strong.text
self.response[keys[2]].append(d_e_time)
self.response[keys[3]].append(0)
return self.response
if __name__ == '__main__':
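    # Illustrative sketch (fetching happens elsewhere in fscan): feed a saved
    # results page to the matching parser and read the flattened response.
    #     with open('gol_results.html') as f:   # hypothetical capture
    #         response = ParserGol().feed(f.read())
    #     if response:
    #         print(response['price_departure'])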
pass
|
{
"content_hash": "edb89cb38d6d9813cdcd258d4adb7295",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 79,
"avg_line_length": 37.22738386308068,
"alnum_prop": 0.4788519637462236,
"repo_name": "nmoya/fscan",
"id": "b0f5b0beb206da38125a8e6008fa6f28e5ace2a7",
"size": "15250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/parser/parser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86948"
}
],
"symlink_target": ""
}
|
'''Utility Mixins to abstract common behavior for CC API Resources'''
from currencycloud.resources.actions.delete import DeleteMixin
from currencycloud.resources.actions.update import UpdateMixin
|
{
"content_hash": "5034bbb159747cebe1ed874ab1dbb74e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 69,
"avg_line_length": 49.25,
"alnum_prop": 0.8426395939086294,
"repo_name": "CurrencyCloud/currencycloud-python",
"id": "8aa82e67f286b1fa66c8a564f23ac49d245ff909",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/currencycloud/resources/actions/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178019"
}
],
"symlink_target": ""
}
|
"""Static data and helper functions."""
from __future__ import absolute_import
import collections
from datetime import timedelta
from datetime import tzinfo
import errno
import locale
import logging
import math
import multiprocessing
import os
import pkgutil
import re
import struct
import sys
import tempfile
import textwrap
import threading
import time
import traceback
import xml.etree.ElementTree as ElementTree
from apitools.base.py import http_wrapper
import boto
from boto import config
import boto.auth
from boto.exception import NoAuthHandlerFound
from boto.gs.connection import GSConnection
from boto.provider import Provider
from boto.pyami.config import BotoConfigLocations
import gslib
from gslib.exception import CommandException
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.thread_message import RetryableErrorMessage
from gslib.translation_helper import AclTranslation
from gslib.translation_helper import GenerationFromUrlAndString
from gslib.translation_helper import S3_ACL_MARKER_GUID
from gslib.translation_helper import S3_DELETE_MARKER_GUID
from gslib.translation_helper import S3_MARKER_GUIDS
import httplib2
from oauth2client.client import HAS_CRYPTO
from retry_decorator import retry_decorator
# Detect platform types.
PLATFORM = str(sys.platform).lower()
IS_WINDOWS = 'win32' in PLATFORM
IS_CYGWIN = 'cygwin' in PLATFORM
IS_LINUX = 'linux' in PLATFORM
IS_OSX = 'darwin' in PLATFORM
UTF8 = 'utf-8'
WINDOWS_1252 = 'cp1252'
# pylint: disable=g-import-not-at-top
if IS_WINDOWS:
from ctypes import c_int
from ctypes import c_uint64
from ctypes import c_char_p
from ctypes import c_wchar_p
from ctypes import windll
from ctypes import POINTER
from ctypes import WINFUNCTYPE
from ctypes import WinError
IS_CP1252 = locale.getdefaultlocale()[1] == WINDOWS_1252
else:
IS_CP1252 = False
# pylint: disable=g-import-not-at-top
try:
# This module doesn't necessarily exist on Windows.
import resource
HAS_RESOURCE_MODULE = True
except ImportError:
HAS_RESOURCE_MODULE = False
DEBUGLEVEL_DUMP_REQUESTS = 3
DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS = 4
ONE_KIB = 1024
ONE_MIB = 1024 * 1024
TWO_MIB = 2 * ONE_MIB
EIGHT_MIB = 8 * ONE_MIB
TEN_MIB = 10 * ONE_MIB
DEFAULT_FILE_BUFFER_SIZE = 8 * ONE_KIB
_DEFAULT_LINES = 25
RESUMABLE_THRESHOLD_MIB = 8
RESUMABLE_THRESHOLD_B = RESUMABLE_THRESHOLD_MIB * ONE_MIB
# By default, the timeout for SSL read errors is infinite. This could
# cause gsutil to hang on network disconnect, so pick a more reasonable
# timeout.
SSL_TIMEOUT = 60
# Start with a progress callback every 256 KiB during uploads/downloads (JSON
# API). Callback implementation should back off until it hits the maximum size
# so that callbacks do not create huge amounts of log output.
START_CALLBACK_PER_BYTES = 1024*256
MAX_CALLBACK_PER_BYTES = 1024*1024*100
# Upload/download files in 8 KiB chunks over the HTTP connection.
TRANSFER_BUFFER_SIZE = 1024*8
# Default number of progress callbacks during transfer (XML API).
XML_PROGRESS_CALLBACKS = 10
# Number of objects to request in listing calls.
NUM_OBJECTS_PER_LIST_PAGE = 1000
# For files >= this size, output a message indicating that we're running an
# operation on the file (like hashing or gzipping) so it does not appear to the
# user that the command is hanging.
MIN_SIZE_COMPUTE_LOGGING = 100*1024*1024 # 100 MiB
NO_MAX = sys.maxint
VERSION_MATCHER = re.compile(r'^(?P<maj>\d+)(\.(?P<min>\d+)(?P<suffix>.*))?')
RELEASE_NOTES_URL = 'https://pub.storage.googleapis.com/gsutil_ReleaseNotes.txt'
# Binary exponentiation strings.
_EXP_STRINGS = [
(0, 'B', 'bit'),
(10, 'KiB', 'Kibit', 'K'),
(20, 'MiB', 'Mibit', 'M'),
(30, 'GiB', 'Gibit', 'G'),
(40, 'TiB', 'Tibit', 'T'),
(50, 'PiB', 'Pibit', 'P'),
(60, 'EiB', 'Eibit', 'E'),
]
_EXP_TEN_STRING = [
(3, 'k'),
(6, 'm'),
(9, 'b'),
(12, 't'),
(15, 'q')
]
# Number of seconds to wait before printing a long retry warning message.
LONG_RETRY_WARN_SEC = 10
SECONDS_PER_DAY = 86400L
global manager # pylint: disable=global-at-module-level
# Single certs file for use across all processes.
configured_certs_file = None
# Temporary certs file for cleanup upon exit.
temp_certs_file = None
def _GenerateSuffixRegex():
"""Creates a suffix regex for human-readable byte counts."""
human_bytes_re = r'(?P<num>\d*\.\d+|\d+)\s*(?P<suffix>%s)?'
suffixes = []
suffix_to_si = {}
for i, si in enumerate(_EXP_STRINGS):
si_suffixes = [s.lower() for s in list(si)[1:]]
for suffix in si_suffixes:
suffix_to_si[suffix] = i
suffixes.extend(si_suffixes)
human_bytes_re %= '|'.join(suffixes)
matcher = re.compile(human_bytes_re)
return suffix_to_si, matcher
SUFFIX_TO_SI, MATCH_HUMAN_BYTES = _GenerateSuffixRegex()
# On Unix-like systems, we will set the maximum number of open files to avoid
# hitting the limit imposed by the OS. This number was obtained experimentally.
MIN_ACCEPTABLE_OPEN_FILES_LIMIT = 1000
GSUTIL_PUB_TARBALL = 'gs://pub/gsutil.tar.gz'
Retry = retry_decorator.retry # pylint: disable=invalid-name
# Cache the values from this check such that they're available to all callers
# without needing to run all the checks again (some of these, such as calling
# multiprocessing.Manager(), are expensive operations).
cached_multiprocessing_is_available = None
cached_multiprocessing_is_available_stack_trace = None
cached_multiprocessing_is_available_message = None
# This function used to belong inside of update.py. However, it needed to be
# moved here due to compatibility issues with Travis CI, because update.py is
# not included with PyPI installations.
def DisallowUpdateIfDataInGsutilDir(directory=gslib.GSUTIL_DIR):
"""Disallows the update command if files not in the gsutil distro are found.
This prevents users from losing data if they are in the habit of running
gsutil from the gsutil directory and leaving data in that directory.
This will also detect someone attempting to run gsutil update from a git
repo, since the top-level directory will contain git files and dirs (like
.git) that are not distributed with gsutil.
Args:
directory: The directory to use this functionality on.
Raises:
CommandException: if files other than those distributed with gsutil found.
"""
# Manifest includes recursive-includes of gslib. Directly add
# those to the list here so we will skip them in os.listdir() loop without
# having to build deeper handling of the MANIFEST file here. Also include
# 'third_party', which isn't present in manifest but gets added to the
# gsutil distro by the gsutil submodule configuration; and the MANIFEST.in
# and CHANGES.md files.
manifest_lines = ['MANIFEST.in', 'third_party']
try:
with open(os.path.join(directory, 'MANIFEST.in'), 'r') as fp:
for line in fp:
if line.startswith('include '):
manifest_lines.append(line.split()[-1])
elif re.match(r'recursive-include \w+ \*', line):
manifest_lines.append(line.split()[1])
except IOError:
logging.getLogger().warn('MANIFEST.in not found in %s.\nSkipping user data '
'check.\n', directory)
return
# Look just at top-level directory. We don't try to catch data dropped into
# subdirs (like gslib) because that would require deeper parsing of
  # MANIFEST.in, and most users who drop data into gsutil dir do so at the top
# level directory.
for filename in os.listdir(directory):
if (filename.endswith('.pyc') or filename == '__pycache__'
or filename == '.travis.yml'):
# Ignore compiled code and travis config.
continue
if filename not in manifest_lines:
raise CommandException('\n'.join(textwrap.wrap(
'A file (%s) that is not distributed with gsutil was found in '
'the gsutil directory. The update command cannot run with user '
'data in the gsutil directory.' %
os.path.join(gslib.GSUTIL_DIR, filename))))
# This class is necessary to convert timestamps to UTC. By default Python
# datetime objects are timezone unaware. This created problems when interacting
# with cloud object timestamps which are timezone aware. This issue appeared
# when handling the timeCreated metadata attribute. The values returned by the
# service were placed in RFC 3339 format in the storage_v1_messages module. RFC
# 3339 requires a timezone in any timestamp. This caused problems as the
# datetime object elsewhere in the code was timezone unaware and was different
# by exactly one hour. The main problem is because the local system uses
# daylight savings time which consequently adjusted the timestamp ahead by one
# hour.
class UTC(tzinfo):
"""Timezone information class used to convert datetime timestamps to UTC."""
def utcoffset(self, _):
"""An offset of the number of minutes away from UTC this tzinfo object is.
Returns:
A time duration of zero. UTC is zero minutes away from UTC.
"""
return timedelta(0)
def tzname(self, _):
"""A method to retrieve the name of this timezone object.
Returns:
The name of the timezone (i.e. 'UTC').
"""
return 'UTC'
def dst(self, _):
"""A fixed offset to handle daylight savings time (DST).
Returns:
A time duration of zero as UTC does not use DST.
"""
return timedelta(0)
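# e.g. datetime.datetime(2015, 1, 1, tzinfo=UTC()) (with the datetime module
# imported) is timezone-aware, so it compares cleanly against the RFC 3339
# timestamps parsed from service responses.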
class LazyWrapper(object):
"""Wrapper for lazily instantiated objects."""
def __init__(self, func):
"""The init method for LazyWrapper.
Args:
func: A function (lambda or otherwise) to lazily evaluate.
"""
self._func = func
def __int__(self):
try:
return int(self._value)
except AttributeError:
self._value = self._func()
return int(self._value)
def __eq__(self, other):
try:
return self._value == other
except AttributeError:
self._value = self._func()
return self._value == other
def __repr__(self):
try:
return str(self._value)
except AttributeError:
self._value = self._func()
return str(self._value)
def __str__(self):
try:
return str(self._value)
except AttributeError:
self._value = self._func()
return str(self._value)
def __call__(self):
"""The call method for a LazyWrapper object."""
try:
return self._value
except AttributeError:
self._value = self._func()
return self._value
def __len__(self):
"""The len method for a LazyWrapper object."""
try:
return len(self._value)
except AttributeError:
self.__call__()
return len(self._value)
def __iter__(self):
"""The iter method for a LazyWrapper object."""
try:
return self._value.__iter__()
except AttributeError:
self.__call__()
return self._value.__iter__()
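# Usage sketch: LazyWrapper defers an expensive computation until first use,
# e.g. num_procs = LazyWrapper(lambda: multiprocessing.cpu_count());
# int/str/len/iter and direct calls all trigger (and then cache) evaluation,
# so int(num_procs) computes cpu_count() only once.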
# Enum class for specifying listing style.
class ListingStyle(object):
SHORT = 'SHORT'
LONG = 'LONG'
LONG_LONG = 'LONG_LONG'
def UsingCrcmodExtension(crcmod):
return (boto.config.get('GSUtil', 'test_assume_fast_crcmod', None) or
(getattr(crcmod, 'crcmod', None) and
getattr(crcmod.crcmod, '_usingExtension', None)))
def ObjectIsGzipEncoded(obj_metadata):
"""Returns true if source apitools Object has gzip content-encoding."""
return (obj_metadata.contentEncoding and
obj_metadata.contentEncoding.lower().endswith('gzip'))
def AddAcceptEncodingGzipIfNeeded(headers_dict, compressed_encoding=False):
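  """Adds accept-encoding:gzip to headers_dict if compressed_encoding is set."""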
if compressed_encoding:
# If we send accept-encoding: gzip with a range request, the service
# may respond with the whole object, which would be bad for resuming.
# So only accept gzip encoding if the object we are downloading has
# a gzip content encoding.
# TODO: If we want to support compressive transcoding fully in the client,
# condition on whether we are requesting the entire range of the object.
# In this case, we can accept the first bytes of the object compressively
# transcoded, but we must perform data integrity checking on bytes after
# they are decompressed on-the-fly, and any connection break must be
# resumed without compressive transcoding since we cannot specify an
# offset. We would also need to ensure that hashes for downloaded data
# from objects stored with content-encoding:gzip continue to be calculated
# prior to our own on-the-fly decompression so they match the stored hashes.
headers_dict['accept-encoding'] = 'gzip'
def CheckFreeSpace(path):
"""Return path/drive free space (in bytes)."""
if IS_WINDOWS:
try:
# pylint: disable=invalid-name
get_disk_free_space_ex = WINFUNCTYPE(c_int, c_wchar_p,
POINTER(c_uint64),
POINTER(c_uint64),
POINTER(c_uint64))
get_disk_free_space_ex = get_disk_free_space_ex(
('GetDiskFreeSpaceExW', windll.kernel32), (
(1, 'lpszPathName'),
(2, 'lpFreeUserSpace'),
(2, 'lpTotalSpace'),
(2, 'lpFreeSpace'),))
except AttributeError:
get_disk_free_space_ex = WINFUNCTYPE(c_int, c_char_p,
POINTER(c_uint64),
POINTER(c_uint64),
POINTER(c_uint64))
get_disk_free_space_ex = get_disk_free_space_ex(
('GetDiskFreeSpaceExA', windll.kernel32), (
(1, 'lpszPathName'),
(2, 'lpFreeUserSpace'),
(2, 'lpTotalSpace'),
(2, 'lpFreeSpace'),))
def GetDiskFreeSpaceExErrCheck(result, unused_func, args):
if not result:
raise WinError()
return args[1].value
get_disk_free_space_ex.errcheck = GetDiskFreeSpaceExErrCheck
return get_disk_free_space_ex(os.getenv('SystemDrive'))
else:
(_, f_frsize, _, _, f_bavail, _, _, _, _, _) = os.statvfs(path)
return f_frsize * f_bavail
def CreateDirIfNeeded(dir_path, mode=0777):
"""Creates a directory, suppressing already-exists errors."""
if not os.path.exists(dir_path):
try:
# Unfortunately, even though we catch and ignore EEXIST, this call will
# output a (needless) error message (no way to avoid that in Python).
os.makedirs(dir_path, mode)
# Ignore 'already exists' in case user tried to start up several
# resumable uploads concurrently from a machine where no tracker dir had
# yet been created.
except OSError as e:
if e.errno != errno.EEXIST:
raise
def GetDiskCounters():
"""Retrieves disk I/O statistics for all disks.
Adapted from the psutil module's psutil._pslinux.disk_io_counters:
http://code.google.com/p/psutil/source/browse/trunk/psutil/_pslinux.py
Originally distributed under under a BSD license.
Original Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola.
Returns:
A dictionary containing disk names mapped to the disk counters from
    /proc/diskstats.
"""
# iostat documentation states that sectors are equivalent with blocks and
# have a size of 512 bytes since 2.4 kernels. This value is needed to
# calculate the amount of disk I/O in bytes.
sector_size = 512
partitions = []
with open('/proc/partitions', 'r') as f:
lines = f.readlines()[2:]
for line in lines:
_, _, _, name = line.split()
if name[-1].isdigit():
partitions.append(name)
retdict = {}
with open('/proc/diskstats', 'r') as f:
for line in f:
values = line.split()[:11]
_, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = values
if name in partitions:
rbytes = int(rbytes) * sector_size
wbytes = int(wbytes) * sector_size
reads = int(reads)
writes = int(writes)
rtime = int(rtime)
wtime = int(wtime)
retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
return retdict
def CalculateThroughput(total_bytes_transferred, total_elapsed_time):
"""Calculates throughput and checks for a small total_elapsed_time.
Args:
total_bytes_transferred: Total bytes transferred in a period of time.
total_elapsed_time: The amount of time elapsed in seconds.
Returns:
The throughput as a float.
"""
if total_elapsed_time < 0.01:
total_elapsed_time = 0.01
return float(total_bytes_transferred) / float(total_elapsed_time)
def DivideAndCeil(dividend, divisor):
"""Returns ceil(dividend / divisor).
Takes care to avoid the pitfalls of floating point arithmetic that could
otherwise yield the wrong result for large numbers.
Args:
dividend: Dividend for the operation.
divisor: Divisor for the operation.
Returns:
Quotient.
"""
quotient = dividend // divisor
if (dividend % divisor) != 0:
quotient += 1
return quotient
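# e.g. DivideAndCeil(10, 3) == 4 and DivideAndCeil(9, 3) == 3; pure integer
# arithmetic stays exact even for very large byte counts.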
def GetGsutilStateDir():
"""Returns the location of the directory for gsutil state files.
Certain operations, such as cross-process credential sharing and
resumable transfer tracking, need a known location for state files which
are created by gsutil as-needed.
This location should only be used for storing data that is required to be in
a static location.
Returns:
Path to directory for gsutil static state files.
"""
config_file_dir = config.get(
'GSUtil', 'state_dir',
os.path.expanduser(os.path.join('~', '.gsutil')))
CreateDirIfNeeded(config_file_dir)
return config_file_dir
def GetGsutilClientIdAndSecret():
"""Returns a tuple of the gsutil OAuth2 client ID and secret.
Google OAuth2 clients always have a secret, even if the client is an installed
application/utility such as gsutil. Of course, in such cases the "secret" is
actually publicly known; security depends entirely on the secrecy of refresh
tokens, which effectively become bearer tokens.
Returns:
Tuple of strings (client ID, secret).
"""
if (os.environ.get('CLOUDSDK_WRAPPER') == '1' and
os.environ.get('CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL') == '1'):
# Cloud SDK installs have a separate client ID / secret.
return ('32555940559.apps.googleusercontent.com', # Cloud SDK client ID
'ZmssLNjJy2998hD4CTg2ejr2') # Cloud SDK secret
return ('909320924072.apps.googleusercontent.com', # gsutil client ID
'p3RlpR10xMFh9ZXBS/ZNLYUu') # gsutil secret
def GetCredentialStoreFilename():
return os.path.join(GetGsutilStateDir(), 'credstore')
def GetGceCredentialCacheFilename():
return os.path.join(GetGsutilStateDir(), 'gcecredcache')
def GetTabCompletionLogFilename():
return os.path.join(GetGsutilStateDir(), 'tab-completion-logs')
def GetTabCompletionCacheFilename():
tab_completion_dir = os.path.join(GetGsutilStateDir(), 'tab-completion')
# Limit read permissions on the directory to owner for privacy.
CreateDirIfNeeded(tab_completion_dir, mode=0700)
return os.path.join(tab_completion_dir, 'cache')
def GetPrintableExceptionString(exc):
"""Returns a short Unicode string describing the exception."""
return unicode(exc).encode(UTF8) or str(exc.__class__)
def PrintableStr(input_str):
return input_str.encode(UTF8) if input_str is not None else None
def PrintTrackerDirDeprecationWarningIfNeeded():
# TODO: Remove this along with the tracker_dir config value 1 year after
# 4.6 release date. Use state_dir instead.
if config.has_option('GSUtil', 'resumable_tracker_dir'):
sys.stderr.write('Warning: you have set resumable_tracker_dir in your '
'.boto configuration file. This configuration option is '
'deprecated; please use the state_dir configuration '
'option instead.\n')
# Name of file where we keep the timestamp for the last time we checked whether
# a new version of gsutil is available.
PrintTrackerDirDeprecationWarningIfNeeded()
CreateDirIfNeeded(GetGsutilStateDir())
LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE = (
os.path.join(GetGsutilStateDir(), '.last_software_update_check'))
def HasConfiguredCredentials():
"""Determines if boto credential/config file exists."""
has_goog_creds = (config.has_option('Credentials', 'gs_access_key_id') and
config.has_option('Credentials', 'gs_secret_access_key'))
has_amzn_creds = (config.has_option('Credentials', 'aws_access_key_id') and
config.has_option('Credentials', 'aws_secret_access_key'))
has_oauth_creds = (
config.has_option('Credentials', 'gs_oauth2_refresh_token'))
has_service_account_creds = (
HAS_CRYPTO and
config.has_option('Credentials', 'gs_service_client_id') and
config.has_option('Credentials', 'gs_service_key_file'))
if (has_goog_creds or has_amzn_creds or has_oauth_creds or
has_service_account_creds):
return True
valid_auth_handler = None
try:
valid_auth_handler = boto.auth.get_auth_handler(
GSConnection.DefaultHost, config, Provider('google'),
requested_capability=['s3'])
# Exclude the no-op auth handler as indicating credentials are configured.
# Note we can't use isinstance() here because the no-op module may not be
# imported so we can't get a reference to the class type.
if getattr(getattr(valid_auth_handler, '__class__', None),
'__name__', None) == 'NoOpAuth':
valid_auth_handler = None
except NoAuthHandlerFound:
pass
return valid_auth_handler
def ConfigureNoOpAuthIfNeeded():
"""Sets up no-op auth handler if no boto credentials are configured."""
if not HasConfiguredCredentials():
if (config.has_option('Credentials', 'gs_service_client_id')
and not HAS_CRYPTO):
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
raise CommandException('\n'.join(textwrap.wrap(
'Your gsutil is configured with an OAuth2 service account, but '
'you do not have PyOpenSSL or PyCrypto 2.6 or later installed. '
'Service account authentication requires one of these libraries; '
'please reactivate your service account via the gcloud auth '
'command and ensure any gcloud packages necessary for '
'service accounts are present.')))
else:
raise CommandException('\n'.join(textwrap.wrap(
'Your gsutil is configured with an OAuth2 service account, but '
'you do not have PyOpenSSL or PyCrypto 2.6 or later installed. '
'Service account authentication requires one of these libraries; '
'please install either of them to proceed, or configure a '
'different type of credentials with "gsutil config".')))
else:
# With no boto config file the user can still access publicly readable
# buckets and objects.
from gslib import no_op_auth_plugin # pylint: disable=unused-variable
def GetConfigFilePaths():
"""Returns a list of the path(s) to the boto config file(s) to be loaded."""
config_paths = []
# The only case in which we load multiple boto configurations is
# when the BOTO_CONFIG environment variable is not set and the
# BOTO_PATH environment variable is set with multiple path values.
# Otherwise, we stop when we find the first readable config file.
# This predicate was taken from the boto.pyami.config module.
should_look_for_multiple_configs = (
'BOTO_CONFIG' not in os.environ and
'BOTO_PATH' in os.environ)
for path in BotoConfigLocations:
try:
with open(path, 'r'):
config_paths.append(path)
if not should_look_for_multiple_configs:
break
except IOError:
pass
return config_paths
def GetBotoConfigFileList():
"""Returns list of boto config files that exist."""
config_paths = boto.pyami.config.BotoConfigLocations
if 'AWS_CREDENTIAL_FILE' in os.environ:
config_paths.append(os.environ['AWS_CREDENTIAL_FILE'])
return [cfg_path for cfg_path in config_paths if os.path.exists(cfg_path)]
def GetCertsFile():
return configured_certs_file
def ConfigureCertsFile():
"""Configures and returns the CA Certificates file.
If one is already configured, use it. Otherwise, use the cert roots
distributed with gsutil.
Returns:
string filename of the certs file to use.
"""
certs_file = boto.config.get('Boto', 'ca_certificates_file', None)
# The 'system' keyword indicates to use the system installed certs. Some
# Linux distributions patch the stack such that the Python SSL
# infrastructure picks up the system installed certs by default, thus no
  # action is necessary on our part.
if certs_file == 'system':
return None
if not certs_file:
global configured_certs_file, temp_certs_file
if not configured_certs_file:
configured_certs_file = os.path.abspath(
os.path.join(gslib.GSLIB_DIR, 'data', 'cacerts.txt'))
if not os.path.exists(configured_certs_file):
# If the file is not present on disk, this means the gslib module
# doesn't actually exist on disk anywhere. This can happen if it's
# being imported from a zip file. Unfortunately, we have to copy the
# certs file to a local temp file on disk because the underlying SSL
# socket requires it to be a filesystem path.
certs_data = pkgutil.get_data('gslib', 'data/cacerts.txt')
if not certs_data:
raise CommandException('Certificates file not found. Please '
'reinstall gsutil from scratch')
fd, fname = tempfile.mkstemp(suffix='.txt', prefix='gsutil-cacerts')
f = os.fdopen(fd, 'w')
f.write(certs_data)
f.close()
temp_certs_file = fname
configured_certs_file = temp_certs_file
certs_file = configured_certs_file
return certs_file
def GetCleanupFiles():
"""Returns a list of temp files to delete (if possible) when program exits."""
return [temp_certs_file] if temp_certs_file else []
def ProxyInfoFromEnvironmentVar(proxy_env_var):
"""Reads proxy info from the environment and converts to httplib2.ProxyInfo.
Args:
proxy_env_var: Environment variable string to read, such as http_proxy or
https_proxy.
Returns:
httplib2.ProxyInfo constructed from the environment string.
"""
proxy_url = os.environ.get(proxy_env_var)
if not proxy_url or not proxy_env_var.lower().startswith('http'):
return httplib2.ProxyInfo(httplib2.socks.PROXY_TYPE_HTTP, None, 0)
proxy_protocol = proxy_env_var.lower().split('_')[0]
if not proxy_url.lower().startswith('http'):
# proxy_info_from_url requires a protocol, which is always http or https.
proxy_url = proxy_protocol + '://' + proxy_url
return httplib2.proxy_info_from_url(proxy_url, method=proxy_protocol)
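# e.g. http_proxy='cache.example.com:3128' (hypothetical host) becomes
# proxy_info_from_url('http://cache.example.com:3128', method='http'), while
# an unset or non-http* variable yields a ProxyInfo with no proxy host.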
def GetNewHttp(http_class=httplib2.Http, **kwargs):
"""Creates and returns a new httplib2.Http instance.
Args:
http_class: Optional custom Http class to use.
**kwargs: Arguments to pass to http_class constructor.
Returns:
An initialized httplib2.Http instance.
"""
proxy_host = boto.config.get('Boto', 'proxy', None)
proxy_info = httplib2.ProxyInfo(
proxy_type=3,
proxy_host=proxy_host,
proxy_port=boto.config.getint('Boto', 'proxy_port', 0),
proxy_user=boto.config.get('Boto', 'proxy_user', None),
proxy_pass=boto.config.get('Boto', 'proxy_pass', None),
proxy_rdns=boto.config.get('Boto',
'proxy_rdns',
True if proxy_host else False))
if not (proxy_info.proxy_host and proxy_info.proxy_port):
# Fall back to using the environment variable.
for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']:
if proxy_env_var in os.environ and os.environ[proxy_env_var]:
proxy_info = ProxyInfoFromEnvironmentVar(proxy_env_var)
        # Assume proxy_rdns is True if a proxy environment variable exists.
proxy_info.proxy_rdns = boto.config.get('Boto', 'proxy_rdns', True)
break
# Some installers don't package a certs file with httplib2, so use the
# one included with gsutil.
kwargs['ca_certs'] = GetCertsFile()
# Use a non-infinite SSL timeout to avoid hangs during network flakiness.
kwargs['timeout'] = SSL_TIMEOUT
http = http_class(proxy_info=proxy_info, **kwargs)
http.disable_ssl_certificate_validation = (not config.getbool(
'Boto', 'https_validate_certificates'))
return http
# Retry for 10 minutes with exponential backoff, which corresponds to
# the maximum Downtime Period specified in the GCS SLA
# (https://cloud.google.com/storage/sla)
def GetNumRetries():
return config.getint('Boto', 'num_retries', 23)
def GetMaxRetryDelay():
return config.getint('Boto', 'max_retry_delay', 32)
# Resumable downloads and uploads make one HTTP call per chunk (and must be
# in multiples of 256KiB). Overridable for testing.
def GetJsonResumableChunkSize():
chunk_size = config.getint('GSUtil', 'json_resumable_chunk_size',
1024*1024*100L)
if chunk_size == 0:
chunk_size = 1024*256L
  elif chunk_size % (1024*256L) != 0:
chunk_size += (1024*256L - (chunk_size % (1024*256L)))
return chunk_size
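# e.g. a configured json_resumable_chunk_size of 1000000 rounds up to the next
# 256 KiB multiple, 1048576, keeping each chunk aligned to the JSON API's
# 256 KiB granularity for resumable uploads.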
def JsonResumableChunkSizeDefined():
chunk_size_defined = config.get('GSUtil', 'json_resumable_chunk_size',
None)
return chunk_size_defined is not None
def _RoundToNearestExponent(num):
i = 0
while i + 1 < len(_EXP_STRINGS) and num >= (2 ** _EXP_STRINGS[i+1][0]):
i += 1
return i, round(float(num) / 2 ** _EXP_STRINGS[i][0], 2)
def MakeHumanReadable(num):
"""Generates human readable string for a number of bytes.
Args:
num: The number, in bytes.
Returns:
A string form of the number using size abbreviations (KiB, MiB, etc.).
"""
i, rounded_val = _RoundToNearestExponent(num)
return '%g %s' % (rounded_val, _EXP_STRINGS[i][1])
def MakeBitsHumanReadable(num):
"""Generates human readable string for a number of bits.
Args:
num: The number, in bits.
Returns:
A string form of the number using bit size abbreviations (kbit, Mbit, etc.)
"""
i, rounded_val = _RoundToNearestExponent(num)
return '%g %s' % (rounded_val, _EXP_STRINGS[i][2])
def HumanReadableToBytes(human_string):
"""Tries to convert a human-readable string to a number of bytes.
Args:
human_string: A string supplied by user, e.g. '1M', '3 GiB'.
Returns:
An integer containing the number of bytes.
Raises:
ValueError: on an invalid string.
"""
human_string = human_string.lower()
m = MATCH_HUMAN_BYTES.match(human_string)
if m:
num = float(m.group('num'))
if m.group('suffix'):
power = _EXP_STRINGS[SUFFIX_TO_SI[m.group('suffix')]][0]
num *= (2.0 ** power)
num = int(round(num))
return num
raise ValueError('Invalid byte string specified: %s' % human_string)
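# Round-trip sketch of the helpers above; expected values assume the usual
# binary scale and that MATCH_HUMAN_BYTES accepts a bare 'k' suffix.
def _DemoHumanReadableRoundTrip():
  print MakeHumanReadable(2097152)    # '2 MiB'
  print HumanReadableToBytes('1.5k')  # 1536 (1.5 * 2^10)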
def DecimalShort(num):
"""Creates a shorter string version for a given number of objects.
Args:
num: The number of objects to be shortened.
Returns:
    Shortened string version of this number. It takes the largest
scale (thousand, million or billion) smaller than the number and divides it
by that scale, indicated by a suffix with one decimal place. This will thus
create a string of at most 6 characters, assuming num < 10^18.
    Example: 123456789 => 123.5m
"""
for divisor_exp, suffix in reversed(_EXP_TEN_STRING):
if num >= 10**divisor_exp:
quotient = '%.1lf' % (float(num) / 10**divisor_exp)
return quotient + suffix
return str(num)
def PrettyTime(remaining_time):
"""Creates a standard version for a given remaining time in seconds.
Created over using strftime because strftime seems to be
more suitable for a datetime object, rather than just a number of
seconds remaining.
Args:
remaining_time: The number of seconds remaining as a float, or a
string/None value indicating time was not correctly calculated.
Returns:
    if remaining_time is a valid float, %H:%M:%S time remaining format with
the nearest integer from remaining_time (%H might be higher than 23).
Else, it returns the same message it received.
"""
remaining_time = int(round(remaining_time))
hours = int(remaining_time / 3600)
if hours >= 100:
# Too large to display with precision of minutes and seconds.
# If over 1000, saying 999+ hours should be enough.
return '%d+ hrs' % min(hours, 999)
remaining_time -= (3600 * hours)
minutes = int(remaining_time / 60)
remaining_time -= (60 * minutes)
seconds = int(remaining_time)
  return '%02d:%02d:%02d' % (hours, minutes, seconds)
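# Illustrative sketch of PrettyTime's formatting (not called by gsutil):
def _DemoPrettyTime():
  print PrettyTime(42)       # '00:00:42'
  print PrettyTime(3725)     # '01:02:05'
  print PrettyTime(4000000)  # '999+ hrs' (capped once hours >= 100)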
def HumanReadableWithDecimalPlaces(number, decimal_places=1):
"""Creates a human readable format for bytes with fixed decimal places.
Args:
number: The number of bytes.
decimal_places: The number of decimal places.
Returns:
String representing a readable format for number with decimal_places
decimal places.
"""
number_format = MakeHumanReadable(number).split()
num = str(int(round(10**decimal_places * float(number_format[0]))))
if num == '0':
number_format[0] = ('0' + (('.' + ('0' * decimal_places)) if decimal_places
else ''))
else:
num_length = len(num)
if decimal_places:
num = (num[:num_length-decimal_places] + '.' +
num[num_length-decimal_places:])
number_format[0] = num
return ' '.join(number_format)
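# Fixed-decimal sketch for the helper above; the sample assumes the usual
# binary scale (1536 bytes -> 1.5 KiB); not called by gsutil itself.
def _DemoHumanReadableWithDecimalPlaces():
  print HumanReadableWithDecimalPlaces(1536, decimal_places=2)  # '1.50 KiB'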
def Percentile(values, percent, key=lambda x: x):
"""Find the percentile of a list of values.
Taken from: http://code.activestate.com/recipes/511478/
Args:
values: a list of numeric values. Note that the values MUST BE already
sorted.
percent: a float value from 0.0 to 1.0.
key: optional key function to compute value from each element of the list
of values.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(values[int(k)])
d0 = key(values[int(f)]) * (c - k)
d1 = key(values[int(c)]) * (k - f)
return d0 + d1
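# Sketch showing the linear interpolation performed by Percentile; the input
# list must already be sorted:
def _DemoPercentile():
  values = [1, 2, 3, 4]
  print Percentile(values, 0.5)  # 2.5 (interpolated between 2 and 3)
  print Percentile(values, 1.0)  # 4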
def RemoveCRLFFromString(input_str):
r"""Returns the input string with all \n and \r removed."""
return re.sub(r'[\r\n]', '', input_str)
def UnaryDictToXml(message):
"""Generates XML representation of a nested dict.
This dict contains exactly one top-level entry and an arbitrary number of
2nd-level entries, e.g. capturing a WebsiteConfiguration message.
Args:
message: The dict encoding the message.
Returns:
XML string representation of the input dict.
Raises:
Exception: if dict contains more than one top-level entry.
"""
if len(message) != 1:
raise Exception('Expected dict of size 1, got size %d' % len(message))
name, content = message.items()[0]
element_type = ElementTree.Element(name)
for element_property, value in sorted(content.items()):
node = ElementTree.SubElement(element_type, element_property)
node.text = value
return ElementTree.tostring(element_type)
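# A minimal sketch of UnaryDictToXml with a hypothetical WebsiteConfiguration
# message (keys here are illustrative, not an exact API shape):
def _DemoUnaryDictToXml():
  message = {'WebsiteConfiguration': {'MainPageSuffix': 'index.html',
                                      'NotFoundPage': '404.html'}}
  print UnaryDictToXml(message)
  # <WebsiteConfiguration><MainPageSuffix>index.html</MainPageSuffix>
  # <NotFoundPage>404.html</NotFoundPage></WebsiteConfiguration>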
def LookUpGsutilVersion(gsutil_api, url_str):
"""Looks up the gsutil version of the specified gsutil tarball URL.
Version is specified in the metadata field set on that object.
Args:
gsutil_api: gsutil Cloud API to use when retrieving gsutil tarball.
url_str: tarball URL to retrieve (such as 'gs://pub/gsutil.tar.gz').
Returns:
Version string if URL is a cloud URL containing x-goog-meta-gsutil-version
metadata, else None.
"""
url = StorageUrlFromString(url_str)
if url.IsCloudUrl():
obj = gsutil_api.GetObjectMetadata(url.bucket_name, url.object_name,
provider=url.scheme,
fields=['metadata'])
if obj.metadata and obj.metadata.additionalProperties:
for prop in obj.metadata.additionalProperties:
if prop.key == 'gsutil_version':
return prop.value
class DiscardMessagesQueue(object):
"""Emulates a Cloud API status queue but drops all messages."""
# pylint: disable=invalid-name, unused-argument
def put(self, message=None, timeout=None):
pass
# pylint: enable=invalid-name, unused-argument
def GetGsutilVersionModifiedTime():
"""Returns unix timestamp of when the VERSION file was last modified."""
if not gslib.VERSION_FILE:
return 0
return int(os.path.getmtime(gslib.VERSION_FILE))
def IsRunningInteractively():
"""Returns True if currently running interactively on a TTY."""
return sys.stdout.isatty() and sys.stderr.isatty() and sys.stdin.isatty()
def _HttpsValidateCertificatesEnabled():
  return config.get('Boto', 'https_validate_certificates', True)
CERTIFICATE_VALIDATION_ENABLED = _HttpsValidateCertificatesEnabled()
def _BotoIsSecure():
return config.get('Boto', 'is_secure', True)
BOTO_IS_SECURE = _BotoIsSecure()
def ResumableThreshold():
return config.getint('GSUtil', 'resumable_threshold', EIGHT_MIB)
def CreateCustomMetadata(entries=None, custom_metadata=None):
"""Creates a custom metadata (apitools Object.MetadataValue) object.
Inserts the key/value pairs in entries.
Args:
entries: The dictionary containing key/value pairs to insert into metadata.
custom_metadata: A pre-existing custom metadata object to add to.
Returns:
    An apitools Object.MetadataValue.
"""
if custom_metadata is None:
custom_metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
if entries is None:
entries = {}
for key, value in entries.iteritems():
custom_metadata.additionalProperties.append(
apitools_messages.Object.MetadataValue.AdditionalProperty(
key=str(key), value=str(value)))
return custom_metadata
def GetValueFromObjectCustomMetadata(obj_metadata, search_key,
default_value=None):
"""Filters a specific element out of an object's custom metadata.
Args:
obj_metadata: The metadata for an object.
search_key: The custom metadata key to search for.
default_value: The default value to use for the key if it cannot be found.
Returns:
    A tuple indicating whether the value could be found in the metadata, and
    the value corresponding to search_key: either the value at the specified
    key in the custom metadata, or the default value if the specified key
    does not exist in the custom metadata.
"""
try:
value = next((attr.value for attr in
obj_metadata.metadata.additionalProperties
if attr.key == search_key), None)
if value is None:
return False, default_value
return True, value
except AttributeError:
return False, default_value
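# Sketch pairing the two custom-metadata helpers above. Assumes an apitools
# Object message; the key/value pairs are illustrative:
def _DemoCustomMetadataRoundTrip():
  obj = apitools_messages.Object(
      metadata=CreateCustomMetadata(entries={'color': 'blue'}))
  print GetValueFromObjectCustomMetadata(obj, 'color')           # (True, 'blue')
  print GetValueFromObjectCustomMetadata(obj, 'missing', 'n/a')  # (False, 'n/a')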
def InsistAscii(string, message):
if not all(ord(c) < 128 for c in string):
raise CommandException(message)
def InsistAsciiHeader(header):
InsistAscii(header, 'Invalid non-ASCII header (%s).' % header)
def InsistAsciiHeaderValue(header, value):
InsistAscii(
value,
'Invalid non-ASCII value (%s) was provided for header %s.\nOnly ASCII '
'characters are allowed in headers other than x-goog-meta- and '
      'x-amz-meta- headers.' % (value, header))
def IsCustomMetadataHeader(header):
"""Returns true if header (which must be lowercase) is a custom header."""
return header.startswith('x-goog-meta-') or header.startswith('x-amz-meta-')
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, object_length)
Raises:
Exception: if calling bug encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and S3_DELETE_MARKER_GUID in
obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
print '%s:' % url_str.encode(UTF8)
if obj.timeCreated:
print MakeMetadataLine(
'Creation time', obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT'))
if obj.updated:
print MakeMetadataLine(
'Update time', obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT'))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
print MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT'))
if obj.storageClass:
print MakeMetadataLine('Storage class', obj.storageClass)
if obj.cacheControl:
print MakeMetadataLine('Cache-Control', obj.cacheControl)
if obj.contentDisposition:
print MakeMetadataLine('Content-Disposition', obj.contentDisposition)
if obj.contentEncoding:
print MakeMetadataLine('Content-Encoding', obj.contentEncoding)
if obj.contentLanguage:
print MakeMetadataLine('Content-Language', obj.contentLanguage)
print MakeMetadataLine('Content-Length', obj.size)
print MakeMetadataLine('Content-Type', obj.contentType)
if obj.componentCount:
print MakeMetadataLine('Component-Count', obj.componentCount)
if obj.timeDeleted:
print MakeMetadataLine(
'Archived time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT'))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
print MakeMetadataLine('Metadata', '')
for ap in non_marker_props:
print MakeMetadataLine(
('%s' % ap.key).encode(UTF8), ('%s' % ap.value).encode(UTF8),
indent=2)
if obj.customerEncryption:
if not obj.crc32c:
print MakeMetadataLine('Hash (crc32c)', 'encrypted')
if not obj.md5Hash:
print MakeMetadataLine('Hash (md5)', 'encrypted')
print MakeMetadataLine(
'Encryption algorithm', obj.customerEncryption.encryptionAlgorithm)
print MakeMetadataLine(
'Encryption key SHA256', obj.customerEncryption.keySha256)
if obj.crc32c:
print MakeMetadataLine('Hash (crc32c)', obj.crc32c)
if obj.md5Hash:
print MakeMetadataLine('Hash (md5)', obj.md5Hash)
print MakeMetadataLine('ETag', obj.etag.strip('"\''))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
print MakeMetadataLine('Generation', generation_str)
if obj.metageneration:
print MakeMetadataLine('Metageneration', obj.metageneration)
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
print MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl))
elif S3_ACL_MARKER_GUID in marker_props:
print MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID])
else:
print MakeMetadataLine('ACL', 'ACCESS DENIED')
print MakeMetadataLine(
'Note', 'You need OWNER permission on the object to read its ACL', 2)
return (num_objs, num_bytes)
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '%s%s' % (((' ' * indent * 4) + label + ':').ljust(28), value)
def CompareVersions(first, second):
"""Compares the first and second gsutil version strings.
For example, 3.33 > 3.7, and 4.1 is a greater major version than 3.33.
Does not handle multiple periods (e.g. 3.3.4) or complicated suffixes
(e.g., 3.3RC4 vs. 3.3RC5). A version string with a suffix is treated as
less than its non-suffix counterpart (e.g. 3.32 > 3.32pre).
Args:
first: First gsutil version string.
second: Second gsutil version string.
Returns:
(g, m):
g is True if first known to be greater than second, else False.
m is True if first known to be greater by at least 1 major version,
else False.
"""
m1 = VERSION_MATCHER.match(str(first))
m2 = VERSION_MATCHER.match(str(second))
# If passed strings we don't know how to handle, be conservative.
if not m1 or not m2:
return (False, False)
major_ver1 = int(m1.group('maj'))
minor_ver1 = int(m1.group('min')) if m1.group('min') else 0
suffix_ver1 = m1.group('suffix')
major_ver2 = int(m2.group('maj'))
minor_ver2 = int(m2.group('min')) if m2.group('min') else 0
suffix_ver2 = m2.group('suffix')
if major_ver1 > major_ver2:
return (True, True)
elif major_ver1 == major_ver2:
if minor_ver1 > minor_ver2:
return (True, False)
elif minor_ver1 == minor_ver2:
return (bool(suffix_ver2) and not suffix_ver1, False)
return (False, False)
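# Illustrative outcomes of CompareVersions, following the docstring above:
def _DemoCompareVersions():
  print CompareVersions('4.1', '3.33')      # (True, True)  - newer major
  print CompareVersions('3.33', '3.7')      # (True, False) - newer minor
  print CompareVersions('3.32', '3.32pre')  # (True, False) - suffix sorts lower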
def _IncreaseSoftLimitForResource(resource_name, fallback_value):
"""Sets a new soft limit for the maximum number of open files.
The soft limit is used for this process (and its children), but the
hard limit is set by the system and cannot be exceeded.
We will first try to set the soft limit to the hard limit's value; if that
fails, we will try to set the soft limit to the fallback_value iff this would
increase the soft limit.
Args:
resource_name: Name of the resource to increase the soft limit for.
fallback_value: Fallback value to be used if we couldn't set the
soft value to the hard value (e.g., if the hard value
is "unlimited").
Returns:
Current soft limit for the resource (after any changes we were able to
make), or -1 if the resource doesn't exist.
"""
# Get the value of the resource.
try:
(soft_limit, hard_limit) = resource.getrlimit(resource_name)
except (resource.error, ValueError):
# The resource wasn't present, so we can't do anything here.
return -1
# Try to set the value of the soft limit to the value of the hard limit.
if hard_limit > soft_limit: # Some OS's report 0 for "unlimited".
try:
resource.setrlimit(resource_name, (hard_limit, hard_limit))
return hard_limit
except (resource.error, ValueError):
# We'll ignore this and try the fallback value.
pass
# Try to set the value of the soft limit to the fallback value.
if soft_limit < fallback_value:
try:
resource.setrlimit(resource_name, (fallback_value, hard_limit))
return fallback_value
except (resource.error, ValueError):
# We couldn't change the soft limit, so just report the current
# value of the soft limit.
return soft_limit
else:
return soft_limit
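# Sketch of raising this process's open-file soft limit on Unix; the 4096
# fallback is an arbitrary illustrative value, not a gsutil default.
def _DemoIncreaseSoftLimit():
  if HAS_RESOURCE_MODULE:
    print _IncreaseSoftLimitForResource(resource.RLIMIT_NOFILE, 4096)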
def GetCloudApiInstance(cls, thread_state=None):
"""Gets a gsutil Cloud API instance.
Since Cloud API implementations are not guaranteed to be thread-safe, each
thread needs its own instance. These instances are passed to each thread
via the thread pool logic in command.
Args:
cls: Command class to be used for single-threaded case.
thread_state: Per thread state from this thread containing a gsutil
Cloud API instance.
Returns:
gsutil Cloud API instance.
"""
return thread_state or cls.gsutil_api
def GetFileSize(fp, position_to_eof=False):
"""Returns size of file, optionally leaving fp positioned at EOF."""
if not position_to_eof:
cur_pos = fp.tell()
fp.seek(0, os.SEEK_END)
cur_file_size = fp.tell()
if not position_to_eof:
fp.seek(cur_pos)
return cur_file_size
def GetStreamFromFileUrl(storage_url, mode='rb'):
if storage_url.IsStream():
return sys.stdin
else:
return open(storage_url.object_name, mode)
def UrlsAreForSingleProvider(url_args):
"""Tests whether the URLs are all for a single provider.
Args:
url_args: Strings to check.
Returns:
True if URLs are for single provider, False otherwise.
"""
provider = None
url = None
for url_str in url_args:
url = StorageUrlFromString(url_str)
if not provider:
provider = url.scheme
elif url.scheme != provider:
return False
return provider is not None
def HaveFileUrls(args_to_check):
"""Checks whether args_to_check contain any file URLs.
Args:
args_to_check: Command-line argument subset to check.
Returns:
True if args_to_check contains any file URLs.
"""
for url_str in args_to_check:
storage_url = StorageUrlFromString(url_str)
if storage_url.IsFileUrl():
return True
return False
def HaveProviderUrls(args_to_check):
"""Checks whether args_to_check contains any provider URLs (like 'gs://').
Args:
args_to_check: Command-line argument subset to check.
Returns:
True if args_to_check contains any provider URLs.
"""
for url_str in args_to_check:
storage_url = StorageUrlFromString(url_str)
if storage_url.IsCloudUrl() and storage_url.IsProvider():
return True
return False
# This must be defined at the module level for pickling across processes.
MultiprocessingIsAvailableResult = collections.namedtuple(
'MultiprocessingIsAvailableResult', ['is_available', 'stack_trace'])
def CheckMultiprocessingAvailableAndInit(logger=None):
"""Checks if multiprocessing is available.
There are some environments in which there is no way to use multiprocessing
logic that's built into Python (e.g., if /dev/shm is not available, then
we can't create semaphores). This simply tries out a few things that will be
needed to make sure the environment can support the pieces of the
multiprocessing module that we need.
If multiprocessing is available, this performs necessary initialization for
multiprocessing. See gslib.command.InitializeMultiprocessingVariables for
an explanation of why this is necessary.
Args:
logger: logging.logger to use for debug output.
Returns:
(multiprocessing_is_available, stack_trace):
multiprocessing_is_available: True iff the multiprocessing module is
available for use.
stack_trace: The stack trace generated by the call we tried that failed.
"""
# pylint: disable=global-variable-undefined
global cached_multiprocessing_is_available
global cached_multiprocessing_check_stack_trace
global cached_multiprocessing_is_available_message
if cached_multiprocessing_is_available is not None:
if logger:
logger.debug(cached_multiprocessing_check_stack_trace)
logger.warn(cached_multiprocessing_is_available_message)
return MultiprocessingIsAvailableResult(
is_available=cached_multiprocessing_is_available,
stack_trace=cached_multiprocessing_check_stack_trace)
if IS_WINDOWS:
message = """
Multiple processes are not supported on Windows. Operations requesting
parallelism will be executed with multiple threads in a single process only.
"""
if logger:
logger.warn(message)
return MultiprocessingIsAvailableResult(is_available=False,
stack_trace=None)
stack_trace = None
multiprocessing_is_available = True
message = """
You have requested multiple processes for an operation, but the
required functionality of Python\'s multiprocessing module is not available.
Operations requesting parallelism will be executed with multiple threads in a
single process only.
"""
try:
# Fails if /dev/shm (or some equivalent thereof) is not available for use
# (e.g., there's no implementation, or we can't write to it, etc.).
try:
multiprocessing.Value('i', 0)
except:
message += """
Please ensure that you have write access to both /dev/shm and /run/shm.
"""
raise # We'll handle this in one place below.
global manager # pylint: disable=global-variable-undefined
manager = multiprocessing.Manager()
# Check that the max number of open files is reasonable. Always check this
# after we're sure that the basic multiprocessing functionality is
# available, since this won't matter unless that's true.
limit = -1
if HAS_RESOURCE_MODULE:
# Try to set this with both resource names - RLIMIT_NOFILE for most Unix
# platforms, and RLIMIT_OFILE for BSD. Ignore AttributeError because the
# "resource" module is not guaranteed to know about these names.
try:
limit = max(limit,
_IncreaseSoftLimitForResource(
resource.RLIMIT_NOFILE,
MIN_ACCEPTABLE_OPEN_FILES_LIMIT))
except AttributeError:
pass
try:
limit = max(limit,
_IncreaseSoftLimitForResource(
resource.RLIMIT_OFILE, MIN_ACCEPTABLE_OPEN_FILES_LIMIT))
except AttributeError:
pass
if limit < MIN_ACCEPTABLE_OPEN_FILES_LIMIT:
message += ("""
Your max number of open files, %s, is too low to allow safe multiprocessing.
On Linux you can fix this by adding something like "ulimit -n 10000" to your
~/.bashrc or equivalent file and opening a new terminal.
On MacOS, you may also need to run a command like this once (in addition to the
above instructions), which might require a restart of your system to take
effect:
launchctl limit maxfiles 10000
Alternatively, edit /etc/launchd.conf with something like:
limit maxfiles 10000 10000
""" % limit)
raise Exception('Max number of open files, %s, is too low.' % limit)
except: # pylint: disable=bare-except
stack_trace = traceback.format_exc()
multiprocessing_is_available = False
if logger is not None:
logger.debug(stack_trace)
logger.warn(message)
# Set the cached values so that we never need to do this check again.
cached_multiprocessing_is_available = multiprocessing_is_available
cached_multiprocessing_check_stack_trace = stack_trace
cached_multiprocessing_is_available_message = message
return MultiprocessingIsAvailableResult(
is_available=cached_multiprocessing_is_available,
stack_trace=cached_multiprocessing_check_stack_trace)
def CreateLock():
"""Returns either a multiprocessing lock or a threading lock.
Use Multiprocessing lock iff we have access to the parts of the
multiprocessing module that are necessary to enable parallelism in operations.
Returns:
Multiprocessing or threading lock.
"""
if CheckMultiprocessingAvailableAndInit().is_available:
return manager.Lock()
else:
return threading.Lock()
def IsCloudSubdirPlaceholder(url, blr=None):
"""Determines if URL is a cloud subdir placeholder.
This function is needed because GUI tools (like the GCS cloud console) allow
users to create empty "folders" by creating a placeholder object; and parts
of gsutil need to treat those placeholder objects specially. For example,
gsutil rsync needs to avoid downloading those objects because they can cause
conflicts (see comments in rsync command for details).
We currently detect two cases:
- Cloud objects whose name ends with '_$folder$'
- Cloud objects whose name ends with '/'
Args:
url: The URL to be checked.
blr: BucketListingRef to check, or None if not available.
If None, size won't be checked.
Returns:
True/False.
"""
if not url.IsCloudUrl():
return False
url_str = url.url_string
if url_str.endswith('_$folder$'):
return True
if blr and blr.IsObject():
size = blr.root_object.size
else:
size = 0
return size == 0 and url_str.endswith('/')
def GetTermLines():
"""Returns number of terminal lines."""
# fcntl isn't supported in Windows.
try:
import fcntl # pylint: disable=g-import-not-at-top
import termios # pylint: disable=g-import-not-at-top
except ImportError:
return _DEFAULT_LINES
def ioctl_GWINSZ(fd): # pylint: disable=invalid-name
try:
return struct.unpack(
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))[0]
except: # pylint: disable=bare-except
return 0 # Failure (so will retry on different file descriptor below).
# Try to find a valid number of lines from termio for stdin, stdout,
# or stderr, in that order.
ioc = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not ioc:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
ioc = ioctl_GWINSZ(fd)
os.close(fd)
except: # pylint: disable=bare-except
pass
if not ioc:
ioc = os.environ.get('LINES', _DEFAULT_LINES)
return int(ioc)
def FixWindowsEncodingIfNeeded(input_str):
"""Attempts to detect Windows CP1252 encoding and convert to UTF8.
Windows doesn't provide a way to set UTF-8 for string encodings; you can set
the system locale (see
http://windows.microsoft.com/en-us/windows/change-system-locale#1TC=windows-7)
but that takes you to a "Change system locale" dropdown that just lists
  languages (e.g., "English (United States)"). Instead, we're forced to check
  whether encoding the string as UTF-8 raises an exception and, if so, try
  converting it from CP1252 to Unicode.
Args:
input_str: The input string.
Returns:
The converted string (or the original, if conversion wasn't needed).
"""
if IS_CP1252:
return input_str.decode(WINDOWS_1252).encode(UTF8)
else:
return input_str
def LogAndHandleRetries(is_data_transfer=False, status_queue=None):
"""Higher-order function allowing retry handler to access global status queue.
Args:
is_data_transfer: If True, disable retries in apitools.
status_queue: The global status queue.
Returns:
A retry function for retryable errors in apitools.
"""
def WarnAfterManyRetriesHandler(retry_args):
"""Exception handler for http failures in apitools.
If the user has had to wait several seconds since their first request, print
a progress message to the terminal to let them know we're still retrying,
then perform the default retry logic and post a RetryableErrorMessage to the
global status queue.
Args:
retry_args: An apitools ExceptionRetryArgs tuple.
"""
if retry_args.total_wait_sec >= LONG_RETRY_WARN_SEC:
logging.info('Retrying request, attempt #%d...', retry_args.num_retries)
if status_queue:
status_queue.put(RetryableErrorMessage(
retry_args.exc, time.time(), num_retries=retry_args.num_retries,
total_wait_sec=retry_args.total_wait_sec))
http_wrapper.HandleExceptionsAndRebuildHttpConnections(retry_args)
def RetriesInDataTransferHandler(retry_args):
"""Exception handler that disables retries in apitools data transfers.
Post a RetryableErrorMessage to the global status queue. We handle the
actual retries within the download and upload functions.
Args:
retry_args: An apitools ExceptionRetryArgs tuple.
"""
if status_queue:
status_queue.put(RetryableErrorMessage(
retry_args.exc, time.time(), num_retries=retry_args.num_retries,
total_wait_sec=retry_args.total_wait_sec))
http_wrapper.RethrowExceptionHandler(retry_args)
if is_data_transfer:
return RetriesInDataTransferHandler
return WarnAfterManyRetriesHandler
def StdinIterator():
"""A generator function that returns lines from stdin."""
for line in sys.stdin:
    # Strip trailing whitespace, including any CRLF.
    yield line.rstrip()
def ConvertRecursiveToFlatWildcard(url_strs):
"""A generator that adds '**' to each url string in url_strs."""
for url_str in url_strs:
yield '%s**' % url_str
def NormalizeStorageClass(sc):
"""Returns a normalized form of the given storage class name.
Converts the given string to uppercase and expands valid abbreviations to
  full storage class names (e.g. 'std' would return 'STANDARD'). Note that this
method does not check if the given storage class is valid.
Args:
sc: String representing the storage class's full name or abbreviation.
Returns:
A string representing the full name of the given storage class.
"""
shorthand_to_full_name = {
'CL': 'COLDLINE',
'DRA': 'DURABLE_REDUCED_AVAILABILITY',
'NL': 'NEARLINE',
'S': 'STANDARD',
'STD': 'STANDARD'}
# Use uppercase; storage class argument for the S3 API must be uppercase,
# and it's case-insensitive for GS APIs.
sc = sc.upper()
if sc in shorthand_to_full_name:
sc = shorthand_to_full_name[sc]
return sc
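# Quick sketch of the normalization above; outputs follow directly from the
# shorthand table:
def _DemoNormalizeStorageClass():
  print NormalizeStorageClass('std')       # 'STANDARD'
  print NormalizeStorageClass('nl')        # 'NEARLINE'
  print NormalizeStorageClass('coldline')  # 'COLDLINE' (uppercased as-is)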
class RsyncDiffToApply(object):
"""Class that encapsulates info needed to apply diff for one object."""
def __init__(self, src_url_str, dst_url_str, src_posix_attrs, diff_action,
copy_size):
"""Constructor.
Args:
src_url_str: The source URL string, or None if diff_action is REMOVE.
dst_url_str: The destination URL string.
src_posix_attrs: The source posix_attributes.
diff_action: _DiffAction to be applied.
copy_size: The amount of bytes to copy, or None if diff_action is REMOVE.
"""
self.src_url_str = src_url_str
self.dst_url_str = dst_url_str
self.src_posix_attrs = src_posix_attrs
self.diff_action = diff_action
self.copy_size = copy_size
|
{
"content_hash": "7599e2d88bf6effbc4748ffc01bf4328",
"timestamp": "",
"source": "github",
"line_count": 1788,
"max_line_length": 87,
"avg_line_length": 34.766219239373605,
"alnum_prop": 0.6900839741321064,
"repo_name": "BrandonY/gsutil",
"id": "262443723ced53dfccab5c3a5fee46908dd372e0",
"size": "62782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gslib/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2917865"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import traceback
import unittest2
import keystoneclient
from oslo.config import cfg
import requests
from fuel_health.common import log as logging
from fuel_health import exceptions
LOG = logging.getLogger(__name__)
identity_group = cfg.OptGroup(name='identity',
title="Keystone Configuration Options")
IdentityGroup = [
cfg.StrOpt('catalog_type',
default='identity',
help="Catalog type of the Identity service."),
cfg.BoolOpt('disable_ssl_certificate_validation',
default=False,
help="Set to True if using self-signed SSL certificates."),
cfg.StrOpt('uri',
default='http://localhost/',
help="Full URI of the OpenStack Identity API (Keystone), v2"),
cfg.StrOpt('url',
default='http://localhost:5000/v2.0/',
help="Dashboard Openstack url, v2"),
cfg.StrOpt('ubuntu_url',
default='http://localhost:5000/v2.0/',
help="Dashboard Openstack url, v2"),
cfg.StrOpt('uri_v3',
help='Full URI of the OpenStack Identity API (Keystone), v3'),
cfg.StrOpt('strategy',
default='keystone',
help="Which auth method does the environment use? "
"(basic|keystone)"),
cfg.StrOpt('region',
default='RegionOne',
help="The identity region name to use."),
cfg.StrOpt('admin_username',
default='nova',
help="Administrative Username to use for"
"Keystone API requests."),
cfg.StrOpt('admin_tenant_name',
default='service',
help="Administrative Tenant name to use for Keystone API "
"requests."),
cfg.StrOpt('admin_password',
default='nova',
help="API key to use when authenticating as admin.",
secret=True),
]
def register_identity_opts(conf):
conf.register_group(identity_group)
for opt in IdentityGroup:
conf.register_opt(opt, group='identity')
compute_group = cfg.OptGroup(name='compute',
title='Compute Service Options')
ComputeGroup = [
cfg.BoolOpt('allow_tenant_isolation',
default=False,
help="Allows test cases to create/destroy tenants and "
"users. This option enables isolated test cases and "
"better parallel execution, but also requires that "
"OpenStack Identity API admin credentials are known."),
cfg.BoolOpt('allow_tenant_reuse',
default=True,
help="If allow_tenant_isolation is True and a tenant that "
"would be created for a given test already exists (such "
"as from a previously-failed run), re-use that tenant "
"instead of failing because of the conflict. Note that "
"this would result in the tenant being deleted at the "
"end of a subsequent successful run."),
cfg.StrOpt('image_ssh_user',
default="root",
help="User name used to authenticate to an instance."),
cfg.StrOpt('image_alt_ssh_user',
default="root",
help="User name used to authenticate to an instance using "
"the alternate image."),
cfg.BoolOpt('create_image_enabled',
default=True,
help="Does the test environment support snapshots?"),
cfg.IntOpt('build_interval',
default=10,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
default=500,
help="Timeout in seconds to wait for an instance to build."),
cfg.BoolOpt('run_ssh',
default=False,
help="Does the test environment support snapshots?"),
cfg.StrOpt('ssh_user',
default='root',
help="User name used to authenticate to an instance."),
cfg.IntOpt('ssh_timeout',
default=50,
help="Timeout in seconds to wait for authentication to "
"succeed."),
cfg.IntOpt('ssh_channel_timeout',
default=20,
help="Timeout in seconds to wait for output from ssh "
"channel."),
cfg.IntOpt('ip_version_for_ssh',
default=4,
help="IP version used for SSH connections."),
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
cfg.StrOpt('path_to_private_key',
default='/root/.ssh/id_rsa',
help="Path to a private key file for SSH access to remote "
"hosts"),
cfg.ListOpt('controller_nodes',
default=[],
help="IP addresses of controller nodes"),
cfg.ListOpt('online_controllers',
default=[],
help="ips of online controller nodes"),
cfg.ListOpt('compute_nodes',
default=[],
help="IP addresses of compute nodes"),
cfg.ListOpt('online_computes',
default=[],
help="IP addresses of online compute nodes"),
cfg.ListOpt('ceph_nodes',
default=[],
help="IP addresses of nodes with ceph-osd role"),
    cfg.StrOpt('controller_node_ssh_user',
               default='root',
               help="SSH user of one of the controller nodes"),
    cfg.StrOpt('amqp_pwd',
               default='root',
               help="AMQP (RabbitMQ) password"),
    cfg.StrOpt('controller_node_ssh_password',
               default='r00tme',
               help="SSH password of one of the controller nodes"),
cfg.StrOpt('image_name',
default="TestVM",
help="Valid secondary image reference to be used in tests."),
    cfg.StrOpt('deployment_mode',
               default="ha",
               help="Deployment mode"),
    cfg.StrOpt('deployment_os',
               default="RHEL",
               help="Deployment OS"),
cfg.IntOpt('flavor_ref',
default=42,
help="Valid primary flavor to use in tests."),
cfg.StrOpt('libvirt_type',
default='qemu',
help="Type of hypervisor to use."),
]
def register_compute_opts(conf):
conf.register_group(compute_group)
for opt in ComputeGroup:
conf.register_opt(opt, group='compute')
image_group = cfg.OptGroup(name='image',
title="Image Service Options")
ImageGroup = [
cfg.StrOpt('api_version',
default='1',
help="Version of the API"),
cfg.StrOpt('catalog_type',
default='image',
help='Catalog type of the Image service.'),
cfg.StrOpt('http_image',
default='http://download.cirros-cloud.net/0.3.1/'
'cirros-0.3.1-x86_64-uec.tar.gz',
               help='HTTP-accessible image')
]
def register_image_opts(conf):
conf.register_group(image_group)
for opt in ImageGroup:
conf.register_opt(opt, group='image')
network_group = cfg.OptGroup(name='network',
title='Network Service Options')
NetworkGroup = [
cfg.StrOpt('catalog_type',
default='network',
help='Catalog type of the Network service.'),
cfg.StrOpt('tenant_network_cidr',
default="10.100.0.0/16",
help="The cidr block to allocate tenant networks from"),
cfg.StrOpt('network_provider',
default="nova_network",
help="Value of network provider"),
cfg.IntOpt('tenant_network_mask_bits',
default=29,
help="The mask bits for tenant networks"),
cfg.BoolOpt('tenant_networks_reachable',
default=True,
help="Whether tenant network connectivity should be "
"evaluated directly"),
cfg.BoolOpt('neutron_available',
default=False,
help="Whether or not neutron is expected to be available"),
]
def register_network_opts(conf):
conf.register_group(network_group)
for opt in NetworkGroup:
conf.register_opt(opt, group='network')
volume_group = cfg.OptGroup(name='volume',
title='Block Storage Options')
VolumeGroup = [
cfg.IntOpt('build_interval',
default=10,
help='Time in seconds between volume availability checks.'),
cfg.IntOpt('build_timeout',
default=180,
               help='Timeout in seconds to wait for a volume to become '
                    'available.'),
cfg.StrOpt('catalog_type',
default='volume',
help="Catalog type of the Volume Service"),
cfg.BoolOpt('cinder_node_exist',
default=True,
help="Allow to run tests if cinder exist"),
cfg.BoolOpt('cinder_vmware_node_exist',
default=True,
help="Allow to run tests if cinder-vmware exist"),
cfg.BoolOpt('ceph_exist',
default=True,
help="Allow to run tests if ceph exist"),
cfg.BoolOpt('multi_backend_enabled',
default=False,
help="Runs Cinder multi-backend test (requires 2 backends)"),
cfg.StrOpt('backend1_name',
default='BACKEND_1',
help="Name of the backend1 (must be declared in cinder.conf)"),
cfg.StrOpt('backend2_name',
default='BACKEND_2',
help="Name of the backend2 (must be declared in cinder.conf)"),
cfg.StrOpt('cinder_vmware_storage_az',
default='vcenter',
help="Name of storage availability zone for cinder-vmware."),
]
def register_volume_opts(conf):
conf.register_group(volume_group)
for opt in VolumeGroup:
conf.register_opt(opt, group='volume')
object_storage_group = cfg.OptGroup(name='object-storage',
title='Object Storage Service Options')
ObjectStoreConfig = [
cfg.StrOpt('catalog_type',
default='object-store',
help="Catalog type of the Object-Storage service."),
cfg.StrOpt('container_sync_timeout',
default=120,
help="Number of seconds to time on waiting for a container"
"to container synchronization complete."),
cfg.StrOpt('container_sync_interval',
default=5,
help="Number of seconds to wait while looping to check the"
"status of a container to container synchronization"),
]
def register_object_storage_opts(conf):
conf.register_group(object_storage_group)
for opt in ObjectStoreConfig:
conf.register_opt(opt, group='object-storage')
sahara = cfg.OptGroup(name='sahara',
title='Sahara Service Options')
SaharaConfig = [
cfg.StrOpt('api_url',
default='10.20.0.131',
help="IP of sahara service."),
cfg.StrOpt('port',
default=8386,
help="Port of sahara service."),
cfg.StrOpt('api_version',
default='1.1',
help="API version of sahara service."),
cfg.StrOpt('plugin',
default='vanilla',
help="Plugin name of sahara service."),
cfg.StrOpt('pligin_version',
default='1.1.2',
help="Plugin version of sahara service."),
cfg.StrOpt('tt_config',
default={'Task Tracker Heap Size': 515},
help="Task Tracker config of sahara service."),
]
def register_sahara_opts(conf):
conf.register_group(sahara)
for opt in SaharaConfig:
conf.register_opt(opt, group='sahara')
murano_group = cfg.OptGroup(name='murano',
title='Murano API Service Options')
MuranoConfig = [
cfg.StrOpt('api_url',
default=None,
help="Murano API Service URL."),
cfg.StrOpt('api_url_management',
default=None,
help="Murano API Service management URL."),
cfg.BoolOpt('insecure',
default=False,
help="This parameter allow to enable SSL encription"),
cfg.StrOpt('agListnerIP',
default='10.100.0.155',
help="Murano SQL Cluster AG IP."),
cfg.StrOpt('clusterIP',
default='10.100.0.150',
help="Murano SQL Cluster IP."),
]
def register_murano_opts(conf):
conf.register_group(murano_group)
for opt in MuranoConfig:
conf.register_opt(opt, group='murano')
heat_group = cfg.OptGroup(name='heat',
title='Heat Options')
HeatConfig = [
cfg.StrOpt('endpoint',
default=None,
help="Heat API Service URL."),
]
fuel_group = cfg.OptGroup(name='fuel',
title='Fuel options')
FuelConf = [
cfg.StrOpt('fuel_version',
default=None,
help="Fuel version"),
cfg.StrOpt('dns',
default=None,
help="dns"),
]
def register_fuel_opts(conf):
conf.register_group(fuel_group)
    for opt in FuelConf:
        conf.register_opt(opt, group='fuel')
def register_heat_opts(conf):
conf.register_group(heat_group)
for opt in HeatConfig:
conf.register_opt(opt, group='heat')
def process_singleton(cls):
"""Wrapper for classes... To be instantiated only one time per process."""
instances = {}
def wrapper(*args, **kwargs):
LOG.info('INSTANCE %s' % instances)
pid = os.getpid()
if pid not in instances:
instances[pid] = cls(*args, **kwargs)
return instances[pid]
return wrapper
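# Minimal sketch of the per-process singleton behaviour above; _Example is a
# hypothetical class used only for illustration.
def _demo_process_singleton():
    @process_singleton
    class _Example(object):
        pass
    print(_Example() is _Example())  # True: one instance per process id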
@process_singleton
class FileConfig(object):
"""Provides OpenStack configuration information."""
DEFAULT_CONFIG_DIR = os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'etc')
DEFAULT_CONFIG_FILE = "test.conf"
def __init__(self):
"""Initialize a configuration from a conf directory and conf file."""
config_files = []
failsafe_path = "/etc/fuel/" + self.DEFAULT_CONFIG_FILE
# Environment variables override defaults...
custom_config = os.environ.get('CUSTOM_FUEL_CONFIG')
LOG.info('CUSTOM CONFIG PATH %s' % custom_config)
if custom_config:
path = custom_config
else:
conf_dir = os.environ.get('FUEL_CONFIG_DIR',
self.DEFAULT_CONFIG_DIR)
conf_file = os.environ.get('FUEL_CONFIG', self.DEFAULT_CONFIG_FILE)
path = os.path.join(conf_dir, conf_file)
if not (os.path.isfile(path) or 'FUEL_CONFIG_DIR'
in os.environ or 'FUEL_CONFIG' in os.environ):
path = failsafe_path
LOG.info("Using fuel config file %s" % path)
if not os.path.exists(path):
msg = "Config file {0} not found".format(path)
print(RuntimeError(msg), file=sys.stderr)
else:
config_files.append(path)
cfg.CONF([], project='fuel', default_config_files=config_files)
register_compute_opts(cfg.CONF)
register_identity_opts(cfg.CONF)
register_network_opts(cfg.CONF)
register_volume_opts(cfg.CONF)
register_murano_opts(cfg.CONF)
register_heat_opts(cfg.CONF)
register_sahara_opts(cfg.CONF)
register_fuel_opts(cfg.CONF)
self.compute = cfg.CONF.compute
self.identity = cfg.CONF.identity
self.network = cfg.CONF.network
self.volume = cfg.CONF.volume
self.murano = cfg.CONF.murano
self.heat = cfg.CONF.heat
self.sahara = cfg.CONF.sahara
self.fuel = cfg.CONF.fuel
class ConfigGroup(object):
# USE SLOTS
def __init__(self, opts):
self.parse_opts(opts)
def parse_opts(self, opts):
for opt in opts:
name = opt.name
self.__dict__[name] = opt.default
def __setattr__(self, key, value):
self.__dict__[key] = value
def __getitem__(self, key):
return self.__dict__[key]
    def __setitem__(self, key, value):
self.__dict__[key] = value
def __repr__(self):
return u"{0} WITH {1}".format(
self.__class__.__name__,
self.__dict__)
@process_singleton
class NailgunConfig(object):
identity = ConfigGroup(IdentityGroup)
compute = ConfigGroup(ComputeGroup)
image = ConfigGroup(ImageGroup)
network = ConfigGroup(NetworkGroup)
volume = ConfigGroup(VolumeGroup)
object_storage = ConfigGroup(ObjectStoreConfig)
murano = ConfigGroup(MuranoConfig)
sahara = ConfigGroup(SaharaConfig)
heat = ConfigGroup(HeatConfig)
fuel = ConfigGroup(FuelConf)
def __init__(self, parse=True):
LOG.info('INITIALIZING NAILGUN CONFIG')
self.nailgun_host = os.environ.get('NAILGUN_HOST', None)
self.nailgun_port = os.environ.get('NAILGUN_PORT', None)
self.nailgun_url = 'http://{0}:{1}'.format(self.nailgun_host,
self.nailgun_port)
token = os.environ.get('NAILGUN_TOKEN')
self.cluster_id = os.environ.get('CLUSTER_ID', None)
self.req_session = requests.Session()
self.req_session.trust_env = False
if token:
self.req_session.headers.update({'X-Auth-Token': token})
if parse:
self.prepare_config()
def prepare_config(self, *args, **kwargs):
try:
self._parse_meta()
LOG.info('parse meta successful')
self._parse_cluster_attributes()
LOG.info('parse cluster attr successful')
self._parse_nodes_cluster_id()
LOG.info('parse node cluster successful')
self._parse_networks_configuration()
LOG.info('parse network configuration successful')
self.set_endpoints()
LOG.info('set endpoints successful')
self.set_proxy()
LOG.info('set proxy successful')
self._parse_cluster_generated_data()
LOG.info('parse generated successful')
self._parse_vmware_attributes()
LOG.info('parse vmware attributes successful')
except exceptions.SetProxy as exc:
raise exc
except Exception:
LOG.warning('Something wrong with endpoints')
LOG.debug(traceback.format_exc())
def _parse_cluster_attributes(self):
api_url = '/api/clusters/%s/attributes' % self.cluster_id
response = self.req_session.get(self.nailgun_url + api_url)
LOG.info('RESPONSE %s STATUS %s' % (api_url, response.status_code))
data = response.json()
LOG.info('RESPONSE FROM %s - %s' % (api_url, data))
access_data = data['editable']['access']
common_data = data['editable']['common']
self.identity.admin_tenant_name = \
(
os.environ.get('OSTF_OS_TENANT_NAME') or
access_data['tenant']['value']
)
self.identity.admin_username = \
(
os.environ.get('OSTF_OS_USERNAME') or
access_data['user']['value']
)
self.identity.admin_password = \
(
os.environ.get('OSTF_OS_PASSWORD') or
access_data['password']['value']
)
self.compute.libvirt_type = common_data['libvirt_type']['value']
self.compute.use_vcenter = common_data['use_vcenter']['value']
self.compute.auto_assign_floating_ip = common_data[
'auto_assign_floating_ip']['value']
api_url = '/api/clusters/%s' % self.cluster_id
cluster_data = self.req_session.get(self.nailgun_url + api_url).json()
network_provider = cluster_data.get('net_provider', 'nova_network')
self.network.network_provider = network_provider
release_id = cluster_data.get('release_id', 'failed to get id')
self.fuel.fuel_version = cluster_data.get(
'fuel_version', 'failed to get fuel version')
LOG.info('Release id is {0}'.format(release_id))
release_data = self.req_session.get(
self.nailgun_url + '/api/releases/{0}'.format(release_id)).json()
deployment_os = release_data.get(
'operating_system', 'failed to get os')
LOG.info('Deployment os is {0}'.format(deployment_os))
if deployment_os != 'RHEL':
storage = data['editable']['storage']['volumes_ceph']['value']
self.volume.ceph_exist = storage
self.fuel.dns = data['editable']['external_dns'].get('value', None)
def _parse_nodes_cluster_id(self):
api_url = '/api/nodes?cluster_id=%s' % self.cluster_id
response = self.req_session.get(self.nailgun_url + api_url)
LOG.info('RESPONSE %s STATUS %s' % (api_url, response.status_code))
data = response.json()
# to make backward compatible
if 'objects' in data:
data = data['objects']
controller_nodes = filter(lambda node: 'controller' in node['roles'],
data)
online_controllers = filter(
lambda node: 'controller' in node['roles'] and
node['online'] is True, data)
cinder_nodes = filter(lambda node: 'cinder' in node['roles'],
data)
cinder_vmware_nodes = filter(lambda node: 'cinder-vmware' in
node['roles'], data)
controller_ips = []
        controller_names = []
public_ips = []
online_controllers_ips = []
for node in controller_nodes:
public_network = next(network for network in node['network_data']
if network['name'] == 'public')
ip = public_network['ip'].split('/')[0]
public_ips.append(ip)
controller_ips.append(node['ip'])
            controller_names.append(node['fqdn'])
        LOG.info("IP %s NAMES %s" % (controller_ips, controller_names))
for node in online_controllers:
online_controllers_ips.append(node['ip'])
LOG.info("Online controllers ips is %s" % online_controllers_ips)
self.compute.public_ips = public_ips
self.compute.controller_nodes = controller_ips
self.compute.online_controllers = online_controllers_ips
if not cinder_nodes:
self.volume.cinder_node_exist = False
if not cinder_vmware_nodes:
self.volume.cinder_vmware_node_exist = False
compute_nodes = filter(lambda node: 'compute' in node['roles'],
data)
online_computes = filter(
lambda node: 'compute' in node['roles']
and node['online'] is True, data)
online_computes_ips = []
for node in online_computes:
online_computes_ips.append(node['ip'])
LOG.info('Online compute ips is {0}'.format(online_computes_ips))
self.compute.online_computes = online_computes_ips
compute_ips = []
for node in compute_nodes:
compute_ips.append(node['ip'])
LOG.info("COMPUTES IPS %s" % compute_ips)
self.compute.compute_nodes = compute_ips
ceph_nodes = filter(lambda node: 'ceph-osd' in node['roles'],
data)
self.compute.ceph_nodes = ceph_nodes
def _parse_meta(self):
api_url = '/api/clusters/%s' % self.cluster_id
data = self.req_session.get(self.nailgun_url + api_url).json()
self.mode = data['mode']
self.compute.deployment_mode = self.mode
release_id = data.get('release_id', 'failed to get id')
LOG.info('Release id is {0}'.format(release_id))
release_data = self.req_session.get(
self.nailgun_url + '/api/releases/{0}'.format(release_id)).json()
self.compute.deployment_os = release_data.get(
'operating_system', 'failed to get os')
def _parse_networks_configuration(self):
api_url = '/api/clusters/{0}/network_configuration/{1}'.format(
self.cluster_id, self.network.network_provider)
data = self.req_session.get(self.nailgun_url + api_url).json()
self.network.raw_data = data
def _parse_cluster_generated_data(self):
api_url = '/api/clusters/%s/generated' % self.cluster_id
data = self.req_session.get(self.nailgun_url + api_url).json()
self.generated_data = data
amqp_data = data['rabbit']
self.amqp_pwd = amqp_data['password']
if 'RHEL' in self.compute.deployment_os:
storage = data['storage']['volumes_ceph']
self.volume.ceph_exist = storage
def _parse_ostf_api(self):
api_url = '/api/ostf/%s' % self.cluster_id
response = self.req_session.get(self.nailgun_url + api_url)
data = response.json()
self.identity.url = data['horizon_url'] + 'dashboard'
self.identity.uri = data['keystone_url'] + 'v2.0/'
def _parse_vmware_attributes(self):
if self.volume.cinder_vmware_node_exist:
api_url = '/api/clusters/%s/vmware_attributes' % self.cluster_id
data = self.req_session.get(self.nailgun_url + api_url).json()
az = data['editable']['value']['availability_zones'][0]['az_name']
self.volume.cinder_vmware_storage_az = "{0}-cinder".format(az)
def find_proxy(self, ip):
endpoint = self.network.raw_data.get(
'public_vip', None) or ip
auth_url = 'http://{0}:{1}/{2}/'.format(endpoint, 5000, 'v2.0')
try:
os.environ['http_proxy'] = 'http://{0}:{1}'.format(ip, 8888)
LOG.warning('Try to check proxy on {0}'.format(ip))
keystoneclient.v2_0.client.Client(
username=self.identity.admin_username,
password=self.identity.admin_password,
tenant_name=self.identity.admin_tenant_name,
auth_url=auth_url,
insecure=False)
return ip
except Exception:
LOG.warning('Can not pass authorization '
'with proxy on {0}'.format(ip))
LOG.debug(traceback.format_exc())
def set_proxy(self):
"""Sets environment property for http_proxy:
To behave properly - method must be called after all nailgun params
is processed
"""
if not self.compute.online_controllers:
raise exceptions.OfflineControllers()
proxies = [self.find_proxy(ip) for ip in
self.compute.online_controllers]
if not proxies:
raise exceptions.SetProxy()
os.environ['http_proxy'] = 'http://{0}:{1}'.format(proxies[0], 8888)
def set_endpoints(self):
public_vip = self.network.raw_data.get('public_vip', None)
# workaround for api without public_vip for ha mode
if not public_vip and 'ha' in self.mode:
self._parse_ostf_api()
else:
endpoint = public_vip or self.compute.public_ips[0]
endpoint_mur_sav = public_vip or self.compute.controller_nodes[0]
self.identity.url = 'http://{0}/{1}/'.format(endpoint, 'dashboard')
self.identity.ubuntu_url = 'http://{0}/'.format(endpoint)
self.identity.uri = 'http://{0}:{1}/{2}/'.format(
endpoint, 5000, 'v2.0')
self.murano.api_url = 'http://{0}:{1}'.format(
endpoint_mur_sav, 8082)
self.sahara.api_url = 'http://{0}:{1}/{2}'.format(
endpoint_mur_sav, 8386, 'v1.0')
self.heat.endpoint = 'http://{0}:{1}/{2}'.format(
endpoint_mur_sav, 8004, 'v1')
def FuelConfig():
if 'CUSTOM_FUEL_CONFIG' in os.environ:
return FileConfig()
else:
try:
return NailgunConfig()
except exceptions.SetProxy as e:
raise unittest2.TestCase.failureException(str(e))
|
{
"content_hash": "3157709a41373038972f3abd1caad2db",
"timestamp": "",
"source": "github",
"line_count": 754,
"max_line_length": 79,
"avg_line_length": 37.673740053050395,
"alnum_prop": 0.5676969654298387,
"repo_name": "mcloudv/fuel-ostf",
"id": "4b141a69dad3414d412b7e1df976bd233dc0bacb",
"size": "29112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel_health/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "404"
},
{
"name": "Python",
"bytes": "594650"
},
{
"name": "Shell",
"bytes": "6024"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from twisted.web.template import flattenString
from element_2 import ExampleElement
def renderDone(output):
print(output)
flattenString(None, ExampleElement()).addCallback(renderDone)
|
{
"content_hash": "4dcfa9a5d2edf60224c565449b91e20b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.8105726872246696,
"repo_name": "EricMuller/mynotes-backend",
"id": "01baedf600b034bae44715314f97288e947e6259",
"size": "227",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/docs/web/howto/listings/render_2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
'''
actg.py
Reverse complement plugin for Sublime Text 2
'''
import sublime, sublime_plugin
class ReverseComplementCommand(sublime_plugin.TextCommand):
def complement(self, sequence, reverse=False):
"""
Compute the complement of a DNA sequence.
If reverse is True, reverse it too.
"""
flip = {
'A': 'T',
'C': 'G',
'G': 'C',
'T': 'A',
'N': 'N',
'a': 't',
'c': 'g',
'g': 'c',
't': 'a',
'n': 'n'
}
complines = []
# Gracefully handle line endings
if '\n' in sequence:
postfix = '\n'
else:
postfix = ''
for line in sequence.split('\n'):
line_complement = []
            for i in line:
                if i not in flip:
sublime.error_message('Selection contains non-nucleotides.')
return False
line_complement.append(flip[i])
if reverse:
complines.append(''.join(line_complement[::-1]))
else:
complines.append(''.join(line_complement))
return postfix.join(complines)
def run(self, edit):
sels = self.view.sel()
for sel in sels:
if not sel.empty():
ret = self.complement(self.view.substr(sel), reverse=True)
                if ret:
self.view.replace(edit, sel, ret)
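# Standalone sketch of the reverse-complement logic, runnable outside Sublime
# Text; the sequence is an illustrative example, not part of the plugin.
def _demo_reverse_complement():
    flip = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    print(''.join(flip[base] for base in reversed('GATTACA')))  # TGTAATC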
|
{
"content_hash": "39204cc077712dfb3416666090c1d541",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 65,
"avg_line_length": 20.263157894736842,
"alnum_prop": 0.6034632034632035,
"repo_name": "hackerfriendly/ACTG",
"id": "b7f56c861cec214dc588038ff847595bbc8db64d",
"size": "1155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1155"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.conf import settings
from django.core.exceptions import BadRequest
import requests
import olympia.core.logger
from olympia.devhub.models import BlogPost
log = olympia.core.logger.getLogger('z.cron')
def update_blog_posts():
"""Update the blog post cache."""
response = requests.get(settings.DEVELOPER_BLOG_URL, timeout=10)
try:
items = response.json()
except requests.exceptions.JSONDecodeError:
items = None
if not (response.status_code == 200 and items and len(items) > 1):
raise BadRequest('Developer blog JSON import failed.')
latest_five = items[:5]
latest_five_ids = [item['id'] for item in latest_five]
BlogPost.objects.exclude(post_id__in=latest_five_ids).delete()
existing_blogposts = {post.post_id: post for post in BlogPost.objects.all()}
for item in latest_five:
existing = existing_blogposts.get(item['id'])
data = {
'title': item['title']['rendered'],
'date_posted': datetime.strptime(item['date'], '%Y-%m-%dT%H:%M:%S'),
'date_modified': datetime.strptime(item['modified'], '%Y-%m-%dT%H:%M:%S'),
'permalink': item['link'],
}
if not existing:
BlogPost.objects.create(post_id=item['id'], **data)
elif existing.date_modified != data['date_modified']:
existing.update(**data)
log.info(f'Adding {len(latest_five)} blog posts.')
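# Sketch of the date handling above: the WordPress REST API returns naive
# ISO-8601 date strings; the sample value here is illustrative only.
def _demo_wordpress_date_parsing():
    sample = '2023-05-17T09:30:00'
    print(datetime.strptime(sample, '%Y-%m-%dT%H:%M:%S'))  # 2023-05-17 09:30:00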
|
{
"content_hash": "7c93bed6f6d572b9f8fdd4ae44991bbf",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 33.40909090909091,
"alnum_prop": 0.6401360544217687,
"repo_name": "mozilla/olympia",
"id": "032baa94e8126bd70ea186e37a78d317cbb3a2b9",
"size": "1470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/devhub/cron.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
import logging
from random import randint
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ..utils import slugify, pick_attrs
from .common import CommonFields
from .media_spec import MediaSpec
logger = logging.getLogger(__name__)
class Picture(models.Model):
slug = models.CharField(**CommonFields.slug)
album = models.ForeignKey('edegal.Album', related_name='pictures', on_delete=models.CASCADE)
order = models.IntegerField(**CommonFields.order)
path = models.CharField(**CommonFields.path)
title = models.CharField(**CommonFields.title)
description = models.TextField(**CommonFields.description)
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(null=True, auto_now_add=True)
updated_at = models.DateTimeField(null=True, auto_now=True)
taken_at = models.DateTimeField(null=True, blank=True, help_text="EXIF original date time of the original media")
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
@property
def terms_and_conditions(self):
if self.override_terms_and_conditions:
return self.override_terms_and_conditions
else:
return self.album.terms_and_conditions
def as_dict(self, format='jpeg', include_credits=False):
result = pick_attrs(self,
'path',
'title',
'description',
'is_public',
taken_at=self.taken_at.isoformat() if self.taken_at else '',
thumbnail=self.get_media('thumbnail', format).as_dict(),
preview=self.get_media('preview', format).as_dict(),
original=self.get_media('original', format).as_dict(),
)
if include_credits:
result['credits'] = self.make_credits()
return result
def make_credits(self):
# TODO allow overriding photog per-picture
return self.album.make_credits()
def _make_path(self):
assert self.album
return self.album.path + '/' + self.slug
def get_media(self, role, format='jpeg'):
# do this client-side to support prefetch_related and reduce hits to database
all_media = sorted(list(self.media.all()), key=lambda medium: -medium.width)
role_matching_media = [medium for medium in all_media if medium.role == role]
matching_media = [medium for medium in role_matching_media if medium.format == format]
# progressively increasing desperation
if matching_media:
return matching_media[0]
elif role_matching_media:
return role_matching_media[0]
else:
return all_media[0]
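    # Example of get_media's fallback chain (hypothetical media set): given
    # [preview/jpeg 1200px, thumbnail/webp 400px, thumbnail/jpeg 400px],
    # get_media('thumbnail', 'avif') returns a 400px thumbnail (role match,
    # no format match), while get_media('banner', 'jpeg') falls back to the
    # widest medium overall, the 1200px preview.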
def refresh_media(self, dry_run=False):
current_specs = MediaSpec.objects.filter(active=True)
media_to_remove = self.media.all().exclude(role='original').exclude(spec__in=current_specs)
assert dry_run, "actually doing this not implemented yet :)"
for medium in media_to_remove:
print('Would remove', medium)
@classmethod
def get_random_picture(cls):
max_id = cls.objects.only('id').latest('id').id
sample_id = randint(1, max_id)
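        # Sampling a random id and taking the first public picture at or
        # above it avoids a full-table ORDER BY RANDOM(); the trade-off is
        # a slight bias toward ids that follow gaps in the id sequence.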
return cls.objects.filter(
id__gte=sample_id,
is_public=True,
album__is_public=True,
album__is_visible=True,
album__redirect_url='',
).only('id', 'path').order_by('id').first()
@property
def original(self):
if not hasattr(self, '_original'):
self._original = next((media for media in self.media.all() if media.spec is None), None)
return self._original
@property
def thumbnail(self):
if not hasattr(self, '_thumbnail'):
self._thumbnail = next((
media
for media in self.media.all()
if media.spec and media.spec.is_default_thumbnail
), None)
return self._thumbnail
def save(self, *args, **kwargs):
if self.title and not self.slug:
self.slug = slugify(self.title)
if self.slug:
self.path = self._make_path()
return super(Picture, self).save(*args, **kwargs)
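    # Example (hypothetical): a picture titled 'Saturday Night' saved into
    # an album with path '/party' gets slug 'saturday-night' and path
    # '/party/saturday-night'.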
def __str__(self):
return self.path
class Meta:
verbose_name = _('Picture')
verbose_name_plural = _('Pictures')
unique_together = [('album', 'slug')]
ordering = ('album', 'order', 'taken_at', 'slug')
index_together = [('album', 'order', 'slug')]
|
{
"content_hash": "0c7c293872139a8e3e02e0df653189e5",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 117,
"avg_line_length": 33.572463768115945,
"alnum_prop": 0.622274983811785,
"repo_name": "conikuvat/edegal",
"id": "005617034ded733b671285fb02dce09e493439b2",
"size": "4633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/edegal/models/picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3136"
},
{
"name": "Dockerfile",
"bytes": "1141"
},
{
"name": "HTML",
"bytes": "507"
},
{
"name": "Python",
"bytes": "151941"
},
{
"name": "Shell",
"bytes": "6154"
},
{
"name": "TypeScript",
"bytes": "48274"
}
],
"symlink_target": ""
}
|
import os
# The fake password we will use to authenticate su'ed users
SECRET_PASSWORD = os.urandom(64)
__version__ = '0.4.7'
|
{
"content_hash": "2419d0ef5bcfcf8e5923ff3e9c933582",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.7086614173228346,
"repo_name": "Stackdriver/django-su",
"id": "c17910edb38616a95787197d6a1719b475bb5dea",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_su/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from core.himesis import Himesis
import uuid
class HUnionDaughterRule(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule UnionDaughterRule.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnionDaughterRule, self).__init__(name='HUnionDaughterRule', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """UnionDaughterRule"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'UnionDaughterRule')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class HouseholdRoot() node
self.add_node()
self.vs[3]["mm__"] = """HouseholdRoot"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class HouseholdRoot()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Member() node
self.add_node()
self.vs[5]["mm__"] = """Member"""
self.vs[5]["attr1"] = """+"""
# match_contains node for class Member()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class Family() node
self.add_node()
self.vs[7]["mm__"] = """Family"""
self.vs[7]["attr1"] = """+"""
# match_contains node for class Family()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# apply class CommunityRoot() node
self.add_node()
self.vs[9]["mm__"] = """CommunityRoot"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class CommunityRoot()
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# apply class Woman() node
self.add_node()
self.vs[11]["mm__"] = """Woman"""
self.vs[11]["attr1"] = """1"""
# apply_contains node for class Woman()
self.add_node()
self.vs[12]["mm__"] = """apply_contains"""
# match association HouseholdRoot--have-->Family node
self.add_node()
self.vs[13]["attr1"] = """have"""
self.vs[13]["mm__"] = """directLink_S"""
# match association Family--daughter-->Member node
self.add_node()
self.vs[14]["attr1"] = """daughter"""
self.vs[14]["mm__"] = """directLink_S"""
# apply association CommunityRoot--has-->Woman node
self.add_node()
self.vs[15]["attr1"] = """has"""
self.vs[15]["mm__"] = """directLink_T"""
# backward association HouseholdRoot---->CommunityRoot node
self.add_node()
self.vs[16]["mm__"] = """backward_link"""
# backward association Family---->Woman node
self.add_node()
self.vs[17]["mm__"] = """backward_link"""
# backward association Member---->Woman node
self.add_node()
self.vs[18]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class HouseholdRoot()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Member()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class Family()
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class CommunityRoot()
(1,12), # applymodel -> apply_contains
(12,11), # apply_contains -> apply_class Woman()
(3,13), # match_class HouseholdRoot() -> association have
(13,7), # association have -> match_class Family()
(7,14), # match_class Family() -> association daughter
(14,5), # association daughter -> match_class Member()
(9,15), # apply_class CommunityRoot() -> association has
(15,11), # association has -> apply_class Woman()
            (9,16), # apply_class CommunityRoot() -> backward_association
            (16,3), # backward_association -> match_class HouseholdRoot()
            (11,17), # apply_class Woman() -> backward_association
            (17,7), # backward_association -> match_class Family()
            (11,18), # apply_class Woman() -> backward_association
            (18,5), # backward_association -> match_class Member()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((9,'ApplyAttribute'),('constant','root')), ((11,'ApplyAttribute'),('constant','famMemberDaughter')), ]
|
{
"content_hash": "2d3500ca8fbb5c3db3ccf9b448a3ef26",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 132,
"avg_line_length": 36.43055555555556,
"alnum_prop": 0.5022874571101792,
"repo_name": "levilucio/SyVOLT",
"id": "9d895220642f73165a136e32125cf0df0f60945a",
"size": "5246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ATLTrans/HUnionDaughterRule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from zerver.lib.statistics import seconds_usage_between
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import UserProfile
import datetime
from django.utils.timezone import utc
def analyze_activity(options):
day_start = datetime.datetime.strptime(options["date"], "%Y-%m-%d").replace(tzinfo=utc)
day_end = day_start + datetime.timedelta(days=options["duration"])
user_profile_query = UserProfile.objects.all()
if options["realm"]:
user_profile_query = user_profile_query.filter(realm__domain=options["realm"])
print "Per-user online duration:\n"
total_duration = datetime.timedelta(0)
for user_profile in user_profile_query:
duration = seconds_usage_between(user_profile, day_start, day_end)
if duration == datetime.timedelta(0):
continue
total_duration += duration
print "%-*s%s" % (37, user_profile.email, duration, )
print "\nTotal Duration: %s" % (total_duration,)
print "\nTotal Duration in minutes: %s" % (total_duration.total_seconds() / 60.,)
print "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
class Command(BaseCommand):
help = """Report analytics of user activity on a per-user and realm basis.
This command aggregates user activity data that is collected by each user using Zulip. It attempts
to approximate how much each user has been using Zulip per day, measured by recording each 15 minute
period where some activity has occurred (mouse move or keyboard activity).
It will correctly not count server-initiated reloads in the activity statistics.
The duration flag can be used to control how many days to show usage duration for
Usage: python2.7 manage.py analyze_user_activity [--realm=zulip.com] [--date=2013-09-10] [--duration=1]
By default, if no date is selected 2013-09-06 is used. If no realm is provided, information
is shown for all realms."""
option_list = BaseCommand.option_list + (
make_option('--realm', action='store'),
make_option('--date', action='store', default="2013-09-06"),
make_option('--duration', action='store', default=1, type=int, help="How many days to show usage information for"),
)
def handle(self, *args, **options):
analyze_activity(options)
|
{
"content_hash": "70b4b4d84ffad9dd80765fbe1ab62243",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 123,
"avg_line_length": 42.45614035087719,
"alnum_prop": 0.7,
"repo_name": "gkotian/zulip",
"id": "9bf5a0b510f277c75da6bf32026ccb257a1f524a",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analytics/management/commands/analyze_user_activity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "180162"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "343146"
},
{
"name": "JavaScript",
"bytes": "1549551"
},
{
"name": "Nginx",
"bytes": "1001"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "90728"
},
{
"name": "Python",
"bytes": "1791028"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "30112"
}
],
"symlink_target": ""
}
|
from .simple_content_view import SimpleContentView
|
{
"content_hash": "ac7f10af6cf0e925e51fa250d527f2b8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 50,
"avg_line_length": 51,
"alnum_prop": 0.8627450980392157,
"repo_name": "6aika/issue-reporting",
"id": "06f9c1a828555de7455b7567d426fb7f38d4d173",
"size": "51",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "issues_simple_ui/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "307"
},
{
"name": "Dockerfile",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "8542"
},
{
"name": "JavaScript",
"bytes": "24775"
},
{
"name": "Python",
"bytes": "153728"
}
],
"symlink_target": ""
}
|
"""
Window provides access to the DOM model's global Window.
"""
closingListeners = []
resizeListeners = []
from __pyjamas__ import JS, doc, wnd, get_main_frame
from pyjamas import Location
def init_listeners():
pass
def addWindowCloseListener(listener):
closingListeners.append(listener)
def addWindowResizeListener(listener):
resizeListeners.append(listener)
def removeWindowCloseListener(listener):
closingListeners.remove(listener)
def removeWindowResizeListener(listener):
resizeListeners.remove(listener)
def alert(txt):
get_main_frame()._alert(txt)
def confirm(msg):
return wnd().confirm(msg)
def prompt(msg, defaultReply=""):
return wnd().prompt(msg, defaultReply)
def enableScrolling(enable):
doc().body.style.overflow = enable and 'auto' or 'hidden'
def scrollBy(x, y):
wnd().scrollBy(x, y)
def scroll(x, y):
wnd().scroll(x, y)
def getClientHeight():
    try:
        return wnd().innerHeight
    except Exception:
        return doc().body.clientHeight
def getClientWidth():
    try:
        return wnd().innerWidth
    except Exception:
        return doc().body.clientWidth
def setLocation(url):
w = wnd()
w.location = url
location = None
def getLocation():
global location
if not location:
location = Location.Location(wnd().location)
return location
def getTitle():
return doc().title
def open(url, name, features):
wnd().open(url, name, features)
def setMargin(size):
    doc().body.style.margin = size
def setTitle(title):
d = doc()
d.title = title
def setOnError(onError):
pass
def onError(msg, url, linenumber):
pass
# TODO: call fireClosedAndCatch
def onClosed():
fireClosedImpl()
# TODO: call fireClosingAndCatch
def onClosing():
fireClosingImpl()
# TODO: call fireResizedAndCatch
def onResize():
fireResizedImpl()
def fireClosedAndCatch(handler):
# FIXME - need implementation
pass
def fireClosedImpl():
for listener in closingListeners:
listener.onWindowClosed()
def fireClosingAndCatch(handler):
# FIXME - need implementation
pass
def resize(width, height):
""" changes size to specified width and height
"""
wnd().resizeTo(width, height)
def resizeBy(width, height):
""" changes size by specified width and height
"""
wnd().resizeBy(width, height)
def fireClosingImpl():
ret = None
for listener in closingListeners:
msg = listener.onWindowClosing()
if ret is None:
ret = msg
return ret
def fireResizedAndCatch(handler):
# FIXME - need implementation
pass
def fireResizedImpl():
for listener in resizeListeners:
listener.onWindowResized(getClientWidth(), getClientHeight())
def init():
pass
init()
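# Minimal usage sketch (hypothetical listener class; assumes a running
# Pyjamas application):
# class TitleUpdater:
#     def onWindowResized(self, width, height):
#         setTitle('%dx%d' % (width, height))
#     def onWindowClosing(self):
#         return 'Leave this page?'
#     def onWindowClosed(self):
#         pass
# listener = TitleUpdater()
# addWindowResizeListener(listener)
# addWindowCloseListener(listener)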
|
{
"content_hash": "6c8d9c8a64a019e58857f8c5f6c2c07b",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 69,
"avg_line_length": 19.659574468085108,
"alnum_prop": 0.68001443001443,
"repo_name": "lovelysystems/pyjamas",
"id": "c8e60f9bd8447b148ad8b7afb8c35afb617081f3",
"size": "2876",
"binary": false,
"copies": "1",
"ref": "refs/heads/ls-production",
"path": "library/pyjamas/Window.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "271093"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "1958339"
},
{
"name": "Shell",
"bytes": "9117"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import numpy as np
import pytest
import scipy.sparse
from hypothesis import given, settings, strategies
from scipy.sparse import csr_matrix, rand
import xgboost as xgb
from xgboost import testing as tm
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
def set_base_margin_info(DType, DMatrixT, tree_method: str):
rng = np.random.default_rng()
X = DType(rng.normal(0, 1.0, size=100).astype(np.float32).reshape(50, 2))
if hasattr(X, "iloc"):
y = X.iloc[:, 0]
else:
y = X[:, 0]
base_margin = X
# no error at set
Xy = DMatrixT(X, y, base_margin=base_margin)
# Error at train, caused by check in predictor.
with pytest.raises(ValueError, match=r".*base_margin.*"):
xgb.train({"tree_method": tm}, Xy)
if not hasattr(X, "iloc"):
# column major matrix
got = DType(Xy.get_base_margin().reshape(50, 2))
assert (got == base_margin).all()
assert base_margin.T.flags.c_contiguous is False
assert base_margin.T.flags.f_contiguous is True
Xy.set_info(base_margin=base_margin.T)
got = DType(Xy.get_base_margin().reshape(2, 50))
assert (got == base_margin.T).all()
# Row vs col vec.
base_margin = y
Xy.set_base_margin(base_margin)
bm_col = Xy.get_base_margin()
Xy.set_base_margin(base_margin.reshape(1, base_margin.size))
bm_row = Xy.get_base_margin()
assert (bm_row == bm_col).all()
# type
base_margin = base_margin.astype(np.float64)
Xy.set_base_margin(base_margin)
bm_f64 = Xy.get_base_margin()
assert (bm_f64 == bm_col).all()
# too many dimensions
base_margin = X.reshape(2, 5, 2, 5)
with pytest.raises(ValueError, match=r".*base_margin.*"):
Xy.set_base_margin(base_margin)
class TestDMatrix:
def test_warn_missing(self):
from xgboost import data
with pytest.warns(UserWarning):
data._warn_unused_missing('uri', 4)
with pytest.warns(None) as record:
data._warn_unused_missing('uri', None)
data._warn_unused_missing('uri', np.nan)
assert len(record) == 0
with pytest.warns(None) as record:
x = rng.randn(10, 10)
y = rng.randn(10)
xgb.DMatrix(x, y, missing=4)
assert len(record) == 0
with pytest.warns(UserWarning):
csr = csr_matrix(x)
xgb.DMatrix(csr.tocsc(), y, missing=4)
def test_dmatrix_numpy_init(self):
data = np.random.randn(5, 5)
dm = xgb.DMatrix(data)
assert dm.num_row() == 5
assert dm.num_col() == 5
data = np.array([[1, 2], [3, 4]])
dm = xgb.DMatrix(data)
assert dm.num_row() == 2
assert dm.num_col() == 2
# 0d array
with pytest.raises(ValueError):
xgb.DMatrix(np.array(1))
# 1d array
with pytest.raises(ValueError):
xgb.DMatrix(np.array([1, 2, 3]))
# 3d array
data = np.random.randn(5, 5, 5)
with pytest.raises(ValueError):
xgb.DMatrix(data)
# object dtype
data = np.array([['a', 'b'], ['c', 'd']])
with pytest.raises(ValueError):
xgb.DMatrix(data)
def test_csr(self):
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
data = np.array([1, 2, 3, 4, 5, 6])
X = scipy.sparse.csr_matrix((data, indices, indptr), shape=(3, 3))
dtrain = xgb.DMatrix(X)
assert dtrain.num_row() == 3
assert dtrain.num_col() == 3
def test_csc(self):
row = np.array([0, 2, 2, 0, 1, 2])
col = np.array([0, 0, 1, 2, 2, 2])
data = np.array([1, 2, 3, 4, 5, 6])
X = scipy.sparse.csc_matrix((data, (row, col)), shape=(3, 3))
dtrain = xgb.DMatrix(X)
assert dtrain.num_row() == 3
assert dtrain.num_col() == 3
def test_coo(self):
row = np.array([0, 2, 2, 0, 1, 2])
col = np.array([0, 0, 1, 2, 2, 2])
data = np.array([1, 2, 3, 4, 5, 6])
X = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3))
dtrain = xgb.DMatrix(X)
assert dtrain.num_row() == 3
assert dtrain.num_col() == 3
def test_np_view(self):
# Sliced Float32 array
y = np.array([12, 34, 56], np.float32)[::2]
from_view = xgb.DMatrix(np.array([[]]), label=y).get_label()
from_array = xgb.DMatrix(np.array([[]]), label=y + 0).get_label()
assert (from_view.shape == from_array.shape)
assert (from_view == from_array).all()
# Sliced UInt array
z = np.array([12, 34, 56], np.uint32)[::2]
dmat = xgb.DMatrix(np.array([[]]))
dmat.set_uint_info('group', z)
from_view = dmat.get_uint_info('group_ptr')
dmat = xgb.DMatrix(np.array([[]]))
dmat.set_uint_info('group', z + 0)
from_array = dmat.get_uint_info('group_ptr')
assert (from_view.shape == from_array.shape)
assert (from_view == from_array).all()
def test_slice(self):
X = rng.randn(100, 100)
y = rng.randint(low=0, high=3, size=100).astype(np.float32)
d = xgb.DMatrix(X, y)
np.testing.assert_equal(d.get_label(), y)
fw = rng.uniform(size=100).astype(np.float32)
d.set_info(feature_weights=fw)
# base margin is per-class in multi-class classifier
base_margin = rng.randn(100, 3).astype(np.float32)
d.set_base_margin(base_margin)
np.testing.assert_allclose(d.get_base_margin().reshape(100, 3), base_margin)
ridxs = [1, 2, 3, 4, 5, 6]
sliced = d.slice(ridxs)
# Slicing works with label and other meta info fields
np.testing.assert_equal(sliced.get_label(), y[1:7])
np.testing.assert_equal(sliced.get_float_info('feature_weights'), fw)
np.testing.assert_equal(sliced.get_base_margin(), base_margin[1:7, :].flatten())
np.testing.assert_equal(sliced.get_base_margin(), sliced.get_float_info('base_margin'))
# Slicing a DMatrix results into a DMatrix that's equivalent to a DMatrix that's
# constructed from the corresponding NumPy slice
d2 = xgb.DMatrix(X[1:7, :], y[1:7])
d2.set_base_margin(base_margin[1:7, :])
eval_res = {}
_ = xgb.train(
{'num_class': 3, 'objective': 'multi:softprob',
'eval_metric': 'mlogloss'},
d,
num_boost_round=2, evals=[(d2, 'd2'), (sliced, 'sliced')], evals_result=eval_res)
np.testing.assert_equal(eval_res['d2']['mlogloss'], eval_res['sliced']['mlogloss'])
ridxs_arr = np.array(ridxs)[1:] # handles numpy slice correctly
sliced = d.slice(ridxs_arr)
np.testing.assert_equal(sliced.get_label(), y[2:7])
def test_feature_names_slice(self):
data = np.random.randn(5, 5)
# different length
with pytest.raises(ValueError):
xgb.DMatrix(data, feature_names=list('abcdef'))
# contains duplicates
with pytest.raises(ValueError):
xgb.DMatrix(data, feature_names=['a', 'b', 'c', 'd', 'd'])
# contains symbol
with pytest.raises(ValueError):
xgb.DMatrix(data, feature_names=['a', 'b', 'c', 'd', 'e<1'])
dm = xgb.DMatrix(data)
dm.feature_names = list('abcde')
assert dm.feature_names == list('abcde')
assert dm.slice([0, 1]).num_col() == dm.num_col()
assert dm.slice([0, 1]).feature_names == dm.feature_names
dm.feature_types = 'q'
assert dm.feature_types == list('qqqqq')
dm.feature_types = list('qiqiq')
assert dm.feature_types == list('qiqiq')
with pytest.raises(ValueError):
dm.feature_types = list('abcde')
# reset
dm.feature_names = None
assert dm.feature_names is None
assert dm.feature_types is None
def test_feature_names(self):
data = np.random.randn(100, 5)
target = np.array([0, 1] * 50)
cases = [['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5'],
[u'要因1', u'要因2', u'要因3', u'要因4', u'要因5']]
for features in cases:
dm = xgb.DMatrix(data, label=target,
feature_names=features)
assert dm.feature_names == features
assert dm.num_row() == 100
assert dm.num_col() == 5
params = {'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'eta': 0.3,
'num_class': 3}
bst = xgb.train(params, dm, num_boost_round=10)
scores = bst.get_fscore()
assert list(sorted(k for k in scores)) == features
dummy = np.random.randn(5, 5)
dm = xgb.DMatrix(dummy, feature_names=features)
bst.predict(dm)
# different feature name must raises error
dm = xgb.DMatrix(dummy, feature_names=list('abcde'))
with pytest.raises(ValueError):
bst.predict(dm)
@pytest.mark.skipif(**tm.no_pandas())
def test_save_binary(self):
import pandas as pd
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'm.dmatrix')
data = pd.DataFrame({
"a": [0, 1],
"b": [2, 3],
"c": [4, 5]
})
m0 = xgb.DMatrix(data.loc[:, ["a", "b"]], data["c"])
assert m0.feature_names == ['a', 'b']
m0.save_binary(path)
m1 = xgb.DMatrix(path)
assert m0.feature_names == m1.feature_names
assert m0.feature_types == m1.feature_types
def test_get_info(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtrain.get_float_info('label')
dtrain.get_float_info('weight')
dtrain.get_float_info('base_margin')
dtrain.get_uint_info('group_ptr')
group_len = np.array([2, 3, 4])
dtrain.set_group(group_len)
np.testing.assert_equal(group_len, dtrain.get_group())
def test_qid(self):
rows = 100
cols = 10
X, y = rng.randn(rows, cols), rng.randn(rows)
qid = rng.randint(low=0, high=10, size=rows, dtype=np.uint32)
qid = np.sort(qid)
Xy = xgb.DMatrix(X, y)
Xy.set_info(qid=qid)
group_ptr = Xy.get_uint_info('group_ptr')
assert group_ptr[0] == 0
assert group_ptr[-1] == rows
def test_feature_weights(self):
kRows = 10
kCols = 50
rng = np.random.RandomState(1994)
fw = rng.uniform(size=kCols)
X = rng.randn(kRows, kCols)
m = xgb.DMatrix(X)
m.set_info(feature_weights=fw)
np.testing.assert_allclose(fw, m.get_float_info('feature_weights'))
# Handle empty
m.set_info(feature_weights=np.empty((0, )))
assert m.get_float_info('feature_weights').shape[0] == 0
fw -= 1
with pytest.raises(ValueError):
m.set_info(feature_weights=fw)
def test_sparse_dmatrix_csr(self):
nrow = 100
ncol = 1000
x = rand(nrow, ncol, density=0.0005, format='csr', random_state=rng)
assert x.indices.max() < ncol - 1
x.data[:] = 1
dtrain = xgb.DMatrix(x, label=rng.binomial(1, 0.3, nrow))
assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)
watchlist = [(dtrain, 'train')]
param = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}
bst = xgb.train(param, dtrain, 5, watchlist)
bst.predict(dtrain)
i32 = csr_matrix((x.data.astype(np.int32), x.indices, x.indptr), shape=x.shape)
f32 = csr_matrix(
(i32.data.astype(np.float32), x.indices, x.indptr), shape=x.shape
)
di32 = xgb.DMatrix(i32)
df32 = xgb.DMatrix(f32)
dense = xgb.DMatrix(f32.toarray(), missing=0)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "f32.dmatrix")
df32.save_binary(path)
with open(path, "rb") as fd:
df32_buffer = np.array(fd.read())
path = os.path.join(tmpdir, "f32.dmatrix")
di32.save_binary(path)
with open(path, "rb") as fd:
di32_buffer = np.array(fd.read())
path = os.path.join(tmpdir, "dense.dmatrix")
dense.save_binary(path)
with open(path, "rb") as fd:
dense_buffer = np.array(fd.read())
np.testing.assert_equal(df32_buffer, di32_buffer)
np.testing.assert_equal(df32_buffer, dense_buffer)
def test_sparse_dmatrix_csc(self):
nrow = 1000
ncol = 100
x = rand(nrow, ncol, density=0.0005, format='csc', random_state=rng)
assert x.indices.max() < nrow - 1
x.data[:] = 1
dtrain = xgb.DMatrix(x, label=rng.binomial(1, 0.3, nrow))
assert (dtrain.num_row(), dtrain.num_col()) == (nrow, ncol)
watchlist = [(dtrain, 'train')]
param = {'max_depth': 3, 'objective': 'binary:logistic', 'verbosity': 0}
bst = xgb.train(param, dtrain, 5, watchlist)
bst.predict(dtrain)
def test_unknown_data(self):
class Data:
pass
with pytest.raises(TypeError):
with pytest.warns(UserWarning):
d = Data()
xgb.DMatrix(d)
from scipy import sparse
rng = np.random.RandomState(1994)
X = rng.rand(10, 10)
y = rng.rand(10)
X = sparse.dok_matrix(X)
Xy = xgb.DMatrix(X, y)
assert Xy.num_row() == 10
assert Xy.num_col() == 10
@pytest.mark.skipif(**tm.no_pandas())
def test_np_categorical(self):
n_features = 10
X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
X = X.values.astype(np.float32)
feature_types = ['c'] * n_features
assert isinstance(X, np.ndarray)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
def test_scipy_categorical(self):
from scipy import sparse
n_features = 10
X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False)
X = X.values.astype(np.float32)
feature_types = ['c'] * n_features
X[1, 3] = np.NAN
X[2, 4] = np.NAN
X = sparse.csr_matrix(X)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
X = sparse.csc_matrix(X)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
X = sparse.coo_matrix(X)
Xy = xgb.DMatrix(X, y, feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
def test_uri_categorical(self):
path = os.path.join(dpath, 'agaricus.txt.train')
feature_types = ["q"] * 5 + ["c"] + ["q"] * 120
Xy = xgb.DMatrix(path + "?indexing_mode=1", feature_types=feature_types)
np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types))
def test_base_margin(self):
set_base_margin_info(np.asarray, xgb.DMatrix, "hist")
@given(
strategies.integers(0, 1000),
strategies.integers(0, 100),
strategies.fractions(0, 1),
)
@settings(deadline=None, print_blob=True)
def test_to_csr(self, n_samples, n_features, sparsity) -> None:
if n_samples == 0 or n_features == 0 or sparsity == 1.0:
csr = scipy.sparse.csr_matrix(np.empty((0, 0)))
else:
csr = tm.make_sparse_regression(n_samples, n_features, sparsity, False)[
0
].astype(np.float32)
m = xgb.DMatrix(data=csr)
ret = m.get_data()
np.testing.assert_equal(csr.indptr, ret.indptr)
np.testing.assert_equal(csr.data, ret.data)
np.testing.assert_equal(csr.indices, ret.indices)
|
{
"content_hash": "ee05e3e72f57f5ab35af819b64f81eaf",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 95,
"avg_line_length": 35.824175824175825,
"alnum_prop": 0.5571779141104295,
"repo_name": "dmlc/xgboost",
"id": "def3690275b53ab9787fc7ae2cbbd47792a7b609",
"size": "16320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/test_dmatrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1383"
},
{
"name": "C",
"bytes": "23067"
},
{
"name": "C++",
"bytes": "2182522"
},
{
"name": "CMake",
"bytes": "52394"
},
{
"name": "CSS",
"bytes": "3812"
},
{
"name": "Cuda",
"bytes": "855374"
},
{
"name": "Dockerfile",
"bytes": "2364"
},
{
"name": "Groovy",
"bytes": "1251"
},
{
"name": "Java",
"bytes": "206549"
},
{
"name": "M4",
"bytes": "2131"
},
{
"name": "Makefile",
"bytes": "8179"
},
{
"name": "PowerShell",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "1189411"
},
{
"name": "R",
"bytes": "342898"
},
{
"name": "Scala",
"bytes": "471040"
},
{
"name": "Shell",
"bytes": "45815"
},
{
"name": "TeX",
"bytes": "913"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from datetime import datetime
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request, 'index.html', {
'current_time': datetime.now().strftime('%Y-%m-%d(%a) %H:%M:%S'),
})
|
{
"content_hash": "d84eaa3f8d1af542133daca953673684",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 28.545454545454547,
"alnum_prop": 0.697452229299363,
"repo_name": "steny138/WorldTraveller",
"id": "5618354fbff32f0d17ecae16f4b3b4eca253ff63",
"size": "338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/world_traveller/wtweb/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "102"
},
{
"name": "Python",
"bytes": "26294"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from django.core.urlresolvers import reverse
class LandingPageTest(TestCase):
    """
    Note: you should still do your own integration tests
    with Selenium or a similar tool.
    This merely checks that the landing page renders;
    it does not test style or layout.
    """
def setUp(self):
self.client = Client()
def test_landing_page_request_returns_200(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
def test_company_name_in_landing_page(self):
response = self.client.get(reverse('index'))
self.assertTrue(settings.COMPANY_NAME in response.content)
|
{
"content_hash": "251ac6cae62615b39689c5d134b6cbae",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 31.56,
"alnum_prop": 0.6945500633713562,
"repo_name": "bliti/tienda",
"id": "b8cc8793e18a872c7a2f8bfbad59bfc13139c9cb",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landing_page/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12902"
}
],
"symlink_target": ""
}
|
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy',parent_package,top_path)
config.add_subpackage('distutils')
config.add_subpackage('testing')
config.add_subpackage('f2py')
config.add_subpackage('core')
config.add_subpackage('lib')
config.add_subpackage('oldnumeric')
config.add_subpackage('numarray')
config.add_subpackage('fft')
config.add_subpackage('linalg')
config.add_subpackage('random')
config.add_subpackage('ma')
config.add_subpackage('matrixlib')
config.add_subpackage('compat')
config.add_subpackage('doc')
config.add_data_dir('doc')
config.add_data_dir('tests')
config.make_config_py() # installs __config__.py
return config
if __name__ == '__main__':
print 'This is the wrong setup.py file to run'
|
{
"content_hash": "9d3b4d69dc97e09edf5d040502262974",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 59,
"avg_line_length": 36.75,
"alnum_prop": 0.6870748299319728,
"repo_name": "plaes/numpy",
"id": "faa13d9d16f9c99b2f8c8a8104519bf915e66e3e",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.core.validators import MinValueValidator, MaxValueValidator
from nr.formulas import mask_toggle
class Applicant(models.Model):
TIRANA=20
ELBASAN=30
DURRES=40
SITE_CHOICES = (
(TIRANA,_('Tirana')),
(ELBASAN,_('Elbasan')),
(DURRES,_('Durres')),
)
PRIVATE_HOME = 10
APARTMENT = 20
DWELLINGTYPE_CHOICES = (
(PRIVATE_HOME,_('Single Family house or private home')),
(APARTMENT, _('Apartment')),
)
VERY_GOOD_CONDITION = 10
APPROPRIATE_FOR_LIVING = 20
INAPPROPRIATE_FOR_LIVING = 30
UNDER_CONSTRUCTION = 40
DWELLINGCONDITION_CHOICES = (
(VERY_GOOD_CONDITION, _('Very good condition')),
(APPROPRIATE_FOR_LIVING, _('Appropriate for living')),
(INAPPROPRIATE_FOR_LIVING, _('Inappropriate for living')),
(UNDER_CONSTRUCTION, _('Under construction')),
)
YES = 1
NO = 0
YESNO_CHOICES = (
(YES,_('Yes')),
(NO,_('No')),
)
URBAN = 1
RURAL = 0
AREA_CHOICES = (
(URBAN,_('Urban')),
(RURAL,_('Rural')),
)
SLATE = 1
TIN = 2
OTHER = 3
ROOFMATERIAL_CHOICES = (
(SLATE, _('Slate roof')),
(TIN, _('Tin material roof')),
(OTHER, _('Something else')),
)
d_site = dict(SITE_CHOICES)
d_dwellingtype = dict(DWELLINGTYPE_CHOICES)
d_dwellingcondition = dict(DWELLINGCONDITION_CHOICES)
d_yesno = dict(YESNO_CHOICES)
d_area = dict(AREA_CHOICES)
user = models.ForeignKey(User, editable=False)
bank_card_number = models.CharField(_('Bank card number'),max_length=50, unique=True)
site_of_interview = models.IntegerField(_('Site of interview'), choices = SITE_CHOICES, default=TIRANA, blank=False)
housenumber = models.CharField(_('House Number'),max_length=8)
address_line1 = models.CharField(_('Address line 1'),max_length=50)
address_line2 = models.CharField(_('Apt #'),max_length=50,blank=True)
municipality = models.CharField(_('Municipality/commune'),max_length=25)
district = models.CharField(_('District'),max_length=25,blank=True)
urban = models.IntegerField(_('Area (urban/rural)'), choices = AREA_CHOICES, blank=False)
postal = models.CharField(_('Postal code'),max_length=25,blank=True)
home_phone = models.CharField(_('Home telephone'),max_length=15)
mobile_phone = models.CharField(_('Mobile telephone'),max_length=15)
dwelling_type = models.IntegerField(_('Dwelling type'), choices = DWELLINGTYPE_CHOICES, blank=False)
rooms = models.IntegerField(_('Number of rooms that your family occupies'), blank=False)
condition = models.IntegerField(_('What is the condition of the dwelling?'), choices = DWELLINGCONDITION_CHOICES, blank=False)
roof_material = models.IntegerField(_('Roof Material?'), choices = ROOFMATERIAL_CHOICES, blank=False)
gas_for_lighting = models.IntegerField(_('Does your household use gas for lighting?'), choices = YESNO_CHOICES, blank=False)
gas_for_heating = models.IntegerField(_('Does your household use gas for heating?'), choices = YESNO_CHOICES, blank=False)
gas_for_cooking = models.IntegerField(_('Does your household use gas for cooking?'), choices = YESNO_CHOICES, blank=False)
gas_for_other = models.IntegerField(_('Does your household use gas for other appliances?'), choices = YESNO_CHOICES, blank=False)
a_color_tv = models.IntegerField(_('Does your household have a color TV?'), choices = YESNO_CHOICES, blank=False)
a_washing_machine = models.IntegerField(_('Does your household have a washing machine?'), choices = YESNO_CHOICES, blank=False)
q_cows = models.IntegerField(_('How many cows does your household own?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
q_pigs = models.IntegerField(_('How many pigs does your household own?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
sp_ne_amount = models.IntegerField(_('How much did your household receive from NE for the last payment?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
sp_ne_months = models.IntegerField(_('How many months was this NE payment for?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
sp_ne_last12m = models.IntegerField(_('How much did your household receive from NE for the last 12 months?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
sp_dp_amount = models.IntegerField(_('How much did your household receive from Disability Pension for the last payment?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
sp_dp_months = models.IntegerField(_('How many months was this Disability Pension payment for?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
sp_dp_last12m = models.IntegerField(_('How much did your household receive from Disability Pension for the last 12 months?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
oi_remit = models.IntegerField(_('How much did your household receive in total from remittances in the last 12 months, including the value of any gift or payment in the form of goods?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
oi_rent = models.IntegerField(_('How much did your household receive in total from rent from land in the last 12 months, including the value of any gift or payment in the form of goods?'), blank=False, validators=[MinValueValidator(0), MaxValueValidator(10000000)])
tell_me = models.TextField(_('Is there anything else you would like to tell me?'), blank=True)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Meta:
ordering = ["-created_at"]
@property
def pk_masked(self):
return mask_toggle(self.pk)
@property
    def score(self):
        household_size = Householdmember.objects.filter(applicant=self).count()
        return 10 - household_size + 2 * self.a_washing_machine + 5 * self.a_color_tv
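    # Example (hypothetical household): 4 members, a color TV and no washing
    # machine give a score of 10 - 4 + 2*0 + 5*1 = 11.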
def __unicode__(self):
return "BCN:"+str(self.bank_card_number)+", PKM:"+str(self.pk_masked)+", PK:"+str(self.pk)
@property
def hhsize(self):
return Householdmember.objects.filter(applicant=self).count()
# from http://stackoverflow.com/questions/2170228/django-iterate-over-model-instance-field-names-and-values-in-template
def get_all_fields(self):
fields = []
for f in self._meta.fields:
fname = f.name
# resolve picklists/choices, with get_xyz_display() function
get_choice = 'get_'+fname+'_display'
            if hasattr(self, get_choice):
                value = getattr(self, get_choice)()
            else:
                try:
                    value = getattr(self, fname)
                except User.DoesNotExist:
                    value = None
# only display fields with values and skip some fields entirely
            if f.editable and f.name not in ('id', 'created_at', 'updated_at', 'user'):
fields.append(
{
'label':f.verbose_name,
'name':f.name,
'value':value,
}
)
return fields
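    # Example entry produced above (hypothetical values):
    # {'label': 'Site of interview', 'name': 'site_of_interview',
    #  'value': 'Tirana'} -- choice fields resolve via get_<name>_display().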
class Householdmember(models.Model):
MALE = 0
FEMALE = 1
GENDER_CHOICES =(
(MALE,_('Male')),
(FEMALE,_('Female')),
)
d_gender = dict(GENDER_CHOICES)
HEAD = 1
SPOUSE = 2
CHILD = 3
GRANDCHILD = 4
NIECENEPHEW = 5
FATHERMOTHER = 6
SIBLING = 7
SONDINLAW = 8
SIBLINGINLAW = 9
GRANDDADMOM = 10
FATHERMOTHERINLAW = 11
OTHER = 12
NOTRELATED = 13
RELTOHEAD_CHOICES = (
(HEAD,_('Head')),
(SPOUSE,_('Spouse/Partner')),
(CHILD,_('Child/Adopted child')),
(GRANDCHILD,_('Grandchild')),
(NIECENEPHEW,_('Niece/Nephew')),
(FATHERMOTHER,_('Father/Mother')),
(SIBLING,_('Sister/Brother')),
(SONDINLAW,_('Son/Daughter in law')),
(SIBLINGINLAW,_('Brother/Sister in law')),
(GRANDDADMOM,_('Grandfather/Grandmother')),
(FATHERMOTHERINLAW,_('Father/Mother in law')),
(OTHER,_('Other relative')),
(NOTRELATED,_('Not related')),
)
d_reltohead = dict(RELTOHEAD_CHOICES)
MARRIED = 1
DIVORCED = 2
LIVINGTOGETHER = 3
WIDOWER = 4
SINGLE = 5
MARITAL_CHOICES = (
(MARRIED,_('Married')),
(DIVORCED,_('Divorced/Separated')),
(LIVINGTOGETHER,_('Living together')),
(WIDOWER,_('Widower')),
(SINGLE,_('Single')),
)
d_marital = dict(MARITAL_CHOICES)
YES = 1
NO = 0
YESNO_CHOICES = (
(YES,_('Yes')),
(NO,_('No')),
)
d_yesno = dict(YESNO_CHOICES)
applicant = models.ForeignKey(Applicant)
first_name = models.CharField(_('First name'),max_length=50,blank=False)
middle_name = models.CharField(_('Middle name'),max_length=50,blank=True)
last_name = models.CharField(_('Last name'),max_length=50,blank=False)
national_id = models.CharField(_('National ID'),max_length=50,blank=False, unique=True)
male = models.IntegerField(_('Gender'), choices = GENDER_CHOICES, blank=False)
date_of_birth = models.DateField()
rel_to_head = models.IntegerField(_('Relationship to HH Head'), choices = RELTOHEAD_CHOICES, blank=False)
disability = models.IntegerField(_('Has a disability'), choices = YESNO_CHOICES, blank=False)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Meta:
ordering = ["rel_to_head"]
# from http://stackoverflow.com/questions/2170228/django-iterate-over-model-instance-field-names-and-values-in-template
def get_all_fields(self):
fields = []
for f in self._meta.fields:
fname = f.name
# resolve picklists/choices, with get_xyz_display() function
get_choice = 'get_'+fname+'_display'
            if hasattr(self, get_choice):
                value = getattr(self, get_choice)()
            else:
                try:
                    value = getattr(self, fname)
                except User.DoesNotExist:
                    value = None
# only display fields with values and skip some fields entirely
if f.editable and f.name not in ('id', 'created_at', 'updated_at', 'applicant'):
fields.append(
{
'label':f.verbose_name,
'name':f.name,
'value':value,
}
)
return fields
|
{
"content_hash": "0926c60996427f216025708b25d8284d",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 270,
"avg_line_length": 40.330677290836654,
"alnum_prop": 0.6954460140274622,
"repo_name": "shafiquejamal/socialassistanceregistry",
"id": "229e997b8ad1b4c7f3205613d917f26e9c50af88",
"size": "10123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nr/applicants/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "652875"
},
{
"name": "JavaScript",
"bytes": "909032"
},
{
"name": "PHP",
"bytes": "51712"
},
{
"name": "Python",
"bytes": "56181"
},
{
"name": "Shell",
"bytes": "1008"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("apostello", "0018_auto_20170808_1045")]
operations = [migrations.RemoveField(model_name="smsinbound", name="matched_link")]
|
{
"content_hash": "c6774a3bfae7681bc07d2373a7066143",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 87,
"avg_line_length": 26.7,
"alnum_prop": 0.7415730337078652,
"repo_name": "monty5811/apostello",
"id": "c81d499e90dba43967d095bcfb17660f24aca3f3",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apostello/migrations/0019_remove_smsinbound_matched_link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18413"
},
{
"name": "Elm",
"bytes": "484874"
},
{
"name": "HTML",
"bytes": "21141"
},
{
"name": "JavaScript",
"bytes": "31346"
},
{
"name": "Makefile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "372217"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
}
|
"""Mocks for the august component."""
import json
import os
import time
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from yalexs.activity import (
ACTIVITY_ACTIONS_BRIDGE_OPERATION,
ACTIVITY_ACTIONS_DOOR_OPERATION,
ACTIVITY_ACTIONS_DOORBELL_DING,
ACTIVITY_ACTIONS_DOORBELL_MOTION,
ACTIVITY_ACTIONS_DOORBELL_VIEW,
ACTIVITY_ACTIONS_LOCK_OPERATION,
SOURCE_LOCK_OPERATE,
SOURCE_LOG,
BridgeOperationActivity,
DoorbellDingActivity,
DoorbellMotionActivity,
DoorbellViewActivity,
DoorOperationActivity,
LockOperationActivity,
)
from yalexs.authenticator import AuthenticationState
from yalexs.doorbell import Doorbell, DoorbellDetail
from yalexs.lock import Lock, LockDetail
from yalexs.pubnub_async import AugustPubNub
from homeassistant.components.august.const import CONF_LOGIN_METHOD, DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry, load_fixture
def _mock_get_config():
"""Return a default august config."""
return {
DOMAIN: {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "mocked_username",
CONF_PASSWORD: "mocked_password",
}
}
def _mock_authenticator(auth_state):
"""Mock an august authenticator."""
authenticator = MagicMock()
type(authenticator).state = PropertyMock(return_value=auth_state)
return authenticator
@patch("homeassistant.components.august.gateway.ApiAsync")
@patch("homeassistant.components.august.gateway.AuthenticatorAsync.async_authenticate")
async def _mock_setup_august(
hass, api_instance, pubnub_mock, authenticate_mock, api_mock
):
"""Set up august integration."""
authenticate_mock.side_effect = MagicMock(
return_value=_mock_august_authentication(
"original_token", 1234, AuthenticationState.AUTHENTICATED
)
)
api_mock.return_value = api_instance
entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
options={},
)
entry.add_to_hass(hass)
with patch("homeassistant.components.august.async_create_pubnub"), patch(
"homeassistant.components.august.AugustPubNub", return_value=pubnub_mock
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
async def _create_august_with_devices(
hass, devices, api_call_side_effects=None, activities=None, pubnub=None
):
entry, api_instance = await _create_august_api_with_devices(
hass, devices, api_call_side_effects, activities, pubnub
)
return entry
async def _create_august_api_with_devices( # noqa: C901
hass, devices, api_call_side_effects=None, activities=None, pubnub=None
):
if api_call_side_effects is None:
api_call_side_effects = {}
if pubnub is None:
pubnub = AugustPubNub()
device_data = {"doorbells": [], "locks": []}
for device in devices:
if isinstance(device, LockDetail):
device_data["locks"].append(
{"base": _mock_august_lock(device.device_id), "detail": device}
)
elif isinstance(device, DoorbellDetail):
device_data["doorbells"].append(
{"base": _mock_august_doorbell(device.device_id), "detail": device}
)
else:
raise ValueError
def _get_device_detail(device_type, device_id):
for device in device_data[device_type]:
if device["detail"].device_id == device_id:
return device["detail"]
raise ValueError
def _get_base_devices(device_type):
base_devices = []
for device in device_data[device_type]:
base_devices.append(device["base"])
return base_devices
def get_lock_detail_side_effect(access_token, device_id):
return _get_device_detail("locks", device_id)
def get_doorbell_detail_side_effect(access_token, device_id):
return _get_device_detail("doorbells", device_id)
def get_operable_locks_side_effect(access_token):
return _get_base_devices("locks")
def get_doorbells_side_effect(access_token):
return _get_base_devices("doorbells")
def get_house_activities_side_effect(access_token, house_id, limit=10):
if activities is not None:
return activities
return []
def lock_return_activities_side_effect(access_token, device_id):
lock = _get_device_detail("locks", device_id)
return [
# There is a check to prevent out of order events
# so we set the doorclosed & lock event in the future
# to prevent a race condition where we reject the event
# because it happened before the dooropen & unlock event.
_mock_lock_operation_activity(lock, "lock", 2000),
_mock_door_operation_activity(lock, "doorclosed", 2000),
]
def unlock_return_activities_side_effect(access_token, device_id):
lock = _get_device_detail("locks", device_id)
return [
_mock_lock_operation_activity(lock, "unlock", 0),
_mock_door_operation_activity(lock, "dooropen", 0),
]
if "get_lock_detail" not in api_call_side_effects:
api_call_side_effects["get_lock_detail"] = get_lock_detail_side_effect
if "get_doorbell_detail" not in api_call_side_effects:
api_call_side_effects["get_doorbell_detail"] = get_doorbell_detail_side_effect
if "get_operable_locks" not in api_call_side_effects:
api_call_side_effects["get_operable_locks"] = get_operable_locks_side_effect
if "get_doorbells" not in api_call_side_effects:
api_call_side_effects["get_doorbells"] = get_doorbells_side_effect
if "get_house_activities" not in api_call_side_effects:
api_call_side_effects["get_house_activities"] = get_house_activities_side_effect
if "lock_return_activities" not in api_call_side_effects:
api_call_side_effects[
"lock_return_activities"
] = lock_return_activities_side_effect
if "unlock_return_activities" not in api_call_side_effects:
api_call_side_effects[
"unlock_return_activities"
] = unlock_return_activities_side_effect
api_instance, entry = await _mock_setup_august_with_api_side_effects(
hass, api_call_side_effects, pubnub
)
if device_data["locks"]:
# Ensure we sync status when the integration is loaded if there
# are any locks
assert api_instance.async_status_async.mock_calls
return entry, api_instance
async def _mock_setup_august_with_api_side_effects(hass, api_call_side_effects, pubnub):
api_instance = MagicMock(name="Api")
if api_call_side_effects["get_lock_detail"]:
type(api_instance).async_get_lock_detail = AsyncMock(
side_effect=api_call_side_effects["get_lock_detail"]
)
if api_call_side_effects["get_operable_locks"]:
type(api_instance).async_get_operable_locks = AsyncMock(
side_effect=api_call_side_effects["get_operable_locks"]
)
if api_call_side_effects["get_doorbells"]:
type(api_instance).async_get_doorbells = AsyncMock(
side_effect=api_call_side_effects["get_doorbells"]
)
if api_call_side_effects["get_doorbell_detail"]:
type(api_instance).async_get_doorbell_detail = AsyncMock(
side_effect=api_call_side_effects["get_doorbell_detail"]
)
if api_call_side_effects["get_house_activities"]:
type(api_instance).async_get_house_activities = AsyncMock(
side_effect=api_call_side_effects["get_house_activities"]
)
if api_call_side_effects["lock_return_activities"]:
type(api_instance).async_lock_return_activities = AsyncMock(
side_effect=api_call_side_effects["lock_return_activities"]
)
if api_call_side_effects["unlock_return_activities"]:
type(api_instance).async_unlock_return_activities = AsyncMock(
side_effect=api_call_side_effects["unlock_return_activities"]
)
api_instance.async_unlock_async = AsyncMock()
api_instance.async_lock_async = AsyncMock()
api_instance.async_status_async = AsyncMock()
api_instance.async_get_user = AsyncMock(return_value={"UserID": "abc"})
return api_instance, await _mock_setup_august(hass, api_instance, pubnub)
def _mock_august_authentication(token_text, token_timestamp, state):
authentication = MagicMock(name="yalexs.authentication")
type(authentication).state = PropertyMock(return_value=state)
type(authentication).access_token = PropertyMock(return_value=token_text)
type(authentication).access_token_expires = PropertyMock(
return_value=token_timestamp
)
return authentication
def _mock_august_lock(lockid="mocklockid1", houseid="mockhouseid1"):
return Lock(lockid, _mock_august_lock_data(lockid=lockid, houseid=houseid))
def _mock_august_doorbell(deviceid="mockdeviceid1", houseid="mockhouseid1"):
return Doorbell(
deviceid, _mock_august_doorbell_data(deviceid=deviceid, houseid=houseid)
)
def _mock_august_doorbell_data(deviceid="mockdeviceid1", houseid="mockhouseid1"):
return {
"_id": deviceid,
"DeviceID": deviceid,
"name": f"{deviceid} Name",
"HouseID": houseid,
"UserType": "owner",
"serialNumber": "mockserial",
"battery": 90,
"status": "standby",
"currentFirmwareVersion": "mockfirmware",
"Bridge": {
"_id": "bridgeid1",
"firmwareVersion": "mockfirm",
"operative": True,
},
"LockStatus": {"doorState": "open"},
}
def _mock_august_lock_data(lockid="mocklockid1", houseid="mockhouseid1"):
return {
"_id": lockid,
"LockID": lockid,
"LockName": f"{lockid} Name",
"HouseID": houseid,
"UserType": "owner",
"SerialNumber": "mockserial",
"battery": 90,
"currentFirmwareVersion": "mockfirmware",
"Bridge": {
"_id": "bridgeid1",
"firmwareVersion": "mockfirm",
"operative": True,
},
"LockStatus": {"doorState": "open"},
}
async def _mock_operative_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.online.json")
async def _mock_inoperative_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.offline.json")
async def _mock_activities_from_fixture(hass, path):
json_dict = await _load_json_fixture(hass, path)
activities = []
for activity_json in json_dict:
activity = _activity_from_dict(activity_json)
if activity:
activities.append(activity)
return activities
async def _mock_lock_from_fixture(hass, path):
json_dict = await _load_json_fixture(hass, path)
return LockDetail(json_dict)
async def _mock_doorbell_from_fixture(hass, path):
json_dict = await _load_json_fixture(hass, path)
return DoorbellDetail(json_dict)
async def _load_json_fixture(hass, path):
fixture = await hass.async_add_executor_job(
load_fixture, os.path.join("august", path)
)
return json.loads(fixture)
async def _mock_doorsense_enabled_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.online_with_doorsense.json")
async def _mock_doorsense_missing_august_lock_detail(hass):
return await _mock_lock_from_fixture(hass, "get_lock.online_missing_doorsense.json")
def _mock_lock_operation_activity(lock, action, offset):
return LockOperationActivity(
SOURCE_LOCK_OPERATE,
{
"dateTime": (time.time() + offset) * 1000,
"deviceID": lock.device_id,
"deviceType": "lock",
"action": action,
},
)
def _mock_door_operation_activity(lock, action, offset):
return DoorOperationActivity(
SOURCE_LOCK_OPERATE,
{
"dateTime": (time.time() + offset) * 1000,
"deviceID": lock.device_id,
"deviceType": "lock",
"action": action,
},
)
def _activity_from_dict(activity_dict):
action = activity_dict.get("action")
activity_dict["dateTime"] = time.time() * 1000
if action in ACTIVITY_ACTIONS_DOORBELL_DING:
return DoorbellDingActivity(SOURCE_LOG, activity_dict)
if action in ACTIVITY_ACTIONS_DOORBELL_MOTION:
return DoorbellMotionActivity(SOURCE_LOG, activity_dict)
if action in ACTIVITY_ACTIONS_DOORBELL_VIEW:
return DoorbellViewActivity(SOURCE_LOG, activity_dict)
if action in ACTIVITY_ACTIONS_LOCK_OPERATION:
return LockOperationActivity(SOURCE_LOG, activity_dict)
if action in ACTIVITY_ACTIONS_DOOR_OPERATION:
return DoorOperationActivity(SOURCE_LOG, activity_dict)
if action in ACTIVITY_ACTIONS_BRIDGE_OPERATION:
return BridgeOperationActivity(SOURCE_LOG, activity_dict)
return None
|
{
"content_hash": "efb4b37a96cb810931216e7775eae8c3",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 88,
"avg_line_length": 34.829787234042556,
"alnum_prop": 0.6589034819792303,
"repo_name": "toddeye/home-assistant",
"id": "e419488beccbfbcf01036f573e73ee46f012a273",
"size": "13096",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/august/mocks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import unittest
import mock
from cli_tools.pinboard import pinboard
from core.external_modules import pandas as pd
def StateItem(revision, **kwargs):
item = {'revision': revision,
'timestamp': kwargs.pop('timestamp', '2019-03-15'),
'jobs': []}
  with_bots = kwargs.pop('with_bots', False)
for job_id, status in sorted(kwargs.items()):
job = {'id': job_id, 'status': status}
if with_bots:
job['bot'] = job_id
item['jobs'].append(job)
return item
@unittest.skipIf(pd is None, 'pandas not available')
class PinboardToolTests(unittest.TestCase):
def setUp(self):
self.cache_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.cache_dir, 'job_results'))
mock.patch(
'cli_tools.pinboard.pinboard.CACHED_DATA_DIR',
new=self.cache_dir).start()
self.subprocess = mock.patch(
'cli_tools.pinboard.pinboard.subprocess').start()
self.upload_to_cloud = mock.patch(
'cli_tools.pinboard.pinboard.UploadToCloudStorage').start()
self.download_from_cloud = mock.patch(
'cli_tools.pinboard.pinboard.DownloadFromCloudStorage').start()
self.download_from_cloud.return_value = False
def tearDown(self):
mock.patch.stopall()
shutil.rmtree(self.cache_dir)
@mock.patch('cli_tools.pinboard.pinboard.GetLastCommitOfDate')
@mock.patch('cli_tools.pinboard.pinboard.LoadJsonFile')
def testStartPinpointJobs(self, load_configs, get_last_commit):
load_configs.return_value = [{
'name': 'config1',
'configuration': 'AndroidGo'
}, {
'name': 'config2',
'configuration': 'Pixel2'
}]
get_last_commit.return_value = ('2a66bac4', '2019-03-17T23:50:16-07:00')
self.subprocess.check_output.side_effect = [
'Started: https://pinpoint.example.com/job/14b4c451f40000\n',
'Started: https://pinpoint.example.com/job/11fae481f40000\n']
state = []
pinboard.StartPinpointJobs(state, '2019-03-17')
self.assertEqual(state, [{
'revision':
'2a66bac4',
'timestamp':
'2019-03-17T23:50:16-07:00',
'jobs': [{
'id': '14b4c451f40000',
'status': 'queued',
'bot': 'AndroidGo'
}, {
'id': '11fae481f40000',
'status': 'queued',
'bot': 'Pixel2'
}]
}])
def testCollectPinpointResults(self):
state = [
StateItem('a100', job1='completed', job2='completed'),
StateItem('a200', job3='completed', job4='running'),
StateItem('a300', job5='running', job6='running')]
# Write some fake "previous" results for first revision.
df = pd.DataFrame({'revision': ['a100']})
df.to_csv(pinboard.RevisionResultsFile(state[0]), index=False)
self.subprocess.check_output.side_effect = [
'job4: completed\n',
'job5: running\njob6: failed\n',
'getting csv data ...\n'
]
expected_state = [
StateItem('a100', job1='completed', job2='completed'),
StateItem('a200', job3='completed', job4='completed'),
StateItem('a300', job5='running', job6='failed')]
pinboard.CollectPinpointResults(state)
self.assertEqual(state, expected_state)
self.subprocess.check_output.assert_has_calls([
mock.call(['vpython', pinboard.PINPOINT_CLI, 'status', 'job4'],
universal_newlines=True),
mock.call(['vpython', pinboard.PINPOINT_CLI, 'status', 'job5', 'job6'],
universal_newlines=True),
mock.call([
'vpython', pinboard.PINPOINT_CLI, 'get-csv', '--output',
pinboard.RevisionResultsFile(state[1]), '--', 'job3', 'job4'])
])
def testUpdateJobsState(self):
state = pinboard.LoadJobsState()
self.assertEqual(state, [])
# Update state with new data a couple of times.
state.append(StateItem('a100'))
pinboard.UpdateJobsState(state)
state.append(StateItem('a200'))
pinboard.UpdateJobsState(state)
# No new data. Should be a no-op.
pinboard.UpdateJobsState(state)
stored_state = pinboard.LoadJobsState()
self.assertEqual(stored_state, state)
self.assertEqual([i['revision'] for i in stored_state], ['a100', 'a200'])
self.assertEqual(self.upload_to_cloud.call_count, 2)
@mock.patch('cli_tools.pinboard.pinboard.GetRevisionResults')
@mock.patch('cli_tools.pinboard.pinboard.TimeAgo')
def testAggregateAndUploadResults(self, time_ago, get_revision_results):
state = [
StateItem('a100', timestamp='2019-03-15', job1='completed'),
StateItem('a200', timestamp='2019-03-16', job2='completed'),
StateItem('a300', timestamp='2019-03-17', job3='failed'),
StateItem('a400', timestamp='2019-03-18', job4='completed'),
StateItem('a500', timestamp='2019-03-19', job5='completed'),
]
def GetFakeResults(item):
df = pd.DataFrame(index=[0])
df['revision'] = item['revision']
df['label'] = 'with_patch'
df['benchmark'] = 'loading'
df['name'] = 'Total:duration'
df['timestamp'] = pd.Timestamp(item['timestamp'])
df['count'] = 1 if item['revision'] != 'a400' else 0
return df
get_revision_results.side_effect = GetFakeResults
time_ago.return_value = pd.Timestamp('2018-10-20')
# Only process first few revisions.
new_items, cached_df = pinboard.GetItemsToUpdate(state[:3])
pinboard.AggregateAndUploadResults(new_items, cached_df)
dataset_file = pinboard.CachedFilePath(pinboard.DATASET_CSV_FILE)
df = pd.read_csv(dataset_file)
self.assertEqual(set(df['revision']), set(['a100', 'a200']))
self.assertTrue((df[df['reference']]['revision'] == 'a200').all())
# Incrementally process the rest.
new_items, cached_df = pinboard.GetItemsToUpdate(state)
pinboard.AggregateAndUploadResults(new_items, cached_df)
dataset_file = pinboard.CachedFilePath(pinboard.DATASET_CSV_FILE)
df = pd.read_csv(dataset_file)
self.assertEqual(set(df['revision']), set(['a100', 'a200', 'a500']))
self.assertTrue((df[df['reference']]['revision'] == 'a500').all())
# No new revisions. This should be a no-op.
new_items, cached_df = pinboard.GetItemsToUpdate(state)
pinboard.AggregateAndUploadResults(new_items, cached_df)
self.assertEqual(get_revision_results.call_count, 4)
# Uploads twice (the pkl and csv) on each call to aggregate results.
self.assertEqual(self.upload_to_cloud.call_count, 2 * 2)
def testGetRevisionResults_different_bots(self):
item = StateItem(
'2a66ba',
timestamp='2019-03-17T23:50:16-07:00',
with_bots=True,
job1='completed',
job2='completed')
csv = [
'change,benchmark,story,name,unit,mean,job_id\n',
'2a66ba,loading,story1,Total:duration,ms_smallerIsBetter,300.0,job1\n',
'2a66ba,loading,story2,Total:duration,ms_smallerIsBetter,400.0,job2\n',
'2a66ba+patch,loading,story1,Total:duration,ms_smallerIsBetter,100.0,' +
'job1\n',
'2a66ba+patch,loading,story2,Total:duration,ms_smallerIsBetter,200.0,' +
'job2\n',
'2a66ba,loading,story1,Other:metric,count_smallerIsBetter,1.0,job1\n'
]
expected_results = [
('without_patch', 'job1', 0.3, '2018-03-17T12:00:00'),
('with_patch', 'job1', 0.1, '2019-03-17T12:00:00'),
('without_patch', 'job2', 0.4, '2018-03-17T12:00:00'),
('with_patch', 'job2', 0.2, '2019-03-17T12:00:00'),
]
filename = pinboard.RevisionResultsFile(item)
with open(filename, 'w') as f:
f.writelines(csv)
with mock.patch(
'cli_tools.pinboard.pinboard.ACTIVE_STORIES', new=['story1', 'story2']):
df = pinboard.GetRevisionResults(item)
    self.assertEqual(len(df.index), 4)  # Two labels x two bots = four rows.
self.assertTrue((df['revision'] == '2a66ba').all())
self.assertTrue((df['benchmark'] == 'loading').all())
self.assertTrue((df['name'] == 'Total:duration').all())
self.assertTrue((df['count'] == 1).all())
df = df.set_index(['label', 'bot'], verify_integrity=True)
for label, bot, value, timestamp in expected_results:
self.assertEqual(df.loc[label, bot]['mean'], value)
self.assertEqual(df.loc[label, bot]['timestamp'], pd.Timestamp(timestamp))
def testGetRevisionResults_simple(self):
item = StateItem('2a66ba', timestamp='2019-03-17T23:50:16-07:00')
csv = [
'change,benchmark,story,name,unit,mean,job_id\n',
'2a66ba,loading,story1,Total:duration,ms_smallerIsBetter,300.0,job1\n',
'2a66ba,loading,story2,Total:duration,ms_smallerIsBetter,400.0,job1\n',
'2a66ba+patch,loading,story1,Total:duration,ms_smallerIsBetter,100.0,' +
'job1\n',
'2a66ba+patch,loading,story2,Total:duration,ms_smallerIsBetter,200.0,' +
'job1\n',
'2a66ba,loading,story1,Other:metric,count_smallerIsBetter,1.0,job1\n'
]
expected_results = [
('without_patch', 0.35, '2018-03-17T12:00:00'),
('with_patch', 0.15, '2019-03-17T12:00:00'),
]
filename = pinboard.RevisionResultsFile(item)
with open(filename, 'w') as f:
f.writelines(csv)
with mock.patch('cli_tools.pinboard.pinboard.ACTIVE_STORIES',
new=['story1', 'story2']):
df = pinboard.GetRevisionResults(item)
self.assertEqual(len(df.index), 2) # Only two rows of output.
self.assertTrue((df['revision'] == '2a66ba').all())
self.assertTrue((df['benchmark'] == 'loading').all())
self.assertTrue((df['name'] == 'Total:duration').all())
self.assertTrue((df['count'] == 2).all())
df = df.set_index('label', verify_integrity=True)
for label, value, timestamp in expected_results:
self.assertEqual(df.loc[label, 'mean'], value)
self.assertEqual(df.loc[label, 'timestamp'], pd.Timestamp(timestamp))
def testGetRevisionResults_empty(self):
item = StateItem('2a66ba', timestamp='2019-03-17T23:50:16-07:00')
csv = [
'change,benchmark,story,name,unit,mean,job_id\n',
'2a66ba,loading,story1,Other:metric,count_smallerIsBetter,1.0,job1\n'
]
filename = pinboard.RevisionResultsFile(item)
with open(filename, 'w') as f:
f.writelines(csv)
df = pinboard.GetRevisionResults(item)
self.assertEqual(len(df.index), 1) # Only one row of output.
row = df.iloc[0]
self.assertEqual(row['revision'], '2a66ba')
self.assertEqual(row['count'], 0)
@mock.patch('cli_tools.pinboard.pinboard.FindCommit')
def testGetLastCommitOfDate_simple(self, find_commit):
commit_before = ('2a66bac4', '2019-03-17T23:50:16-07:00')
commit_after = ('5aefdb31', '2019-03-18T02:41:58-07:00')
find_commit.side_effect = [commit_after, commit_before]
date = pd.Timestamp('2019-03-17 04:01:01', tz=pinboard.TZ)
return_value = pinboard.GetLastCommitOfDate(date)
cutoff_date = pd.Timestamp('2019-03-18 00:00:00', tz=pinboard.TZ)
find_commit.assert_has_calls([
mock.call(after_date=cutoff_date),
mock.call(before_date=cutoff_date)])
self.assertEqual(return_value, commit_before)
@mock.patch('cli_tools.pinboard.pinboard.FindCommit')
def testGetLastCommitOfDate_failed(self, find_commit):
commit_before = ('2a66bac4', '2019-03-17T23:50:16-07:00')
find_commit.side_effect = [None, commit_before]
date = pd.Timestamp('2019-03-17 04:01:01', tz=pinboard.TZ)
with self.assertRaises(ValueError):
pinboard.GetLastCommitOfDate(date)
cutoff_date = pd.Timestamp('2019-03-18 00:00:00', tz=pinboard.TZ)
find_commit.assert_has_calls([
mock.call(after_date=cutoff_date)])
def testFindCommit_simple(self):
self.subprocess.check_output.return_value = '2a66bac4:1552891816\n'
date = pd.Timestamp('2019-03-18T00:00:00', tz=pinboard.TZ)
revision, timestamp = pinboard.FindCommit(before_date=date)
self.subprocess.check_output.assert_called_once_with(
['git', 'log', '--max-count', '1', '--format=format:%H:%ct',
'--before', '2019-03-18T00:00:00-07:00', 'origin/master'],
cwd=pinboard.TOOLS_PERF_DIR)
self.assertEqual(revision, '2a66bac4')
self.assertEqual(timestamp, '2019-03-17T23:50:16-07:00')
def testFindCommit_notFound(self):
self.subprocess.check_output.return_value = ''
date = pd.Timestamp('2019-03-18T00:00:00', tz=pinboard.TZ)
return_value = pinboard.FindCommit(after_date=date)
self.subprocess.check_output.assert_called_once_with(
['git', 'log', '--max-count', '1', '--format=format:%H:%ct',
'--after', '2019-03-18T00:00:00-07:00', 'origin/master'],
cwd=pinboard.TOOLS_PERF_DIR)
self.assertIsNone(return_value)
|
{
"content_hash": "0782d585267ee4a0695e63434ed5b38f",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 80,
"avg_line_length": 39.52173913043478,
"alnum_prop": 0.6471790036146472,
"repo_name": "endlessm/chromium-browser",
"id": "65481e48933f5ff57488f607276bab5902836049",
"size": "12889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/cli_tools/pinboard/pinboard_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "user_tags.tests.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "ff75b0f94cb2007af21cb000ed18ec70",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.711864406779661,
"repo_name": "bitmazk/django-user-tags",
"id": "e3ac378eef38ef58b8fd7de70798c09700f4a896",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15877"
},
{
"name": "Python",
"bytes": "49944"
}
],
"symlink_target": ""
}
|
"""The Sonarr component."""
from __future__ import annotations
from datetime import timedelta
from sonarr import Sonarr, SonarrAccessRestricted, SonarrError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_BASE_PATH,
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DATA_SONARR,
DEFAULT_UPCOMING_DAYS,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
PLATFORMS = ["sensor"]
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Sonarr from a config entry."""
if not entry.options:
options = {
CONF_UPCOMING_DAYS: entry.data.get(
CONF_UPCOMING_DAYS, DEFAULT_UPCOMING_DAYS
),
CONF_WANTED_MAX_ITEMS: entry.data.get(
CONF_WANTED_MAX_ITEMS, DEFAULT_WANTED_MAX_ITEMS
),
}
hass.config_entries.async_update_entry(entry, options=options)
sonarr = Sonarr(
host=entry.data[CONF_HOST],
port=entry.data[CONF_PORT],
api_key=entry.data[CONF_API_KEY],
base_path=entry.data[CONF_BASE_PATH],
session=async_get_clientsession(hass),
tls=entry.data[CONF_SSL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
)
try:
await sonarr.update()
except SonarrAccessRestricted as err:
raise ConfigEntryAuthFailed(
"API Key is no longer valid. Please reauthenticate"
) from err
except SonarrError as err:
raise ConfigEntryNotReady from err
entry.async_on_unload(entry.add_update_listener(_async_update_listener))
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_SONARR: sonarr,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
|
{
"content_hash": "52f2e51bf5c26ee5d09e77e3e8879a10",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 82,
"avg_line_length": 28.944444444444443,
"alnum_prop": 0.6760076775431861,
"repo_name": "aronsky/home-assistant",
"id": "b2cc13abc37faf80cb51cdc8ea94984f01958f04",
"size": "2605",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sonarr/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""Image v1 API Library Tests"""
from requests_mock.contrib import fixture
from keystoneclient import session
from openstackclient.api import image_v1
from openstackclient.tests import utils
FAKE_PROJECT = 'xyzpdq'
FAKE_URL = 'http://gopher.com'
class TestImageAPIv1(utils.TestCase):
def setUp(self):
super(TestImageAPIv1, self).setUp()
sess = session.Session()
self.api = image_v1.APIv1(session=sess, endpoint=FAKE_URL)
self.requests_mock = self.useFixture(fixture.Fixture())
class TestImage(TestImageAPIv1):
PUB_PROT = {
'id': '1',
'name': 'pub1',
'is_public': True,
'protected': True,
}
PUB_NOPROT = {
'id': '2',
'name': 'pub2-noprot',
'is_public': True,
'protected': False,
}
NOPUB_PROT = {
'id': '3',
'name': 'priv3',
'is_public': False,
'protected': True,
}
NOPUB_NOPROT = {
'id': '4',
'name': 'priv4-noprot',
'is_public': False,
'protected': False,
}
LIST_IMAGE_RESP = [
PUB_PROT,
PUB_NOPROT,
NOPUB_PROT,
NOPUB_NOPROT,
]
def test_image_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/v1/images',
json={'images': self.LIST_IMAGE_RESP},
status_code=200,
)
ret = self.api.image_list()
self.assertEqual(self.LIST_IMAGE_RESP, ret)
def test_image_list_public(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/v1/images/detail',
json={'images': self.LIST_IMAGE_RESP},
status_code=200,
)
ret = self.api.image_list(public=True)
self.assertEqual([self.PUB_PROT, self.PUB_NOPROT], ret)
def test_image_list_private(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/v1/images/detail',
json={'images': self.LIST_IMAGE_RESP},
status_code=200,
)
ret = self.api.image_list(private=True)
self.assertEqual([self.NOPUB_PROT, self.NOPUB_NOPROT], ret)
|
{
"content_hash": "bb0ec6acf036d2fe3c7d7a9588428c41",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 67,
"avg_line_length": 25.729411764705883,
"alnum_prop": 0.5486968449931413,
"repo_name": "sjsucohort6/openstack",
"id": "34fcfca4436e3b39f80162ff7ee3c6ab0f5e7fd7",
"size": "2753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/venv/lib/python2.7/site-packages/openstackclient/tests/api/test_image_v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "144982"
},
{
"name": "FreeMarker",
"bytes": "14104"
},
{
"name": "HTML",
"bytes": "8308"
},
{
"name": "Java",
"bytes": "243125"
},
{
"name": "JavaScript",
"bytes": "1493715"
},
{
"name": "Python",
"bytes": "16921939"
},
{
"name": "Shell",
"bytes": "13926"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='django-misc-base',
version='1.1.1',
description='Base miscellaneous packages for django.',
long_description=open('README.md').read(),
author='BlackWizard',
author_email='BlackWizard@mail.ru',
url='http://github.com/BlackWizard/django-misc-base',
packages=find_packages(exclude=[]),
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
install_requires=[
'django-nginx-filter-image',
'unidecode',
'django-tinymce',
'django-positions',
],
zip_safe=False,
)
|
{
"content_hash": "f427e2ac9f30def4f936bb7313228b2f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 30.214285714285715,
"alnum_prop": 0.6205673758865248,
"repo_name": "BlackWizard/django-misc-base",
"id": "ddf7b9342907179df13feabeea97658e16975ede",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12343"
}
],
"symlink_target": ""
}
|
"""A simple handler to serve static assets."""
import logging
import mimetypes
import os
import os.path
import threading
import google
import webapp2
ASSETS_PATH = os.path.join(os.path.dirname(__file__), 'assets')
class StaticFileHandler(webapp2.RequestHandler):
"""A request handler for returning static files."""
_asset_name_to_path = None
_asset_name_to_path_lock = threading.Lock()
@classmethod
def _initialize_asset_map(cls):
# Generating a list of acceptable asset files reduces the possibility of
# path attacks.
cls._asset_name_to_path = {}
assets = os.listdir(ASSETS_PATH)
for asset in assets:
path = os.path.join(ASSETS_PATH, asset)
if os.path.isfile(path):
cls._asset_name_to_path[os.path.basename(path)] = path
def get(self, asset_name):
"""Serve out the contents of a file to self.response.
Args:
asset_name: The name of the static asset to serve. Must be in ASSETS_PATH.
"""
with self._asset_name_to_path_lock:
if self._asset_name_to_path is None:
self._initialize_asset_map()
if asset_name in self._asset_name_to_path:
asset_path = self._asset_name_to_path[asset_name]
try:
with open(asset_path, 'rb') as f:
data = f.read()
except (OSError, IOError):
logging.exception('Error reading file %s', asset_path)
self.response.set_status(500)
else:
content_type, _ = mimetypes.guess_type(asset_path)
assert content_type, (
'cannot determine content-type for %r' % asset_path
)
self.response.headers['Content-Type'] = content_type
self.response.out.write(data)
else:
self.response.set_status(404)
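# Illustrative note (a sketch; the request path is an assumption): because
# lookups go through _asset_name_to_path, a traversal attempt such as
# GET /assets/../app.yaml resolves to asset_name '../app.yaml', which is not
# a key in the map, so the handler answers 404 instead of reading a file
# outside ASSETS_PATH.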
|
{
"content_hash": "235be8574fd360b7abbb9a4ad4f3ed4e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 28.34426229508197,
"alnum_prop": 0.6500867553499132,
"repo_name": "ychen820/microblog",
"id": "0d25b2f65e484fe64d2886a202c2a4eff8967a9e",
"size": "2330",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/admin/static_file_handler.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
import decimal
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.common.exceptions as com
import ibis.util as util
from ibis import literal as L
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
],
)
@pytest.mark.xfail_unsupported
def test_fillna_nullif(backend, con, expr, expected):
if expected is None:
# The exact kind of null value used differs per backend (and version).
# Example 1: Pandas returns np.nan while BigQuery returns None.
        # Example 2: PySpark returns np.nan if pyspark>=3.0.0, but returns None
        # if pyspark<3.0.0.
# TODO: Make this behavior consistent (#2365)
assert pd.isna(con.execute(expr))
else:
assert con.execute(expr) == expected
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_isna(backend, alltypes):
table = alltypes.mutate(na_col=np.nan)
table = table.mutate(none_col=None)
table = table.mutate(none_col=table['none_col'].cast('float64'))
table_pandas = table.execute()
for col in ['na_col', 'none_col']:
result = table[table[col].isnan()].execute().reset_index(drop=True)
expected = table_pandas[table_pandas[col].isna()].reset_index(
drop=True
)
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_fillna(backend, alltypes):
table = alltypes.mutate(na_col=np.nan)
table = table.mutate(none_col=None)
table = table.mutate(none_col=table['none_col'].cast('float64'))
table_pandas = table.execute()
for col in ['na_col', 'none_col']:
result = (
table.mutate(filled=table[col].fillna(0.0))
.execute()
.reset_index(drop=True)
)
expected = table_pandas.assign(
filled=table_pandas[col].fillna(0.0)
).reset_index(drop=True)
backend.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.coalesce(5, None, 4), 5),
(ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
],
)
@pytest.mark.xfail_unsupported
def test_coalesce(backend, con, expr, expected):
result = con.execute(expr)
if isinstance(result, decimal.Decimal):
# in case of Impala the result is decimal
# >>> decimal.Decimal('5.56') == 5.56
# False
assert result == decimal.Decimal(str(expected))
else:
assert result == expected
@pytest.mark.skip_backends(['dask']) # TODO - identicalTo - #2553
@pytest.mark.xfail_unsupported
def test_identical_to(backend, alltypes, con, sorted_df):
sorted_alltypes = alltypes.sort_by('id')
df = sorted_df
dt = df[['tinyint_col', 'double_col']]
ident = sorted_alltypes.tinyint_col.identical_to(
sorted_alltypes.double_col
)
expr = sorted_alltypes['id', ident.name('tmp')].sort_by('id')
result = expr.execute().tmp
expected = (dt.tinyint_col.isnull() & dt.double_col.isnull()) | (
dt.tinyint_col == dt.double_col
)
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('column', 'elements'),
[
('int_col', [1, 2, 3]),
('int_col', (1, 2, 3)),
('string_col', ['1', '2', '3']),
('string_col', ('1', '2', '3')),
('int_col', {1}),
('int_col', frozenset({1})),
],
)
@pytest.mark.xfail_unsupported
def test_isin(backend, alltypes, sorted_df, column, elements):
sorted_alltypes = alltypes.sort_by('id')
expr = sorted_alltypes[
'id', sorted_alltypes[column].isin(elements).name('tmp')
].sort_by('id')
result = expr.execute().tmp
expected = sorted_df[column].isin(elements)
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('column', 'elements'),
[
('int_col', [1, 2, 3]),
('int_col', (1, 2, 3)),
('string_col', ['1', '2', '3']),
('string_col', ('1', '2', '3')),
('int_col', {1}),
('int_col', frozenset({1})),
],
)
@pytest.mark.xfail_unsupported
def test_notin(backend, alltypes, sorted_df, column, elements):
sorted_alltypes = alltypes.sort_by('id')
expr = sorted_alltypes[
'id', sorted_alltypes[column].notin(elements).name('tmp')
].sort_by('id')
result = expr.execute().tmp
expected = ~sorted_df[column].isin(elements)
expected = backend.default_series_rename(expected)
backend.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('predicate_fn', 'expected_fn'),
[
(lambda t: t['bool_col'], lambda df: df['bool_col']),
(lambda t: ~t['bool_col'], lambda df: ~df['bool_col']),
],
)
@pytest.mark.skip_backends(['dask']) # TODO - sorting - #2553
@pytest.mark.xfail_unsupported
def test_filter(backend, alltypes, sorted_df, predicate_fn, expected_fn):
sorted_alltypes = alltypes.sort_by('id')
table = sorted_alltypes[predicate_fn(sorted_alltypes)].sort_by('id')
result = table.execute()
expected = sorted_df[expected_fn(sorted_df)]
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['dask', 'pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_filter_with_window_op(backend, alltypes, sorted_df):
sorted_alltypes = alltypes.sort_by('id')
table = sorted_alltypes
window = ibis.window(group_by=table.id)
table = table.filter(lambda t: t['id'].mean().over(window) > 3).sort_by(
'id'
)
result = table.execute()
expected = (
sorted_df.groupby(['id'])
.filter(lambda t: t['id'].mean() > 3)
.reset_index(drop=True)
)
backend.assert_frame_equal(result, expected)
@pytest.mark.xfail_unsupported
def test_case_where(backend, alltypes, df):
table = alltypes
table = table.mutate(
new_col=(
ibis.case()
.when(table['int_col'] == 1, 20)
.when(table['int_col'] == 0, 10)
.else_(0)
.end()
.cast('int64')
)
)
result = table.execute()
expected = df.copy()
mask_0 = expected['int_col'] == 1
mask_1 = expected['int_col'] == 0
expected['new_col'] = 0
expected.loc[mask_0, 'new_col'] = 20
expected.loc[mask_1, 'new_col'] = 10
    expected['new_col'] = expected['new_col'].astype('int64')
backend.assert_frame_equal(result, expected)
# PR 2635
@pytest.mark.xfail_unsupported
@pytest.mark.skip_backends(['postgres'])
def test_select_filter_mutate(backend, alltypes, df):
"""Test that select, filter and mutate are executed in right order.
Before Pr 2635, try_fusion in analysis.py would fuse these operations
together in a way that the order of the operations were wrong. (mutate
was executed before filter).
"""
t = alltypes
# Prepare the float_col so that filter must execute
# before the cast to get the correct result.
t = t.mutate(
float_col=ibis.case()
.when(t['bool_col'], t['float_col'])
.else_(np.nan)
.end()
)
# Actual test
t = t[t.columns]
t = t[~t['float_col'].isnan()]
t = t.mutate(float_col=t['float_col'].cast('int32'))
result = t.execute()
expected = df.copy()
expected.loc[~df['bool_col'], 'float_col'] = None
expected = expected[~expected['float_col'].isna()]
expected = expected.assign(float_col=expected['float_col'].astype('int32'))
backend.assert_frame_equal(result, expected)
def test_fillna_invalid(alltypes):
with pytest.raises(
com.IbisTypeError, match=r"value \['invalid_col'\] is not a field in.*"
):
alltypes.fillna({'invalid_col': 0.0})
def test_dropna_invalid(alltypes):
with pytest.raises(
com.IbisTypeError, match=r"value 'invalid_col' is not a field in.*"
):
alltypes.dropna(subset=['invalid_col'])
with pytest.raises(ValueError, match=r".*is not in.*"):
alltypes.dropna(how='invalid')
@pytest.mark.parametrize(
'replacements',
[
0.0,
0,
1,
({'na_col': 0.0}),
({'na_col': 1}),
({'none_col': 0.0}),
({'none_col': 1}),
],
)
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_fillna_table(backend, alltypes, replacements):
table = alltypes.mutate(na_col=np.nan)
table = table.mutate(none_col=None)
table = table.mutate(none_col=table['none_col'].cast('float64'))
table_pandas = table.execute()
result = table.fillna(replacements).execute().reset_index(drop=True)
expected = table_pandas.fillna(replacements).reset_index(drop=True)
# check_dtype is False here because there are dtype diffs between
# Pyspark and Pandas on Java 8 - filling the 'none_col' with an int
# results in float in Pyspark, and int in Pandas. This diff does
# not exist in Java 11.
backend.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
('how', 'subset'),
[
('any', None),
('any', []),
('any', ['int_col', 'na_col']),
('all', None),
('all', ['int_col', 'na_col']),
('all', 'none_col'),
],
)
@pytest.mark.only_on_backends(['pandas', 'dask', 'pyspark'])
def test_dropna_table(backend, alltypes, how, subset):
table = alltypes.mutate(na_col=np.nan)
table = table.mutate(none_col=None)
table = table.mutate(none_col=table['none_col'].cast('float64'))
table_pandas = table.execute()
result = table.dropna(subset, how).execute().reset_index(drop=True)
subset = util.promote_list(subset) if subset else table_pandas.columns
expected = table_pandas.dropna(how=how, subset=subset).reset_index(
drop=True
)
# check_dtype is False here because there are dtype diffs between
# Pyspark and Pandas on Java 8 - the 'bool_col' of an empty DataFrame
# is type object in Pyspark, and type bool in Pandas. This diff does
# not exist in Java 11.
backend.assert_frame_equal(result, expected, check_dtype=False)
|
{
"content_hash": "112f49477da4ebbca836e36323bb234b",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 79,
"avg_line_length": 30.785074626865672,
"alnum_prop": 0.6125278774362455,
"repo_name": "cloudera/ibis",
"id": "901ac4fd2613b71aa5ace25c6330f56e65af0d15",
"size": "10313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/backends/tests/test_generic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Python",
"bytes": "2570944"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
}
|
"""
reader
======
A minimal feed reader.
Usage
-----
Here is a small example of using reader.
Create a Reader object::
reader = make_reader('db.sqlite')
Add a feed::
reader.add_feed('http://www.hellointernet.fm/podcast?format=rss')
Update all the feeds::
reader.update_feeds()
Get all the entries, both read and unread::
entries = list(reader.get_entries())
Mark the first entry as read::
reader.mark_as_read(entries[0])
Print the titles of the unread entries::
for e in reader.get_entries(read=False):
print(e.title)
"""
__version__ = '0.17'
from .core.reader import Reader, make_reader
from .core.types import Feed, Entry, Content, Enclosure
from .core.exceptions import (
ReaderError,
FeedError,
FeedExistsError,
FeedNotFoundError,
ParseError,
EntryError,
EntryNotFoundError,
MetadataError,
MetadataNotFoundError,
StorageError,
)
# For internal use only.
_DB_ENVVAR = 'READER_DB'
_PLUGIN_ENVVAR = 'READER_PLUGIN'
|
{
"content_hash": "d37d1342664d81d82f270b46eb75652d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 69,
"avg_line_length": 15.796875,
"alnum_prop": 0.6775469831849654,
"repo_name": "kushalbhola/MyStuff",
"id": "0d1004df588bf15c122a18065963fe2b1e30ad29",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/reader/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
}
|
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name = "django-sort",
version = "0.1",
packages = ["sorting"],
py_modules = ['setup', 'ez_setup'],
author = "Agiliq and friends",
author_email ="shabda@agiliq.com",
description = "Sort arbitrary querysets in templates.",
url = "http://github.com/agiliq/django-sorting",
include_package_data = True
)
|
{
"content_hash": "4b8c790f4a93d8008db9dabce592f7f7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.6581395348837209,
"repo_name": "agiliq/django-sorting",
"id": "7f79e9825f772eabb756e0a700168dd9a2f5a78b",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23017"
}
],
"symlink_target": ""
}
|
import numpy as np
from PIL import Image
from wordcloud import WordCloud
def _load_mask(mask_fname):
return np.asarray(Image.open(mask_fname))
def generate_wordcloud(text, bgcolor, width, height, max_words, mask):
if mask is not None:
mask = _load_mask(mask)
wc = WordCloud(relative_scaling=.5, width=width, height=height,
background_color=bgcolor, mask=mask,
max_words=max_words)
return wc.generate_from_text(text)
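# Example usage (a sketch; the file names are assumptions):
#   text = open('speech.txt').read()
#   wc = generate_wordcloud(text, bgcolor='white', width=800, height=400,
#                           max_words=200, mask=None)
#   wc.to_file('cloud.png')  # WordCloud exposes to_file() for writing an image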
|
{
"content_hash": "b152a3fde779c144bdde132c5f20a8d0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 30.125,
"alnum_prop": 0.6680497925311203,
"repo_name": "neocortex/wcloud",
"id": "585aab0a54bf984cc6091be7cade380fc7573e1c",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wcloud/wcloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2268"
},
{
"name": "Python",
"bytes": "4274"
}
],
"symlink_target": ""
}
|
import time
import unittest
import node
LEADER = 1
ROUTER1 = 2
ED = 3
class Cert_6_2_1_NewPartition(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,4):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ED].start()
time.sleep(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[LEADER].stop()
time.sleep(140)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'leader')
self.assertEqual(self.nodes[ED].get_state(), 'child')
addrs = self.nodes[ED].get_addrs()
for addr in addrs:
self.assertTrue(self.nodes[ROUTER1].ping(addr))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9f02fae990c6b3fb248095ba67895e16",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 74,
"avg_line_length": 30.934426229508198,
"alnum_prop": 0.6104928457869634,
"repo_name": "JakubBrachTieto/openthread",
"id": "fac073dc4e8ba051de645bcc0a95a0afd5b2df99",
"size": "3491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_6_2_01_NewPartition.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "10128"
},
{
"name": "C",
"bytes": "550277"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3193522"
},
{
"name": "M4",
"bytes": "45778"
},
{
"name": "Makefile",
"bytes": "80751"
},
{
"name": "Python",
"bytes": "1018345"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "18076"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016 - Sean Bailey - All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Django Imports
from django.conf.urls import url
# Other Imports
from . import views
urlpatterns = [
url(r'^$', views.index, name="docs"),
url(r'^create$', views.create, name="docs_create"),
url(r'^edit/$', views.edit),
url(r'^edit/(?P<slug>[a-zA-Z0-9-]+)$', views.edit, name="edit"),
url(r'^(?P<category>[a-zA-Z0-9]+)/$', views.category, name="category"),
url(r'^(?P<category>[a-zA-Z0-9]+)/(?P<slug>[a-zA-Z0-9-]+)$', views.article, name="article"),
]
|
{
"content_hash": "9e3049fb80ae18f389a223f4efb1a772",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 96,
"avg_line_length": 35.6,
"alnum_prop": 0.6947565543071161,
"repo_name": "LuckehPickle/Comet",
"id": "bbac7647701cddedc3042774422af4056073acd9",
"size": "1068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64820"
},
{
"name": "HTML",
"bytes": "40712"
},
{
"name": "JavaScript",
"bytes": "62122"
},
{
"name": "Python",
"bytes": "68027"
}
],
"symlink_target": ""
}
|
"""
Run regression on apartment data.
"""
from __future__ import print_function
import argparse
import pandas as pd
import numpy as np
import itertools
import matplotlib.pyplot as plt
def parse_args(*argument_array):
parser = argparse.ArgumentParser()
parser.add_argument('--csv',
help='CSV file with the apartment data.')
args = parser.parse_args(*argument_array)
return args
def featurize(apartment):
"""
:param apartment: Apartment DataFrame row (a dictionary like object)
:return: (x, y) tuple, where x is a numpy vector, and y is a number
"""
return [1], apartment['price']
def poly_featurize(apartment, degree=2):
"""
:param apartment: Apartment DataFrame row (a dictionary like object)
:return: (x, y) tuple, where x is a numpy vector, and y is a number
"""
x, y = featurize(apartment)
poly_x = # TODO: use itertools.product to get higher degree elements.
return poly_x, y
def fit_ridge_regression(X, Y, l=0.1):
"""
:param X: A numpy matrix, where each row is a data element (X)
:param Y: A numpy vector of responses for each of the rows (y)
:param l: ridge variable
:return: A vector containing the hyperplane equation (beta)
"""
D = X.shape[1] # dimension + 1
beta = np.zeroes(D) # FIXME: ridge regression formula.
return beta
def cross_validate(X, Y, fitter, folds=5):
"""
:param X: A numpy matrix, where each row is a data element (X)
:param Y: A numpy vector of responses for each of the rows (y)
:param fitter: A function that takes X, Y as parameters and returns beta
:param folds: number of cross validation folds (parts)
:return: list of corss-validation scores
"""
scores = []
# TODO: Divide X, Y into `folds` parts (e.g. 5)
for i in range(folds):
# TODO: train on the rest
# TODO: Add corresponding score to scores
pass
return scores
def my_featurize(apartment):
"""
This is the function we will use for scoring your implmentation.
:param apartment: apartment row
:return: (x, y) pair where x is feature vector, y is the response variable.
"""
return x, y
def my_beta():
"""
:return: beta_hat that you estimate.
"""
return np.zeroes(1)
def main(args):
df = pd.read_csv(args.csv)
# TODO: Convert `df` into features (X) and responses (Y) using featurize
beta = fit_ridge_regression(X, Y, l=0)
# TODO you should probably create another function to pass to `cross_validate`
scores = cross_validate(X, Y, fit_ridge_regression)
print(np.mean(scores))
if __name__ == '__main__':
args = parse_args()
main(args)
|
{
"content_hash": "aeda3fa59b17ab7c765822d46a38eb93",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 82,
"avg_line_length": 28.48421052631579,
"alnum_prop": 0.647450110864745,
"repo_name": "mamikonyana/mamikonyana.github.io",
"id": "fa74512c5fdb15135161cd0243d22110211f530e",
"size": "2729",
"binary": false,
"copies": "1",
"ref": "refs/heads/flask",
"path": "static/ml_afternoon/presentation_data/practical1/practical1_TEMPLATE.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "102"
},
{
"name": "HTML",
"bytes": "11586263"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "95088"
},
{
"name": "Shell",
"bytes": "1662"
},
{
"name": "Stan",
"bytes": "872"
}
],
"symlink_target": ""
}
|
"""Allows the creation of a sensor that breaks out state_attributes."""
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASSES_SCHEMA,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_SENSORS,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_START,
MATCH_ALL,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from . import extract_entities, initialise_templates
from .const import CONF_AVAILABILITY_TEMPLATE
CONF_ATTRIBUTE_TEMPLATES = "attribute_templates"
_LOGGER = logging.getLogger(__name__)
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_FRIENDLY_NAME_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_ATTRIBUTE_TEMPLATES, default={}): vol.Schema(
{cv.string: cv.template}
),
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA)}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template sensors."""
sensors = []
for device, device_config in config[CONF_SENSORS].items():
state_template = device_config[CONF_VALUE_TEMPLATE]
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
friendly_name_template = device_config.get(CONF_FRIENDLY_NAME_TEMPLATE)
unit_of_measurement = device_config.get(ATTR_UNIT_OF_MEASUREMENT)
device_class = device_config.get(CONF_DEVICE_CLASS)
attribute_templates = device_config[CONF_ATTRIBUTE_TEMPLATES]
templates = {
CONF_VALUE_TEMPLATE: state_template,
CONF_ICON_TEMPLATE: icon_template,
CONF_ENTITY_PICTURE_TEMPLATE: entity_picture_template,
CONF_FRIENDLY_NAME_TEMPLATE: friendly_name_template,
CONF_AVAILABILITY_TEMPLATE: availability_template,
}
initialise_templates(hass, templates, attribute_templates)
entity_ids = extract_entities(
device,
"sensor",
device_config.get(ATTR_ENTITY_ID),
templates,
attribute_templates,
)
sensors.append(
SensorTemplate(
hass,
device,
friendly_name,
friendly_name_template,
unit_of_measurement,
state_template,
icon_template,
entity_picture_template,
availability_template,
entity_ids,
device_class,
attribute_templates,
)
)
async_add_entities(sensors)
return True
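# A minimal configuration.yaml entry this platform would parse (illustrative;
# the sensor name, friendly_name and template are assumptions):
#   sensor:
#     - platform: template
#       sensors:
#         sun_elevation:
#           friendly_name: "Sun elevation"
#           value_template: "{{ state_attr('sun.sun', 'elevation') }}"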
class SensorTemplate(Entity):
"""Representation of a Template Sensor."""
def __init__(
self,
hass,
device_id,
friendly_name,
friendly_name_template,
unit_of_measurement,
state_template,
icon_template,
entity_picture_template,
availability_template,
entity_ids,
device_class,
attribute_templates,
):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._friendly_name_template = friendly_name_template
self._unit_of_measurement = unit_of_measurement
self._template = state_template
self._state = None
self._icon_template = icon_template
self._entity_picture_template = entity_picture_template
self._availability_template = availability_template
self._icon = None
self._entity_picture = None
self._entities = entity_ids
self._device_class = device_class
self._available = True
self._attribute_templates = attribute_templates
self._attributes = {}
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def template_sensor_state_listener(entity, old_state, new_state):
"""Handle device state changes."""
self.async_schedule_update_ha_state(True)
@callback
def template_sensor_startup(event):
"""Update template on startup."""
if self._entities != MATCH_ALL:
# Track state change only for valid templates
async_track_state_change(
self.hass, self._entities, template_sensor_state_listener
)
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, template_sensor_startup
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self) -> Optional[str]:
"""Return the device class of the sensor."""
return self._device_class
@property
def entity_picture(self):
"""Return the entity_picture to use in the frontend, if any."""
return self._entity_picture
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return self._unit_of_measurement
@property
def available(self) -> bool:
"""Return if the device is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@property
def should_poll(self):
"""No polling needed."""
return False
async def async_update(self):
"""Update the state from the template."""
try:
self._state = self._template.async_render()
self._available = True
except TemplateError as ex:
self._available = False
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
# Common during HA startup - so just a warning
_LOGGER.warning(
"Could not render template %s, the state is unknown.", self._name
)
else:
self._state = None
_LOGGER.error("Could not render template %s: %s", self._name, ex)
attrs = {}
for key, value in self._attribute_templates.items():
try:
attrs[key] = value.async_render()
except TemplateError as err:
_LOGGER.error("Error rendering attribute %s: %s", key, err)
self._attributes = attrs
templates = {
"_icon": self._icon_template,
"_entity_picture": self._entity_picture_template,
"_name": self._friendly_name_template,
"_available": self._availability_template,
}
for property_name, template in templates.items():
if template is None:
continue
try:
value = template.async_render()
if property_name == "_available":
value = value.lower() == "true"
setattr(self, property_name, value)
except TemplateError as ex:
friendly_property_name = property_name[1:].replace("_", " ")
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
# Common during HA startup - so just a warning
_LOGGER.warning(
"Could not render %s template %s, the state is unknown.",
friendly_property_name,
self._name,
)
continue
try:
setattr(self, property_name, getattr(super(), property_name))
except AttributeError:
_LOGGER.error(
"Could not render %s template %s: %s",
friendly_property_name,
self._name,
ex,
)
|
{
"content_hash": "ad254a2d69a3f4f02c01b359ad78bcd4",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 86,
"avg_line_length": 33.4280701754386,
"alnum_prop": 0.5847591056995907,
"repo_name": "postlund/home-assistant",
"id": "0ca5571515a573bb6c1df393a0a49861d434ffcb",
"size": "9527",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/template/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
from sqlalchemy.engine import reflection
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
INDEX_COLUMNS_1 = ['project_id']
INDEX_NAME_1 = 'instances_project_id_idx'
INDEX_COLUMNS_2 = ['updated_at', 'project_id']
INDEX_NAME_2 = 'instances_updated_at_project_id_idx'
TABLE_NAME = 'instances'
def _get_table_index(migrate_engine, table_name, index_columns):
inspector = reflection.Inspector.from_engine(migrate_engine)
for idx in inspector.get_indexes(table_name):
if idx['column_names'] == index_columns:
break
else:
idx = None
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table(TABLE_NAME, meta, autoload=True)
if _get_table_index(migrate_engine, TABLE_NAME, INDEX_COLUMNS_1):
LOG.info(_LI('Skipped adding %s because an equivalent index'
' already exists.'), INDEX_NAME_1)
else:
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS_1]
index = Index(INDEX_NAME_1, *columns)
index.create(migrate_engine)
if _get_table_index(migrate_engine, TABLE_NAME, INDEX_COLUMNS_2):
LOG.info(_LI('Skipped adding %s because an equivalent index'
' already exists.'), INDEX_NAME_2)
else:
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS_2]
index = Index(INDEX_NAME_2, *columns)
index.create(migrate_engine)
|
{
"content_hash": "b22e02f98cf2d1fb96dce54b213cffdd",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 33.71739130434783,
"alnum_prop": 0.6595744680851063,
"repo_name": "hanlind/nova",
"id": "b87fcf6984855cf5f79223ac0955edc756ed2bd2",
"size": "2124",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/347_add_updated_at_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18681206"
},
{
"name": "Shell",
"bytes": "32127"
},
{
"name": "Smarty",
"bytes": "306159"
}
],
"symlink_target": ""
}
|
class TaobaoPipeline(object):
def process_item(self, item, spider):
return item
|
{
"content_hash": "cf1b2b7ba2f6535db848f7a05872a8e0",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6847826086956522,
"repo_name": "leopard7777777/taobao_deal_history",
"id": "ec6b09b74d3b646298c4b4c12130e4b55af0e870",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/pipelines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8395"
}
],
"symlink_target": ""
}
|
"""MySQLdb - A DB API v2.0 compatible interface to MySQL.
This package is a wrapper around _mysql, which mostly implements the
MySQL C API.
connect() -- connects to server
See the C API specification and the MySQL documentation for more info
on other items.
For information on how MySQLdb handles type conversion, see the
MySQLdb.converters module.
"""
__revision__ = """$Revision$"""[11:-2]
from MySQLdb.release import __version__, version_info, __author__
import _mysql
if version_info != _mysql.version_info:
raise ImportError("this is MySQLdb version %s, but _mysql is version %r" %
(version_info, _mysql.version_info))
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
from _mysql import *
from MySQLdb.constants import FIELD_TYPE
from MySQLdb.times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
class DBAPISet(frozenset):
"""A special type of set for which A == x is true if A is a
DBAPISet and x is a member of that set."""
def __eq__(self, other):
if isinstance(other, DBAPISet):
return not self.difference(other)
return other in self
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def test_DBAPISet_set_equality():
assert STRING == STRING
def test_DBAPISet_set_inequality():
assert STRING != NUMBER
def test_DBAPISet_set_equality_membership():
assert FIELD_TYPE.VAR_STRING == STRING
def test_DBAPISet_set_inequality_membership():
assert FIELD_TYPE.DATE != STRING
def Binary(x):
return bytes(x)
def Connect(*args, **kwargs):
"""Factory function for connections.Connection."""
from MySQLdb.connections import Connection
return Connection(*args, **kwargs)
connect = Connection = Connect
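# Example usage (a sketch; host and credentials are assumptions):
#   import MySQLdb
#   conn = MySQLdb.connect(host='localhost', user='user', passwd='secret', db='test')
#   cursor = conn.cursor()
#   cursor.execute('SELECT VERSION()')
#   print(cursor.fetchone())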
__all__ = [ 'BINARY', 'Binary', 'Connect', 'Connection', 'DATE',
'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks',
'TimestampFromTicks', 'DataError', 'DatabaseError', 'Error',
'FIELD_TYPE', 'IntegrityError', 'InterfaceError', 'InternalError',
'MySQLError', 'NULL', 'NUMBER', 'NotSupportedError', 'DBAPISet',
'OperationalError', 'ProgrammingError', 'ROWID', 'STRING', 'TIME',
'TIMESTAMP', 'Warning', 'apilevel', 'connect', 'connections',
'constants', 'converters', 'cursors', 'debug', 'escape', 'escape_dict',
'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'string_literal', 'threadsafety', 'version_info']
|
{
"content_hash": "c7faeac21f64ae9bb1ac415ed0a0daec",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 31.96938775510204,
"alnum_prop": 0.6769869135014364,
"repo_name": "isabernardes/Heriga",
"id": "fc41481001dbab3b46acda7ef5170a84461ca612",
"size": "3133",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Herigaenv/lib/python2.7/site-packages/MySQLdb/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "662999"
},
{
"name": "HTML",
"bytes": "116009"
},
{
"name": "JavaScript",
"bytes": "848298"
},
{
"name": "Python",
"bytes": "5703559"
},
{
"name": "Shell",
"bytes": "3711"
}
],
"symlink_target": ""
}
|
from app.api import api
from flask import abort, jsonify
from flask_cors import cross_origin
from urllib.parse import unquote
# Models
from app.models.writer import Writer
@api.route('/writers', methods=['GET'])
@cross_origin()
def get_writer_names():
"""
Return all writer names existing in the database
:return: JSON with all writer names
"""
    # Get all writers from DB
writers = Writer.query.all()
# Store writer names in an array
writers_names = []
for writer in writers:
writers_names.append(writer.name)
# return writers names in a JSON array
return jsonify(names=writers_names)
@api.route('/writers/<name>', methods=['GET'])
@cross_origin()
def get_writer(name):
"""
Return information about the writer
:param name of the writer (URL encoded)
:return: JSON with writer information
"""
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# return writer information in a JSON array
return jsonify(writer=writer.get_information())
@api.route('/writers/<name>/movies', methods=['GET'])
@cross_origin()
def get_writer_movies(name):
"""
Return the list all writer's movies
:param name of the writer (URL encoded)
:return: JSON with movies information
"""
# Get the writer in the Database (Names are unique)
writer = Writer.query.filter(Writer.name == unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# Store writer's movies in an array
movies = []
for movie in writer.movies:
movies.append(movie.get_information())
# return movies in a JSON array
return jsonify(movies=movies)
@api.route('/writers/<name>/locations', methods=['GET'])
@cross_origin()
def get_writer_locations(name):
"""
Return the list of all locations linked to a writer
:param name of the writer (URL encoded)
:return: JSON with locations
"""
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# Store the locations in an array
locations = []
for movie in writer.movies:
for location in movie.locations:
locations.append(location.get_information())
# return locations in a JSON array
return jsonify(locations=locations)
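# Client-side sketch (illustrative, not part of the API module): consuming
# these endpoints with the requests library. The base URL is a placeholder
# assumption; names must be URL-encoded because the routes unquote() them.
def _example_client(base="http://localhost:5000"):
    import requests
    from urllib.parse import quote
    names = requests.get(base + "/writers").json()["names"]
    writer = requests.get(base + "/writers/" + quote(names[0], safe="")).json()
    return writer["writer"]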
|
{
"content_hash": "f72a076299c14f66edb6a652d691f5de",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 70,
"avg_line_length": 27.526881720430108,
"alnum_prop": 0.66875,
"repo_name": "boltzj/movies-in-sf",
"id": "c72bbef26b3b7e9d0e038cea60d0bb0e5b3186ff",
"size": "2567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api/writers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28684"
}
],
"symlink_target": ""
}
|
'''
Copyright 2017 Fabio Lima and Filipe CN
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from Entity import Entity
class Problem(Entity):
def __init__(self, contest_id, index, name, description, url):
Entity.__init__(self, str(contest_id) + index, name)
self.description = description
self.url = url
self.contest = None
self.test = []
self.index = index
self.contest_id = contest_id
    def add_contest(self, contest):
        self.contest = contest
def add_test(self, test):
self.test.append(test)
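# Usage sketch (illustrative): building a problem and attaching test cases.
# The contest id, index and URL are placeholder assumptions; the entity id
# is the contest id concatenated with the problem index (e.g. "4A").
def _example_problem():
    p = Problem(4, "A", "Watermelon", "Split w into two even parts.",
                "http://codeforces.com/problemset/problem/4/A")
    p.add_test(("8", "YES"))
    p.add_test(("2", "NO"))
    return p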
|
{
"content_hash": "22a187e7a13e671a1e85d8ceeb0b59cc",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 38.1219512195122,
"alnum_prop": 0.7370441458733206,
"repo_name": "limafabio/lazycf",
"id": "1a32bb728a0abdce6357666129a1a181284d3a20",
"size": "1577",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sample/Problem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13163"
}
],
"symlink_target": ""
}
|
import tool_utils as tu
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#class EmptyState(tu.StateBase):
#def __getstate__(self):
# state = tu.StateBase.__getstate__(self)
# my_state = [self.temporary]
# return {'parent_state': state, 'self': my_state}
#def __setstate__(self, state):
# tu.StateBase.__setstate__(self, state['parent_state'])
# self.temporary = state['self'][0]
class OutcomeTool(tu.ToolBase):
def __init__(self, button, rcommander):
tu.ToolBase.__init__(self, rcommander, tu.EmptyState.TOOL_NAME, 'Add Outcome', tu.EmptyState)
self.button = button
self.rcommander.connect(self.button, SIGNAL('clicked()'), self.activate_cb)
def activate_cb(self, loaded_node_name=None):
tu.ToolBase.activate_cb(self, loaded_node_name)
self.outcome_mode()
self.rcommander.ui.add_button.setDisabled(False)
def outcome_mode(self):
cidx = self.rcommander.ui.node_settings_tabs.indexOf(self.rcommander.ui.connections_tab)
self.rcommander.ui.node_settings_tabs.setCurrentIndex(cidx)
self.rcommander.ui.run_button.setDisabled(True)
self.rcommander.ui.reset_button.setDisabled(True)
def new_node(self, name=None):
        if name is None:
nname = self.name + str(self.counter)
else:
nname = name
state = tu.EmptyState(nname, False)
return state
def set_node_properties(self, node):
self.rcommander.disable_buttons()
self.outcome_mode()
#Don't have any properties
def fill_property_box(self, pbox):
return
#Can't reset
def reset(self):
return
#Can't save
def save(self):
return
#NavigateState setting state {'self': ['navigate1', [0.0, 0.0], 0.0, 'map'],
# 'simple_state': ['navigate1', 'navigate',
# {'preempted': PyQt4.QtCore.QString(u'preempted'), 'aborted': PyQt4.QtCore.QString(u'aborted'), 'succeeded': PyQt4.QtCore.QString(u'succeeded')}]}
#
|
{
"content_hash": "daae0da7a18c20598f6bbb51baad6d07",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 179,
"avg_line_length": 33.564516129032256,
"alnum_prop": 0.6160499759730899,
"repo_name": "gt-ros-pkg/rcommander-core",
"id": "7b9b33302b76b8041311b30ee0eb2018a2d9c295",
"size": "2081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rcommander/src/rcommander/outcome_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "3117267"
},
{
"name": "R",
"bytes": "13731"
}
],
"symlink_target": ""
}
|
__author__ = 'pascal'
import mysql.connector
from datetime import datetime
import json
class LSFEventDBAccess:
def __init__(self):
credentials = json.load(open('db_credentials_MYSQL.json', 'r'))
self.cnx = mysql.connector.connect(**credentials)
def add_events(self, events):
cursor = self.cnx.cursor()
chunks = self.events_to_chunks(events,1000)
for chunk in chunks:
add_event = ('INSERT INTO events '
'(id, begin, end, title, event_link, campus, building, room, room_link, student_group, lecturer)'
'VALUES (%s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s)')
success = False
while not success:
try:
cursor.executemany(add_event, chunk)
success = True
except Exception as e:
print(e)
self.cnx.commit()
def add_event(self, event):
cursor = self.cnx.cursor()
add_event = ('INSERT INTO events '
'(id, begin, end, title, event_link, campus, building, room, room_link, student_group, lecturer)'
'VALUES (%s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s)')
        event_data = (event.id, event.begin, event.end, event.title, event.event_link, event.campus, event.building, event.room, event.room_link, event.student_group, event.lecturer)
cursor.execute(add_event, event_data)
self.cnx.commit()
# doesn't work right now
# =======================
# def initialize_db(self):
# cursor = self.cnx.cursor()
# query = ''
# with open('RoomDBInit_MYSQL.db', 'r') as f:
# query = f.read()
# cursor.execute(query, multi = True)
# self.cnx.commit()
# resets all data + resets auto_increment (although it isn't used here)
def events_to_chunks(self, events, size):
chunks = []
arr = []
s = size
for i in range(len(events)):
event = events[i]
arr.append([event.id, event.begin, event.end, event.title, event.event_link, event.campus, event.building, event.room, event.room_link,event.student_group,event.lecturer])
if s == 1:
chunks.append(arr)
arr = []
s = size
else:
s -= 1
        if arr:
            chunks.append(arr)
return chunks
def reset(self):
cursor = self.cnx.cursor()
cursor.execute('TRUNCATE events')
self.cnx.commit()
def close_connection(self):
self.cnx.close()
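# Standalone sketch (illustrative, outside the class): the same chunking
# idea as events_to_chunks, expressed with list slicing. Handy for checking
# the expected INSERT batch shapes without a database connection.
def chunk(items, size):
    return [items[i:i + size] for i in range(0, len(items), size)]
# chunk(list(range(5)), 2) -> [[0, 1], [2, 3], [4]]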
|
{
"content_hash": "6ffa474b7c60e3cd03901660f6b94918",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 190,
"avg_line_length": 35.95890410958904,
"alnum_prop": 0.5318095238095238,
"repo_name": "pascalweiss/LSFEventScraper",
"id": "e25814903f09db486c9135f4c5dc60af1ed8fd16",
"size": "2625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LSFEventDBAccess_MYSQL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23309"
}
],
"symlink_target": ""
}
|
"""The WaveBlocks Project
This file contains a simple factory method for Grid instances.
The exact subtype of the instance is read from the description.
@author: R. Bourquin
@copyright: Copyright (C) 2012 R. Bourquin
@license: Modified BSD License
"""
def create_grid(description):
"""The method that creates a :py:class:`Grid` instance and decides
which subclass to instantiate depending on the given description.
:param description: A ``description`` (``dict`` or :py:class:`ParameterProvider` instance)
with all necessary parameters.
:return: An adequate :py:class:`Grid` instance.
"""
if "grid_type" in description:
grid_type = description["type"]
else:
grid_type = "tensor_product"
if grid_type == "tensor_product":
from WaveBlocksND import TensorProductGrid
limits = description["limits"]
number_nodes = description["number_nodes"]
# TODO: Improve for one|multiple values: limits = "D*(a,b)" || "[(a1,b1), (a2,b2), ...]"
        grid = TensorProductGrid(limits, number_nodes)
    else:
        raise ValueError("Unknown grid type: " + str(grid_type))
    return grid
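# Usage sketch (illustrative): a description dict that selects the tensor
# product grid explicitly. The limits and node counts are placeholder
# assumptions matching the keys read by create_grid above.
def _example_description():
    return {"grid_type": "tensor_product",
            "limits": [(-5.0, 5.0), (-5.0, 5.0)],
            "number_nodes": [64, 64]}
# grid = create_grid(_example_description())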
|
{
"content_hash": "c662cc7a2db21e2e0d7f58dae0478fa7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 96,
"avg_line_length": 32.6764705882353,
"alnum_prop": 0.6642664266426642,
"repo_name": "WaveBlocks/WaveBlocksND",
"id": "3972e0e08f7b37e94f28f4fff557d675edf9d452",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WaveBlocksND/GridFactory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1116280"
},
{
"name": "Shell",
"bytes": "521"
}
],
"symlink_target": ""
}
|
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from hpOneView.exceptions import HPOneViewException
from config_loader import try_load_from_file
# You can use username/password or sessionID for authentication.
# Be sure to inform a valid and active sessionID.
config = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<username>",
"password": "<password>",
"sessionID": "<sessionID>"
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
# Get all connections
print("Get all connections")
cons = oneview_client.connections.get_all()
pprint(cons)
# Get all connections with interconnectUri filter
try:
print("Get connections based on interconnect uri")
filter = "interconnectUri='/rest/interconnects/794079a2-7eb4-4992-8027-e9743a40f5b0'"
cons_interconnectUri = oneview_client.connections.get_all(filter=filter)
pprint(cons_interconnectUri)
except HPOneViewException as e:
print(e.msg)
# Get first 10 connections, sorted by name
print("Get first 10 connections, sorting by name")
cons_sorted = oneview_client.connections.get_all(0, 10, sort='name:descending')
pprint(cons_sorted)
# Find connection by name
try:
print("Get connection by name")
con_byName = oneview_client.connections.get_by(
'name', "name981375475-1465399560370")
pprint(con_byName)
except HPOneViewException as e:
print(e.msg)
# Get by Uri
try:
print("Get connection by uri")
con_byUri = oneview_client.connections.get(
'/rest/connections/58ffb307-3087-4c9d-8574-44e8a79e0d6e')
pprint(con_byUri)
except HPOneViewException as e:
print(e.msg)
|
{
"content_hash": "35aba52d8007a90c42fa5a9b5201a5e7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 89,
"avg_line_length": 29.45762711864407,
"alnum_prop": 0.7266973532796318,
"repo_name": "HewlettPackard/python-hpOneView",
"id": "a95f5957bfbd94c0540b2eea63d6c67890d83563",
"size": "2898",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/v4.8.0",
"path": "examples/connections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1337929"
},
{
"name": "Shell",
"bytes": "1906"
}
],
"symlink_target": ""
}
|
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
import datetime
from django.db.models.aggregates import Count
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
for dup_group in orm['pybb.TopicReadTracker'].objects.values('topic', 'user')\
.annotate(Count('topic'), Count('user'))\
.filter(topic__count__gt=1):
for dup in orm['pybb.TopicReadTracker'].objects.filter(topic=dup_group['topic'], user=dup_group['user'])\
.order_by('-time_stamp')[1:]:
dup.delete()
for dup_group in orm['pybb.ForumReadTracker'].objects.values('forum', 'user')\
.annotate(Count('forum'), Count('user'))\
.filter(forum__count__gt=1):
            for dup in orm['pybb.ForumReadTracker'].objects.filter(forum=dup_group['forum'], user=dup_group['user'])\
.order_by('-time_stamp')[1:]:
dup.delete()
def backwards(self, orm):
"Write your backwards methods here."
pass
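    # De-duplication sketch (illustrative, outside the migration): the same
    # pattern as forwards() on a plain queryset; for each (topic, user) pair
    # seen more than once, every tracker but the newest is deleted.
    #   dups = (TopicReadTracker.objects.values('topic', 'user')
    #           .annotate(n=Count('topic')).filter(n__gt=1))
    #   for group in dups:
    #       for dup in (TopicReadTracker.objects
    #                   .filter(topic=group['topic'], user=group['user'])
    #                   .order_by('-time_stamp')[1:]):
    #           dup.delete()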
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pybb.attachment': {
'Meta': {'object_name': 'Attachment'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['pybb.Post']"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'pybb.category': {
'Meta': {'ordering': "['position']", 'object_name': 'Category'},
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'pybb.forum': {
'Meta': {'ordering': "['position']", 'object_name': 'Forum'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'forums'", 'to': "orm['pybb.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name), 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'readed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'readed_forums'", 'symmetrical': 'False', 'through': "orm['pybb.ForumReadTracker']", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'topic_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'pybb.forumreadtracker': {
'Meta': {'object_name': 'ForumReadTracker'},
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pybb.Forum']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.pollanswer': {
'Meta': {'object_name': 'PollAnswer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_answers'", 'to': "orm['pybb.Topic']"})
},
'pybb.pollansweruser': {
'Meta': {'unique_together': "(('poll_answer', 'user'),)", 'object_name': 'PollAnswerUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll_answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['pybb.PollAnswer']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_answers'", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.post': {
'Meta': {'ordering': "['created']", 'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'body_text': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['pybb.Topic']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'user_ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15', 'blank': 'True'})
},
'pybb.profile': {
'Meta': {'object_name': 'Profile'},
'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '10', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
'user': ('annoying.fields.AutoOneToOneField', [], {'related_name': "'pybb_profile'", 'unique': 'True', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.topic': {
'Meta': {'ordering': "['-created']", 'object_name': 'Topic'},
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': "orm['pybb.Forum']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'on_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'poll_question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'poll_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'readed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'readed_topics'", 'symmetrical': 'False', 'through': "orm['pybb.TopicReadTracker']", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'pybb.topicreadtracker': {
'Meta': {'object_name': 'TopicReadTracker'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pybb.Topic']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
}
}
complete_apps = ['pybb']
symmetrical = True
|
{
"content_hash": "654ecb63fdd4dd97418922f2882e427b",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 255,
"avg_line_length": 78.10795454545455,
"alnum_prop": 0.5418636793482214,
"repo_name": "acamposruiz/quecoins",
"id": "f786cc1cfefa50f6c7aa071283b8403aac69c13c",
"size": "13765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pybb/migrations/0024_delete_duplicate_read_trackers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "230928"
},
{
"name": "JavaScript",
"bytes": "27682"
},
{
"name": "Python",
"bytes": "520066"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
}
|
from django.contrib.syndication.views import Feed
from django.urls import reverse
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from pybb.models import Post, Topic
class PybbFeed(Feed):
feed_type = Atom1Feed
def link(self):
return reverse('pybb:index')
def item_guid(self, obj):
return str(obj.id)
def item_pubdate(self, obj):
return obj.created
class LastPosts(PybbFeed):
title = _('Latest posts on forum')
description = _('Latest posts on forum')
title_template = 'pybb/feeds/posts_title.html'
description_template = 'pybb/feeds/posts_description.html'
def items(self):
return Post.objects.filter(topic__forum__hidden=False,
topic__forum__forum__hidden=False).order_by('-created')[:15]
class LastTopics(PybbFeed):
title = _('Latest topics on forum')
description = _('Latest topics on forum')
title_template = 'pybb/feeds/topics_title.html'
description_template = 'pybb/feeds/topics_description.html'
def items(self):
return Topic.objects.filter(forum__hidden=False,
forum__forum__hidden=False).order_by('-created')[:15]
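# Wiring sketch (illustrative, not part of this module): exposing the feeds
# in a urls.py. The URL paths and names below are assumptions, not pybbm's
# own; Feed instances are callable and can be used directly as views.
#   from django.urls import path
#   from pybb.feeds import LastPosts, LastTopics
#   urlpatterns = [
#       path('feeds/posts/', LastPosts(), name='feed_posts'),
#       path('feeds/topics/', LastTopics(), name='feed_topics'),
#   ]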
|
{
"content_hash": "06c3843d669c2c5d99a0d94c80f4fa25",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 95,
"avg_line_length": 30.73170731707317,
"alnum_prop": 0.6603174603174603,
"repo_name": "thoas/pybbm",
"id": "ac87bd8854436323d858d0473f7de75595f4eef5",
"size": "1285",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pybb/feeds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3644"
},
{
"name": "HTML",
"bytes": "55789"
},
{
"name": "JavaScript",
"bytes": "11346"
},
{
"name": "Makefile",
"bytes": "218"
},
{
"name": "Python",
"bytes": "362855"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
class Logger(object):
def __init__(self,num_channels):
self.num_channels = num_channels
self.data = [[] for i in range(num_channels)]
def _subplot(self,chan_num):
res = self.num_channels * 100
res = res + 10 # one column only
res = res + chan_num
plt.subplot(res)
    def add_data(self,chan_num,data):
        self._subplot(chan_num)
        # Completion of the truncated original (assumed intent): store the
        # sample and redraw the channel. Subplot positions are 1-based,
        # while the data list is 0-based.
        self.data[chan_num - 1].append(data)
        plt.plot(self.data[chan_num - 1])
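if __name__ == '__main__':
    # Usage sketch (illustrative): two channels, a few samples each. The
    # sample values are arbitrary placeholder data.
    log = Logger(2)
    for t in range(10):
        log.add_data(1, t)
        log.add_data(2, t * t)
    plt.show()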
|
{
"content_hash": "e1f79b54adccc58d8895f4462cbc35b3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 53,
"avg_line_length": 29.2,
"alnum_prop": 0.589041095890411,
"repo_name": "travistang/late_fyt",
"id": "2100a861711db515ef8560a05f1bebb9594c3475",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "358950"
},
{
"name": "C",
"bytes": "1088525"
},
{
"name": "C++",
"bytes": "3701643"
},
{
"name": "CSS",
"bytes": "2093"
},
{
"name": "HTML",
"bytes": "727213"
},
{
"name": "Java",
"bytes": "870718"
},
{
"name": "JavaScript",
"bytes": "360"
},
{
"name": "M4",
"bytes": "5404"
},
{
"name": "Makefile",
"bytes": "360885"
},
{
"name": "NSIS",
"bytes": "120996"
},
{
"name": "Objective-C",
"bytes": "24134"
},
{
"name": "Python",
"bytes": "110376"
},
{
"name": "Roff",
"bytes": "6972"
},
{
"name": "Shell",
"bytes": "110228"
},
{
"name": "TeX",
"bytes": "156855"
},
{
"name": "XSLT",
"bytes": "18454"
}
],
"symlink_target": ""
}
|
"""Treadmill vring manager."""
from __future__ import absolute_import
import signal
import sys
import logging
import yaml
import click
from .. import context
from .. import discovery
from .. import logcontext as lc
from .. import utils
from .. import vring
from .. import zkutils
_LOGGER = logging.getLogger(__name__)
def init():
"""Top level command handler."""
@click.command(name='vring')
@click.argument('manifest', type=click.File('rb'))
def vring_cmd(manifest):
"""Run vring manager."""
context.GLOBAL.zk.conn.add_listener(zkutils.exit_on_disconnect)
app = yaml.load(manifest.read())
with lc.LogContext(_LOGGER, app['name'], lc.ContainerAdapter) as log:
utils.validate(app, [('vring', True, dict)])
ring = app['vring']
utils.validate(ring, [('rules', True, list), ('cells', True,
list)])
if context.GLOBAL.cell not in ring['cells']:
log.critical('cell %s not listed in vring.',
context.GLOBAL.cell)
sys.exit(-1)
ringname = 'TM_OUTPUT_RING_%d' % ring['cells'].index(
context.GLOBAL.cell)
rules = ring['rules']
for rule in rules:
utils.validate(rule, [('pattern', True, str),
('endpoints', True, list)])
# Create translation for endpoint name to expected port #.
routing = {}
for endpoint in app.get('endpoints', []):
routing[endpoint['name']] = endpoint['port']
# Check that all ring endpoints are listed in the manifest.
vring_endpoints = set()
for rule in rules:
for rule_endpoint in rule['endpoints']:
if rule_endpoint not in routing:
log.critical(
'vring references non-existing endpoint: [%s]',
rule_endpoint)
sys.exit(-1)
vring_endpoints.add(rule_endpoint)
# TODO: discovery is limited to one rule for now.
if len(rules) != 1:
log.critical('(TODO): multiple rules are not supported.')
sys.exit(-1)
pattern = rules[0]['pattern']
app_discovery = discovery.Discovery(context.GLOBAL.zk.conn,
pattern, '*')
app_discovery.sync()
# Restore default signal mask disabled by python spawning new
# thread for Zk connection.
#
# TODO: should this be done as part of ZK connect?
for sig in range(1, signal.NSIG):
try:
signal.signal(sig, signal.SIG_DFL)
except RuntimeError:
pass
vring.init(ringname)
vring.run(ringname, routing, vring_endpoints, app_discovery)
return vring_cmd
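# Manifest sketch (illustrative, not part of the module): the minimal YAML
# shape that passes the validation above. The app name, cell, pattern and
# endpoint values are placeholder assumptions.
#
#   name: proid.myapp#0000000001
#   endpoints:
#     - name: http
#       port: 8000
#   vring:
#     cells:
#       - my-cell
#     rules:
#       - pattern: proid.myapp.*
#         endpoints:
#           - http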
|
{
"content_hash": "2572e2d8e235d219e90177e077e63db5",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 77,
"avg_line_length": 33.32608695652174,
"alnum_prop": 0.5172863666014351,
"repo_name": "toenuff/treadmill",
"id": "11b59e3148589655a218d259137d1fdc82c243fc",
"size": "3066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/sproc/vring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Prolog",
"bytes": "19323"
},
{
"name": "Python",
"bytes": "1511919"
},
{
"name": "Shell",
"bytes": "29014"
}
],
"symlink_target": ""
}
|
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import random
import threading
from absl import logging
import numpy as np
import six
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import executor
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
_KEEP_ALIVE_SECS = 600
_python_eager_context_create_counter = monitoring.Counter(
"/tensorflow/api/python/eager_context_create_counter",
"Counter for number of eager contexts created in Python.")
# Re-exporting through context.
is_tfrt_enabled = tfrt_utils.enabled
# Expose it as internally public APIs for Keras use cases in b/171080602.
tf_export("__internal__.is_tfrt_enabled", v1=[])(is_tfrt_enabled)
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
__slots__ = ["_data", "_max_items", "_max_tensor_size"]
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data.clear()
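# Behavior sketch (illustrative): the FIFO eviction policy above. With
# max_items=2, inserting a third key evicts the oldest entry. The t0..t2
# values are assumed to be eager tensors (put() calls _num_elements()).
#   cache = _EagerTensorCache(max_items=2)
#   cache.put("a", t0); cache.put("b", t1); cache.put("c", t2)
#   cache.get("a") is None; cache.get("c") is t2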
class FunctionCallOptions(object):
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
__slots__ = ["_config_proto_serialized", "_executor_type"]
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default Tensorflow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or
a serialized string of that proto.
The config used by Grappler when optimizing the function graph.
Each concrete function is optimized the first time is called. Changing
config_proto after the first call has no effect.
If config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString(
deterministic=True)
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString())
else:
raise ValueError("the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config)))
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
class _TensorCaches(threading.local):
"""Thread local tensor caches."""
__slots__ = ["_ones_rank_cache", "_zeros_cache"]
def __init__(self):
super(_TensorCaches, self).__init__()
self._ones_rank_cache = None
self._zeros_cache = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn",
"device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
A context switch can take any one of the two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
@tf_export("config.LogicalDevice")
class LogicalDevice(
collections.namedtuple("LogicalDevice", ["name", "device_type"])):
"""Abstraction for a logical device initialized by the runtime.
A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
`tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
and operations can be placed on a specific logical device by calling
`tf.device` with a specified `tf.config.LogicalDevice`.
Fields:
name: The fully qualified name of the device. Can be used for Op or function
placement.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
@tf_export("config.LogicalDeviceConfiguration",
"config.experimental.VirtualDeviceConfiguration")
class LogicalDeviceConfiguration(
collections.namedtuple("LogicalDeviceConfiguration",
["memory_limit", "experimental_priority"])):
"""Configuration class for a logical devices.
The class specifies the parameters to configure a `tf.config.PhysicalDevice`
as it is initialized to a `tf.config.LogicalDevice` during runtime
initialization. Not all fields are valid for all device types.
See `tf.config.get_logical_device_configuration` and
`tf.config.set_logical_device_configuration` for usage examples.
Fields:
memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
device. Currently only supported for GPUs.
experimental_priority: (optional) Priority to assign to a virtual device.
Lower values have higher priorities and 0 is the default.
Within a physical GPU, the GPU scheduler will prioritize ops on virtual
devices with higher priority. Currently only supported for Nvidia GPUs.
"""
def __new__(cls, memory_limit=None, experimental_priority=None):
return super(LogicalDeviceConfiguration,
cls).__new__(cls, memory_limit, experimental_priority)
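# Usage sketch (illustrative): splitting one physical GPU into two logical
# devices via the public API. Must run before the runtime initializes the
# device; the 1024 MB memory limits are placeholder assumptions.
#   gpus = tf.config.list_physical_devices("GPU")
#   if gpus:
#     tf.config.set_logical_device_configuration(
#         gpus[0], [LogicalDeviceConfiguration(memory_limit=1024),
#                   LogicalDeviceConfiguration(memory_limit=1024)])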
@tf_export("config.PhysicalDevice")
class PhysicalDevice(
collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
"""Abstraction for a locally visible physical device.
TensorFlow can utilize various devices such as the CPU or multiple GPUs
for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as its visibility or memory
configuration.
Once a visible `tf.config.PhysicalDevice` is initialized one or more
`tf.config.LogicalDevice` objects are created. Use
`tf.config.set_visible_devices` to configure the visibility of a physical
device and `tf.config.set_logical_device_configuration` to configure multiple
`tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
useful when separation between models is needed or to simulate a multi-device
environment.
Fields:
name: Unique identifier for device.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
class _AtomicCounter(object):
"""A simple atomic counter."""
__slots__ = ["_value", "_lock"]
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def increment_and_get(self):
with self._lock:
self._value += 1
return self._value
_context_id_counter = _AtomicCounter()
class _TensorCacheDeleter(object):
"""Deletes tensor caches for a given context."""
__slots__ = ["_context_id"]
def __init__(self, context_id):
self._context_id = context_id
def __del__(self):
if _tensor_caches_map is None:
return
if self._context_id in _tensor_caches_map:
del _tensor_caches_map[self._context_id]
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
# This _id is used only to index the tensor caches.
# TODO(iga): Remove this when tensor caches are moved to C++.
self._id = _context_id_counter.increment_and_get()
self._tensor_cache_deleter = _TensorCacheDeleter(self._id)
_tensor_caches_map[self._id] = _TensorCaches()
self._config = config
self._thread_local_data = pywrap_tfe.EagerContextThreadLocalData(
self,
is_eager=lambda: default_execution_mode == EAGER_MODE,
device_spec=_starting_device_spec)
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._seed = None
self._initialize_lock = threading.Lock()
self._initialized = False
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
self._mirroring_policy = None
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._default_is_async = execution_mode == ASYNC
self._lazy_remote_inputs_copy = None
self._use_tfrt = is_tfrt_enabled()
self._server_def = server_def
self._collective_ops_server_def = None
self._collective_leader = None
self._collective_scoped_allocator_enabled_ops = None
self._collective_use_nccl_communication = None
self._collective_device_filters = None
self._device_lock = threading.Lock()
self._physical_devices = None
self._physical_device_to_index = None
self._visible_device_list = []
self._memory_growth_map = None
self._virtual_device_map = {}
# Values set after construction
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._enable_mlir_graph_optimization = None
self._optimizer_experimental_options = {}
_python_eager_context_create_counter.get_cell().increase_by(1)
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
# `random.Random(seed)` needs `seed` to be hashable, while values of type
# e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
# to int.
try:
hash(seed)
except TypeError:
seed = int(np.array(seed))
self._rng = random.Random(seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, user shouldn't set or depend on operation seed.
Here, we generate a random seed based on global seed to make
operation's randomness different and depend on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_logical_devices(self):
"""Helper to initialize devices."""
# Store list of devices
logical_devices = []
context_devices = []
device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tfe.TF_DeviceListCount(device_list)):
dev_name = pywrap_tfe.TF_DeviceListName(device_list, i)
context_devices.append(pydev.canonical_name(dev_name))
spec = pydev.DeviceSpec.from_string(dev_name)
# If the job is localhost, we assume that the cluster has not yet been
# configured and thus clear the job, replica & task.
if spec.job == "localhost":
spec = spec.replace(job=None, replica=None, task=None)
logical_devices.append(
LogicalDevice(name=spec.to_string(), device_type=spec.device_type))
dev_type = pywrap_tfe.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
self._logical_devices = logical_devices
self._context_devices = context_devices
pywrap_tfe.TF_DeleteDeviceList(device_list)
def ensure_initialized(self):
"""Initialize handle and devices if not already done so."""
if self._initialized:
return
with self._initialize_lock:
if self._initialized:
return
assert self._context_devices is None
opts = pywrap_tfe.TFE_NewContextOptions()
try:
config_str = self.config.SerializeToString()
pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._mirroring_policy is not None:
pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy(
opts, self._mirroring_policy)
if self._default_is_async == ASYNC:
pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True)
if self._lazy_remote_inputs_copy is not None:
pywrap_tfe.TFE_ContextOptionsSetLazyRemoteInputsCopy(
opts, self._lazy_remote_inputs_copy)
if self._use_tfrt is not None:
pywrap_tfe.TFE_ContextOptionsSetTfrt(opts, self._use_tfrt)
context_handle = pywrap_tfe.TFE_NewContext(opts)
finally:
pywrap_tfe.TFE_DeleteContextOptions(opts)
assert not (self._server_def and self._collective_ops_server_def), (
"Cannot enable remote execution as well as collective ops at the "
"moment. If this is important to you, please file an issue.")
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tfe.TFE_ContextSetServerDef(context_handle, _KEEP_ALIVE_SECS,
server_def_str)
elif self._collective_ops_server_def is not None:
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str)
self._context_handle = context_handle
self._initialize_logical_devices()
self._initialized = True
def _clear_caches(self):
self.ones_rank_cache().flush()
self.zeros_cache().flush()
pywrap_tfe.TFE_ClearScalarCache()
def get_server_def(self):
return self._server_def
def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs,
server_def_str)
self._initialize_logical_devices()
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
"""Update a server_def on the context.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
keep_alive_secs: Num. seconds after which the remote end will hang up. As
long as the client is still alive, the server state for the context will
be kept alive. If the client is killed (or there is some failure), the
server will clean up its context keep_alive_secs after the final RPC it
receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle,
keep_alive_secs, server_def_str)
self._initialize_logical_devices()
self._clear_caches()
def check_alive(self, worker_name):
"""Checks whether a remote worker is alive or not.
Args:
worker_name: a string representing the remote worker. It must be a fully
specified name like "/job:worker/replica:0/task:0".
Returns:
a boolean indicating whether the remote worker is alive or not.
Raises:
ValueError: if context is not initialized.
"""
# TODO(yuefengz): support checking multiple workers.
if self._context_handle:
return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)
else:
raise ValueError("Context is not initialized.")
def sync_executors(self):
"""Sync both local executors and the ones on remote workers.
In async execution mode, local function calls can return before the
corresponding remote op/function execution requests are completed. Calling
this method creates a synchronization barrier for remote executors. It only
returns when all remote pending nodes are finished, potentially with errors
if any remote executors are in error state.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle:
pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)
else:
raise ValueError("Context is not initialized.")
def clear_executor_errors(self):
"""Clear errors in both local executors and remote workers.
After receiving errors from remote workers, additional requests on the fly
could further taint the status on the remote workers due to the async nature
    of remote execution. Calling this method blocks on waiting for all pending
    nodes in remote executors to finish and clears their error statuses.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle:
pywrap_tfe.TFE_ContextClearExecutors(self._context_handle)
else:
raise ValueError("Context is not initialized.")
def enable_collective_ops(self, server_def):
"""Enable distributed collective ops with an appropriate server_def.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
RuntimeError: if this method is not called at program startup.
"""
if not server_def:
raise ValueError("server_def is None.")
self._collective_ops_server_def = server_def
# TODO(b/129298253): Allow creating datasets/tensors before enabling
# collective ops.
if self._context_handle is not None:
logging.warning("Enabling collective ops after program startup may cause "
"error when accessing previously created tensors.")
with self._initialize_lock:
assert self._initialized
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str)
self._initialize_logical_devices()
self._clear_caches()
def configure_collective_ops(
self,
collective_leader="",
scoped_allocator_enabled_ops=("CollectiveReduce",),
use_nccl_communication=False,
device_filters=None):
"""Configure collective ops.
Collective group leader is necessary for collective ops to run, other
configurations are mainly for the purpose of performance.
Args:
collective_leader: a device string for collective leader, e.g.
"/job:worker/replica:0/task:0"; empty string means local execution of
collective ops.
scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
allocator to run with.
use_nccl_communication: whether to use nccl communication for collective
ops.
device_filters: a tuple or a list of device strings. If set, corresponding
task can only see the devices filtered by these device filters.
Raises:
RuntimeError: if this method is not called at program startup.
"""
if self._collective_leader is not None:
if (self._collective_leader != collective_leader or
self._collective_scoped_allocator_enabled_ops !=
scoped_allocator_enabled_ops or
self._collective_use_nccl_communication != use_nccl_communication or
self._collective_device_filters != device_filters):
raise ValueError("Collective ops are already configured.")
else:
return
if self._context_handle is not None:
raise RuntimeError("Collective ops must be configured at program startup")
self._collective_leader = collective_leader
self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
self._collective_use_nccl_communication = use_nccl_communication
self._collective_device_filters = device_filters
def abort_collective_ops(self, code, message):
"""Abort the collective ops.
This is intended to be used when a peer failure is detected, which allows
the user to handle the case instead of hanging. This aborts all on-going
    collectives. Afterwards, all subsequent collectives error immediately, and you
need to reset_context() to use collectives again.
Args:
code: a `tf.errors` error code.
message: a string. The error message.
"""
self.ensure_initialized()
pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message)
def check_collective_ops_peer_health(self, task, timeout_in_ms):
"""Check collective peer health.
This probes each task to see if it's still alive. Note that a restarted
task is considered a different task and is treated as not healthy.
This should only be used in multi client multi worker training.
Args:
task: a task string, must be in the format of /job:xxx/replica:0/task:N.
timeout_in_ms: an integer, the timeout. If zero, there's no timeout.
Raises:
tf.errors.UnavailableError: when a peer is down.
tf.errors.FailedPreconditionError: when a peer is a different one from the
one this task has talked to, e.g. the peer has restarted.
tf.errors.InvalidArgumentError: when the task string is invalid.
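Example (assumes collective ops were already enabled for a cluster with a
`/job:worker` job; the task string and timeout are illustrative):
```python
from tensorflow.python.framework import errors
try:
  context.context().check_collective_ops_peer_health(
      "/job:worker/replica:0/task:1", timeout_in_ms=5000)
except (errors.UnavailableError, errors.FailedPreconditionError):
  pass  # handle the failed peer here instead of hanging
```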
"""
self.ensure_initialized()
pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth(self._handle, task,
timeout_in_ms)
@property
def _handle(self):
if self._context_handle is None:
raise AssertionError("Context must be initialized first.")
return self._context_handle
@property
def _devices(self):
if self._context_devices is None:
raise AssertionError("Context must be initialized first.")
return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._thread_local_data
old_is_eager = ctx.is_eager
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode, None)
try:
yield
finally:
ctx.is_eager = old_is_eager
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
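Example (a minimal sketch; assumes a local CPU device exists):
```python
ctx = context.context()
with ctx.device("/device:CPU:0"):
  x = constant_op.constant(1.)  # placed on CPU:0
```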
"""
if isinstance(name, LogicalDevice):
name = name.name
elif pydev.is_device_spec(name):
name = name.to_string()
return _EagerDeviceContext(self, name)
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
def host_address_space(self):
self.ensure_initialized()
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_HostAddressSpace(self._context_handle, buffer_)
address_space = pywrap_tf_session.TF_GetBuffer(buffer_).decode("utf-8")
return address_space
# TODO(fishx): remove this property.
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
return ASYNC if self.is_async() else SYNC
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
enable_async = (mode == ASYNC)
if self.is_async() != enable_async:
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
self.executor.wait()
executor_new = executor.new_executor(enable_async)
self._thread_local_data.executor = executor_new
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle,
executor_new.handle())
else:
self._default_is_async = enable_async
def is_async(self):
if self._context_handle is not None:
return self.executor.is_async()
else:
return self._default_is_async
@property
def executor(self):
self.ensure_initialized()
return executor.Executor(
pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle))
@executor.setter
def executor(self, e):
self.ensure_initialized()
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle())
@property
def config(self):
"""Return the ConfigProto with all runtime deltas applied."""
# Ensure physical devices have been discovered and config has been imported
self._initialize_physical_devices()
config = config_pb2.ConfigProto()
if self._config is not None:
config.CopyFrom(self._config)
if self._optimizer_jit is not None:
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1
if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
if self._intra_op_parallelism_threads is not None:
config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
if self._inter_op_parallelism_threads is not None:
config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
if self._soft_device_placement is not None:
config.allow_soft_placement = self._soft_device_placement
else:
config.allow_soft_placement = self.executing_eagerly()
if self._log_device_placement is not None:
config.log_device_placement = self._log_device_placement
is_mlir_bridge_enabled = pywrap_tfe.TF_IsMlirBridgeEnabled()
config.experimental.mlir_bridge_rollout = is_mlir_bridge_enabled
if (is_mlir_bridge_enabled ==
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED):
config.experimental.enable_mlir_bridge = True
if self._enable_mlir_graph_optimization is not None:
config.experimental.enable_mlir_graph_optimization = (
self._enable_mlir_graph_optimization)
def rewriter_toggle(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
(rewriter_config_pb2.RewriterConfig.ON
if toggle else rewriter_config_pb2.RewriterConfig.OFF))
def rewriter_bool(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
toggle)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
if nodes is not None:
config.graph_options.rewrite_options.min_graph_nodes = nodes
# Compute device counts
config.device_count["CPU"] = 0
config.device_count["GPU"] = 0
for dev in self._physical_devices:
if dev not in self._visible_device_list:
continue
virtual_devices = self._virtual_device_map.get(dev)
if virtual_devices is None:
config.device_count[dev.device_type] += 1
else:
config.device_count[dev.device_type] += len(virtual_devices)
# Configure gpu_options
gpu_options = self._compute_gpu_options()
config.gpu_options.MergeFrom(gpu_options)
# Configure collective ops
if self._collective_leader:
config.experimental.collective_group_leader = self._collective_leader
if self._collective_scoped_allocator_enabled_ops:
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
for op in self._collective_scoped_allocator_enabled_ops:
rewrite_options.scoped_allocator_opts.enable_op.append(op)
if self._collective_use_nccl_communication:
config.experimental.collective_nccl = True
if self._collective_device_filters:
del config.device_filters[:]
for f in self._collective_device_filters:
config.device_filters.append(f)
return config
def _compute_gpu_options(self):
"""Build the GPUOptions proto."""
visible_device_list = []
virtual_devices = []
gpu_index = -1
memory_growths = set()
for dev in self.list_physical_devices("GPU"):
gpu_index += 1
if dev not in self._visible_device_list:
continue
growth = self._memory_growth_map[dev]
memory_growths.add(growth)
visible_device_list.append(str(gpu_index))
if self._virtual_device_map:
vdevs = self._virtual_device_map.get(dev, [])
device_limits = []
priority = []
for virt_dev in vdevs:
device_limits.append(virt_dev.memory_limit)
if virt_dev.experimental_priority is not None:
priority.append(virt_dev.experimental_priority)
# If priority is specified, it must be specified for all virtual
# devices.
if priority and len(device_limits) != len(priority):
raise ValueError("priority must be specified for all virtual devices")
virtual_devices.append(
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=device_limits, priority=priority))
# Only compute growth if virtual devices have not been configured and we
# have GPUs
if not virtual_devices and memory_growths:
if len(memory_growths) > 1:
raise ValueError("Memory growth cannot differ between GPU devices")
allow_growth = memory_growths.pop()
else:
allow_growth = None
return config_pb2.GPUOptions(
allow_growth=allow_growth,
visible_device_list=",".join(visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(
virtual_devices=virtual_devices))
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns:
The FunctionCallOptions for the current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self.ensure_initialized()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
self.ensure_initialized()
fdef_string = fdef.SerializeToString()
pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string,
len(fdef_string))
def get_function_def(self, name):
"""Get a function definition from the context.
Args:
name: function signature name.
Returns:
The requested FunctionDef.
Raises:
tf.errors.NotFoundError: if name is not the name of a registered function.
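Example (a hedged sketch; "my_fn" is a placeholder for the signature name
of a function already registered with this context):
```python
fdef = context.context().get_function_def("my_fn")
print(fdef.signature.name)
```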
"""
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)
proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(proto_data)
return function_def
def register_custom_device(self, device_capsule, device_name,
device_info_capsule):
"""Calls TFE_RegisterCustomDevice. See the non-member function."""
self.ensure_initialized()
pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule,
device_name, device_info_capsule)
def pack_eager_tensors(self, tensors):
"""Pack multiple `EagerTensor`s of the same dtype and shape.
Args:
tensors: a list of EagerTensors to pack.
Returns:
A packed EagerTensor.
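Example (a minimal sketch; both tensors must share dtype and shape):
```python
t0 = constant_op.constant([1., 2.])
t1 = constant_op.constant([3., 4.])
packed = context.context().pack_eager_tensors([t0, t1])
```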
"""
self.ensure_initialized()
if self._lazy_remote_inputs_copy is not None and (
not self._lazy_remote_inputs_copy):
raise ValueError("Packing eager tensors is not supported when "
"lazy_remote_inputs_copy is disabled.")
return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)
def remove_function(self, name):
"""Remove a function from the context.
Once removed, the function cannot be executed anymore.
Args:
name: function signature name.
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)
def has_function(self, name):
"""Check if a function `name` is registered."""
self.ensure_initialized()
return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))
def add_op_callback(self, callback):
"""Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
function has finished execution, or after an op has been added to a graph,
providing access to the op's type, name, and input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature
`f(op_type, inputs, attrs, outputs, op_name=None, graph=None)`.
See doc strings in `op_callbacks.py` for details on the function
signature and its semantics.
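Example (an illustrative callback; `log_op` is a hypothetical name, and
returning None leaves the op's outputs unchanged):
```python
def log_op(op_type, inputs, attrs, outputs, op_name=None, graph=None):
  print("executed:", op_type)
context.context().add_op_callback(log_op)
```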
"""
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
def remove_op_callback(self, callback):
"""Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
"""
if callback not in self._thread_local_data.op_callbacks:
raise KeyError(
"The specified op callback has not been registered, "
"and hence cannot be removed.")
del self._thread_local_data.op_callbacks[
self._thread_local_data.op_callbacks.index(callback)]
@property
def op_callbacks(self):
return self._thread_local_data.op_callbacks
@property
def invoking_op_callbacks(self):
return self._thread_local_data.invoking_op_callbacks
@invoking_op_callbacks.setter
def invoking_op_callbacks(self, value):
self._thread_local_data.invoking_op_callbacks = value
def _initialize_physical_devices(self):
"""Get local devices visible to the system."""
# We lazy initialize self._physical_devices since we do not want to do this
# in the constructor, since the backend may not be initialized yet.
with self._device_lock:
if self._physical_devices is not None:
return
devs = pywrap_tfe.TF_ListPhysicalDevices()
self._physical_devices = [
PhysicalDevice(name=d.decode(),
device_type=d.decode().split(":")[1]) for d in devs]
self._physical_device_to_index = {
p: i for i, p in enumerate(self._physical_devices)
}
self._visible_device_list = list(self._physical_devices)
self._memory_growth_map = {
d: None for d in self._physical_devices if d.device_type == "GPU"
}
# Import device settings that may have been passed into the constructor
self._import_config()
def list_physical_devices(self, device_type=None):
"""List local devices visible to the system.
This API allows a client to query the devices before they have been
initialized by the eager runtime. Additionally, a user can filter by device
type to get only CPUs or GPUs.
Args:
device_type: Optional device type to limit results to.
Returns:
List of PhysicalDevice objects.
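Example (a minimal sketch; the result depends on the host's hardware):
```python
for dev in context.context().list_physical_devices("GPU"):
  print(dev.name, dev.device_type)
```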
"""
self._initialize_physical_devices()
if device_type is None:
return list(self._physical_devices)
return [d for d in self._physical_devices if d.device_type == device_type]
def get_device_details(self, device): # pylint: disable=redefined-outer-name
"""Returns details about a physical devices.
Args:
device: A `tf.config.PhysicalDevice` returned by
`tf.config.list_physical_devices` or `tf.config.get_visible_devices`.
Returns:
A dict with string keys.
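Example (a hedged sketch using the public wrappers; keys such as
"device_name" and "compute_capability" are only present when the device
reports them):
```python
gpus = tf.config.list_physical_devices("GPU")
if gpus:
  details = tf.config.experimental.get_device_details(gpus[0])
  print(details.get("device_name"), details.get("compute_capability"))
```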
"""
if not isinstance(device, PhysicalDevice):
raise ValueError("device must be a tf.config.PhysicalDevice, but got: "
"%s" % (device,))
if (self._physical_device_to_index is None or
device not in self._physical_device_to_index):
raise ValueError("The PhysicalDevice must be one obtained from "
"calling `tf.config.list_physical_devices`, but got: "
"%s" % (device,))
index = self._physical_device_to_index[device]
details = pywrap_tfe.TF_GetDeviceDetails(index)
# Change compute_capability from a string to a tuple
if "compute_capability" in details:
try:
major, minor = details["compute_capability"].split(".")
details["compute_capability"] = (int(major), int(minor))
except ValueError:
raise RuntimeError("Device returned compute capability an in invalid "
"format: %s" % details["compute_capability"])
return details
def _import_config(self):
"""Import config if passed in during construction.
If Context was created with a ConfigProto such as when calling
tf.compat.v1.enable_eager_execution(), then we need to pull out the
various pieces we might be replacing and import then into our internal
class representation.
"""
if self._config is None:
return
num_cpus = self._config.device_count.get("CPU", 1)
if num_cpus != 1:
cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
if num_cpus == 0:
self.set_visible_devices([], "CPU")
elif num_cpus > 1:
self.set_logical_device_configuration(
cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])
# Parse GPU options
gpus = [d for d in self._physical_devices if d.device_type == "GPU"]
# If there are no GPUs detected, simply ignore all the GPU options passed in
# rather than doing any validation checks.
if not gpus:
return
gpu_count = self._config.device_count.get("GPU", None)
visible_gpus = []
# TODO(gjn): Handle importing existing virtual GPU configuration
visible_indices = self._config.gpu_options.visible_device_list
if visible_indices:
for index in visible_indices.split(","):
if int(index) >= len(gpus):
raise ValueError("Invalid visible device index: %s" % index)
visible_gpus.append(gpus[int(index)])
else:
visible_gpus = gpus
if gpu_count is not None:
visible_gpus = visible_gpus[:gpu_count]
self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
"""Return logical devices."""
self.ensure_initialized()
if device_type is None:
return list(self._logical_devices)
return [d for d in self._logical_devices if d.device_type == device_type]
def get_visible_devices(self, device_type=None):
"""Get the list of visible devices."""
self._initialize_physical_devices()
if device_type is None:
return list(self._visible_device_list)
return [
d for d in self._visible_device_list if d.device_type == device_type
]
def set_visible_devices(self, devices, device_type=None):
"""Set the list of visible devices."""
self._initialize_physical_devices()
if not isinstance(devices, list):
devices = [devices]
for d in devices:
if d not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(d))
if device_type is not None and d.device_type != device_type:
raise ValueError("Unrecognized device: %s" % repr(d))
visible_device_list = []
if device_type is not None:
visible_device_list = [
d for d in self._visible_device_list if d.device_type != device_type
]
visible_device_list += devices
if self._visible_device_list == visible_device_list:
return
if self._context_handle is not None:
raise RuntimeError(
"Visible devices cannot be modified after being initialized")
self._visible_device_list = visible_device_list
def get_total_memory_usage(self, dev):
"""Returns total memory usage in bytes for the current device."""
self._initialize_physical_devices()
self.ensure_initialized()
return pywrap_tfe.TFE_GetTotalMemoryUsage(self._context_handle, dev)
def get_memory_growth(self, dev):
"""Get if memory growth is enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
"""Set if memory growth should be enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev in self._virtual_device_map:
raise ValueError(
"Cannot set memory growth on a device when virtual devices are configured")
if dev.device_type != "GPU":
raise ValueError("Cannot set memory growth on non-GPU devices")
if self._memory_growth_map.get(dev) == enable:
return
if self._context_handle is not None:
raise RuntimeError(
"Physical devices cannot be modified after being initialized")
self._memory_growth_map[dev] = enable
def get_logical_device_configuration(self, dev):
"""Get the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._virtual_device_map.get(dev)
def set_logical_device_configuration(self, dev, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev.device_type == "CPU":
for vdev in virtual_devices:
if vdev.memory_limit is not None:
raise ValueError("Setting memory limit on CPU virtual devices is "
"currently not supported")
if vdev.experimental_priority is not None:
raise ValueError("Setting experimental_priority on CPU virtual "
" devices is currently not supported")
elif dev.device_type == "GPU":
for vdev in virtual_devices:
if vdev.memory_limit is None:
raise ValueError(
"Setting memory limit is required for GPU virtual devices")
else:
raise ValueError("Virtual devices are not supported for %s" %
dev.device_type)
if self._virtual_device_map.get(dev) == virtual_devices:
return
if self._context_handle is not None:
raise RuntimeError(
"Virtual devices cannot be modified after being initialized")
self._virtual_device_map[dev] = virtual_devices
def get_compiler_ir(self, device_name, function_name, args, stage="hlo"):
return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name,
stage, device_name, args)
@deprecated(
None, "XLA:CPU and XLA:GPU devices are deprecated", warn_once=True)
def enable_xla_devices(self):
"""Enables XLA:CPU and XLA:GPU devices registration."""
pywrap_tfe.TF_EnableXlaDevices()
@property
def enable_mlir_bridge(self):
return pywrap_tfe.TF_IsMlirBridgeEnabled()
@property
def enable_mlir_graph_optimization(self):
return self._enable_mlir_graph_optimization
@enable_mlir_bridge.setter
def enable_mlir_bridge(self, enabled):
pywrap_tfe.TF_EnableMlirBridge(enabled)
self._thread_local_data.function_call_options = None
@enable_mlir_graph_optimization.setter
def enable_mlir_graph_optimization(self, enabled):
self._enable_mlir_graph_optimization = enabled
self._thread_local_data.function_call_options = None
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values.
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify.
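Example (illustrative values; the keys mirror those returned by
get_optimizer_experimental_options):
```python
context.context().set_optimizer_experimental_options({
    "constant_folding": False,
    "min_graph_nodes": 4,
})
```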
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._intra_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism cannot be modified after initialization.")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._inter_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism cannot be modified after initialization.")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enable):
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetSoftDevicePlacement(self._handle, enable)
self._soft_device_placement = enable
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enable):
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetLogDevicePlacement(self._handle, enable)
self._log_device_placement = enable
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
@property
def lazy_remote_inputs_copy(self):
return self._lazy_remote_inputs_copy
@lazy_remote_inputs_copy.setter
def lazy_remote_inputs_copy(self, lazy_copy):
"""Sets whether to copy remote inputs lazily for functions."""
if not isinstance(lazy_copy, bool):
raise ValueError("Expecting a boolean but got %s" % type(lazy_copy))
if self._lazy_remote_inputs_copy != lazy_copy:
if self._initialized:
raise ValueError(
"lazy_remote_inputs_copy should be set before being initialized.")
self._lazy_remote_inputs_copy = lazy_copy
@property
def use_tfrt(self):
return self._use_tfrt
@use_tfrt.setter
def use_tfrt(self, tfrt):
"""Sets whether to use TFRT."""
if not isinstance(tfrt, bool):
raise ValueError("Expecting a boolean but got %s" % type(tfrt))
if self._use_tfrt != tfrt:
if self._initialized:
raise ValueError("use_tfrt should be set before being initialized.")
self._use_tfrt = tfrt
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)
def disable_graph_collection(self):
"""Disables graph collection of executed functions."""
if not self._context_handle:
return
pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer, or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)
proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
class _EagerDeviceContext(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
__slots__ = ["_device_name", "_ctx", "_stack"]
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
except KeyError:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, six.string_types):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx.ensure_initialized()
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]) # pylint: disable=protected-access
new_device_spec = new_device_spec.make_merged_spec(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError(
"Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
# Do not set directly. Use _set_context.
_context = None
_context_lock = threading.Lock()
def _set_context_locked(ctx):
global _context
pywrap_tfe.TFE_Py_SetEagerContext(ctx)
_context = ctx
def _set_context(ctx):
with _context_lock:
_set_context_locked(ctx)
def _create_context():
with _context_lock:
if _context is None:
ctx = Context()
_set_context_locked(ctx)
def _reset_context():
"""Clears and re-initializes the singleton context.
Should only be used for testing.
"""
global _context
global _device_parsing_cache
with _context_lock:
if _context is not None:
_context._clear_caches()
_context = None
_create_context()
_device_parsing_cache = {}
pywrap_tfe.TFE_ClearScalarCache()
def context():
"""Returns a singleton context object."""
if _context is None:
_create_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def ensure_initialized():
"""Initialize the context."""
context().ensure_initialized()
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly", v1=[])
def executing_eagerly():
"""Checks whether the current thread has eager execution enabled.
Eager execution is enabled by default and this API returns `True`
in most cases. However, this API might return `False` in the following use
cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or unless
`tf.config.run_functions_eagerly(True)` was previously called.
* Executing inside a transformation function for `tf.data`.
* `tf.compat.v1.disable_eager_execution()` is called.
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:
>>> tf.config.run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.run_functions_eagerly(False)
Inside a transformation function for `tf.data`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
ctx = context_safe()
if ctx is None:
return default_execution_mode == EAGER_MODE
return ctx.executing_eagerly()
@tf_export(v1=["executing_eagerly"])
def executing_eagerly_v1():
"""Checks whether the current thread has eager execution enabled.
Eager execution is typically enabled via
`tf.compat.v1.enable_eager_execution`, but may also be enabled within the
context of a Python function via tf.contrib.eager.py_func.
When eager execution is enabled, returns `True` in most cases. However,
this API might return `False` in the following use cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or unless
`tf.config.run_functions_eagerly(True)` was previously called.
* Executing inside a transformation function for `tf.data`.
* `tf.compat.v1.disable_eager_execution()` is called.
>>> tf.compat.v1.enable_eager_execution()
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function`
after `tf.config.run_functions_eagerly(True)` is called:
>>> tf.config.run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.run_functions_eagerly(False)
Inside a transformation function for `tf.data`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
return executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def shared_name(name=None):
"""Returns the anonymous shared name GUID if no shared name is specified.
In eager mode we need to use a unique shared name to avoid spurious sharing
issues. The runtime generates a unique name on our behalf when the reserved
GUID is used as a shared name.
Args:
name: Optional shared name
Returns:
Eager compatible shared name.
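Example (a minimal sketch; "my_table" is an arbitrary name):
```python
shared_name("my_table")  # -> "my_table"
shared_name(None)        # -> the reserved GUID when executing eagerly
```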
"""
if name or not executing_eagerly():
return name
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
# Used by b/167638505 for keras backend API and Lambda layer.
@tf_export("__internal__.eager_context.eager_mode", v1=[])
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
ensure_initialized()
return context().device(name)
# Expose some properties of Context as internally public APIs (b/160348781).
@tf_export("__internal__.eager_context.get_config", v1=[])
def get_config():
"""Get the ConfigProto of Context.
Returns:
The ConfigProto of Context.
"""
return context().config
@tf_export("__internal__.eager_context.get_device_name", v1=[])
def get_device_name():
"""Get the device name for the current thread.
Returns:
The device name for the current thread.
"""
return context().device_name
@tf_export("__internal__.eager_context.set_soft_device_placement", v1=[])
def set_soft_device_placement(enabled):
"""Set if soft device placements should be allowed.
Args:
enabled: Whether to enable soft device placement.
"""
context().soft_device_placement = enabled
@tf_export("__internal__.eager_context.get_executor", v1=[])
def get_executor():
"""Get the Executor of the current thread.
Returns:
The Executor of the current thread.
"""
return context().executor
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Set if device placements should be logged.
Args:
enabled: Whether to enable device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
if mode is None:
yield
else:
ctx = context()
executor_new = executor.new_executor(mode == ASYNC)
executor_old = ctx.executor
try:
executor_old.wait()
ctx.executor = executor_new
yield
finally:
ctx.executor = executor_old
executor_new.wait()
@tf_contextlib.contextmanager
def executor_scope(e):
"""Context manager for changing executor for current thread.
Args:
e: An Executor to execute eager ops under this scope. Setting it to None
switches back to the default executor for the context.
Yields:
Context manager for setting the executor for current thread.
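Example (a minimal sketch creating a throwaway async executor):
```python
e = executor.new_executor(enable_async=True)
with executor_scope(e):
  pass  # eager ops launched here run on `e`
e.wait()  # drain pending work before discarding the executor
```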
"""
ctx = context()
executor_old = ctx.executor
try:
ctx.executor = e
yield
finally:
ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
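Example (a hedged sketch; `my_fn` is a placeholder for an already-defined
function, and the executor type string is assumed to name a registered
executor):
```python
with function_executor_type("SINGLE_THREADED_EXECUTOR"):
  my_fn()
```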
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
def is_async():
"""Returns true if current thread is in async mode."""
return context().is_async()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables graph collection of executed functions."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
@contextlib.contextmanager
def collect_graphs(optimized=True):
"""Collects a flat list of pre- or post-optimization graphs.
The collected graphs include device placements, which can be useful for
testing.
Usage:
```
@def_function.function
def f(x):
return x + constant_op.constant(1.)
with context.collect_graphs() as graphs:
with ops.device("CPU:0"):
f(constant_op.constant(1.))
graph, = graphs # `graph` contains a single GraphDef for inspection
```
Args:
optimized: whether to collect optimized graphs or non-optimized graphs
Yields:
A list of GraphDefs, populated when the context manager exits.
"""
ctx = context()
ctx.enable_graph_collection()
try:
graphs = []
yield graphs
metadata = ctx.export_run_metadata()
finally:
ctx.disable_graph_collection()
for graph in metadata.function_graphs:
if optimized:
graphs.append(graph.post_optimization_graph)
else:
graphs.append(graph.pre_optimization_graph)
def get_server_def():
return context().get_server_def()
def set_server_def(server_def):
context().set_server_def(server_def)
def update_server_def(server_def):
context().update_server_def(server_def)
def check_alive(worker_name):
return context().check_alive(worker_name)
@tf_export("experimental.async_scope")
@tf_contextlib.contextmanager
def async_scope():
"""Context manager for grouping async operations.
Ops/function calls inside the scope can return before finishing the actual
execution. When exiting the async scope, a synchronization barrier will be
automatically added to ensure the completion of all async op and function
execution, potentially raising exceptions if async execution results in
an error state.
Users may write the following code to asynchronously invoke `train_step_fn`
and log the `loss` metric for every `num_steps` steps in a training loop.
`train_step_fn` internally consumes data using `iterator.get_next()`, and may
throw OutOfRangeError when running out of data. In that case:
```
try:
with tf.experimental.async_scope():
for _ in range(num_steps):
# Step function updates the metric `loss` internally
train_step_fn()
except tf.errors.OutOfRangeError:
tf.experimental.async_clear_error()
logging.info('loss = %s', loss.numpy())
```
Yields:
Context manager for grouping async operations.
"""
# TODO(haoyuzhang): replace env var once we have a config method to turn on
# and off async streaming RPC
remote_async_env_var = "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"
old_policy = os.environ.get(remote_async_env_var)
try:
os.environ[remote_async_env_var] = str(True)
yield
# Note: sync local and remote executors iff the async block does not raise
# an exception. Triggering sync after an exception may lead to derived
# runtime errors and unexpected exception types.
context().sync_executors()
finally:
if old_policy is None:
del os.environ[remote_async_env_var]
else:
os.environ[remote_async_env_var] = old_policy
def async_wait():
"""Sync all async operations and raise any errors during execution.
In async execution mode, an op/function call can return before finishing the
actual execution. Calling this method creates a synchronization barrier for
all async op and function execution. It only returns when all pending nodes
are finished, potentially raising exceptions if async execution results in
an error state.
"""
context().sync_executors()
@tf_export("experimental.async_clear_error")
def async_clear_error():
"""Clear pending operations and error statuses in async execution.
In async execution mode, an error in op/function execution can lead to errors
in subsequent ops/functions that are scheduled but not yet executed. Calling
this method clears all pending operations and resets the async execution state.
Example:
```
while True:
try:
# Step function updates the metric `loss` internally
train_step_fn()
except tf.errors.OutOfRangeError:
tf.experimental.async_clear_error()
break
logging.info('loss = %s', loss.numpy())
```
"""
context().clear_executor_errors()
def add_function(fdef):
"""Add a function definition to the context."""
context().add_function(fdef)
def remove_function(name):
"""Remove a function from the context."""
context().remove_function(name)
def get_function_def(name):
return context().get_function_def(name)
def register_custom_device(device_capsule, device_name, device_info_capsule):
"""Calls TFE_RegisterCustomDevice to register a custom device with Python.
Enables using C extensions specifying a custom device from Python. See the
experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
details.
Note that custom devices are not currently supported inside `tf.function`s.
Args:
device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice'
containing a pointer to a TFE_CustomDevice struct. The capsule retains
ownership of the memory.
device_name: A string indicating the name to register the custom device
under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may
subsequently be passed to `with tf.device(...):`.
device_info_capsule: A PyCapsule with the name set to
'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific
struct with the initial state of the custom device (the void* device_info
argument to TFE_RegisterCustomDevice). This method takes ownership of the
memory and clears the capsule destructor.
"""
context().register_custom_device(device_capsule, device_name,
device_info_capsule)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
|
{
"content_hash": "312579646406a764c11a301831b2d25b",
"timestamp": "",
"source": "github",
"line_count": 2435,
"max_line_length": 89,
"avg_line_length": 33.91129363449692,
"alnum_prop": 0.6876498655751205,
"repo_name": "freedomtan/tensorflow",
"id": "046a09f96382bf0f147ed561d116e5169edf5b0c",
"size": "83263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""URLs for the ``calendarium`` app."""
from django.conf.urls import url
from . import views
urlpatterns = [
# event views
url(r'^event/create/$',
views.EventCreateView.as_view(),
name='calendar_event_create'),
url(r'^event/(?P<pk>\d+)/$',
views.EventDetailView.as_view(),
name='calendar_event_detail'),
url(r'^event/(?P<pk>\d+)/update/$',
views.EventUpdateView.as_view(),
name='calendar_event_update'),
url(r'^event/(?P<pk>\d+)/delete/$',
views.EventDeleteView.as_view(),
name='calendar_event_delete'),
# occurrence views
url(r'^event/(?P<pk>\d+)/date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$',
views.OccurrenceDetailView.as_view(),
name='calendar_occurrence_detail'),
url(
r'^event/(?P<pk>\d+)/date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/update/$', # NOPEP8
views.OccurrenceUpdateView.as_view(),
name='calendar_occurrence_update'),
url(
r'^event/(?P<pk>\d+)/date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/delete/$', # NOPEP8
views.OccurrenceDeleteView.as_view(),
name='calendar_occurrence_delete'),
# calendar views
url(r'^(?P<year>\d+)/(?P<month>\d+)/$',
views.MonthView.as_view(),
name='calendar_month'),
url(r'^(?P<year>\d+)/week/(?P<week>\d+)/$',
views.WeekView.as_view(),
name='calendar_week'),
url(r'^(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$',
views.DayView.as_view(),
name='calendar_day'),
url(r'^get-events/$',
views.UpcomingEventsAjaxView.as_view(),
name='calendar_upcoming_events'),
url(r'^$',
views.CalendariumRedirectView.as_view(),
name='calendar_current_month'),
]
|
{
"content_hash": "5591f0754486a8867b63adb3e21e59a5",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 96,
"avg_line_length": 29.098360655737704,
"alnum_prop": 0.5526760563380282,
"repo_name": "jimga150/HealthNet",
"id": "075dfe00884dffcec2359cec7b75eb10951a5b68",
"size": "1775",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "HealthNet/calendarium/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "764"
},
{
"name": "CSS",
"bytes": "180096"
},
{
"name": "HTML",
"bytes": "794145"
},
{
"name": "JavaScript",
"bytes": "189857"
},
{
"name": "Python",
"bytes": "296717"
},
{
"name": "Shell",
"bytes": "1428"
}
],
"symlink_target": ""
}
|
import pickle
import greentest
from gevent.ares import ares_host_result
class TestPickle(greentest.TestCase):
# Issue 104: ares.ares_host_result unpickleable
def _test(self, protocol):
r = ares_host_result('family', ('arg1', 'arg2', ))
dumped = pickle.dumps(r, protocol)
loaded = pickle.loads(dumped)
assert r == loaded, (r, loaded)
assert r.family == loaded.family, (r, loaded)
def test0(self):
return self._test(0)
def test1(self):
return self._test(1)
def test2(self):
return self._test(2)
if pickle.HIGHEST_PROTOCOL == 3:
def test3(self):
return self._test(3)
else:
assert pickle.HIGHEST_PROTOCOL == 2, pickle.HIGHEST_PROTOCOL
if __name__ == '__main__':
greentest.main()
|
{
"content_hash": "49b1b507e80d4eb5e800d075f6605253",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 24.484848484848484,
"alnum_prop": 0.6039603960396039,
"repo_name": "ubuntuvim/GoAgent",
"id": "1881dd0e6574bc142c837c27bfc9d9ae67a9bca6",
"size": "808",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "local/gevent-1.0rc2/greentest/test__ares_host_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1180"
},
{
"name": "Batchfile",
"bytes": "3932"
},
{
"name": "C",
"bytes": "2826955"
},
{
"name": "CSS",
"bytes": "1026"
},
{
"name": "HTML",
"bytes": "1489"
},
{
"name": "JavaScript",
"bytes": "265957"
},
{
"name": "Makefile",
"bytes": "29339"
},
{
"name": "PHP",
"bytes": "5225"
},
{
"name": "Python",
"bytes": "2371042"
},
{
"name": "Shell",
"bytes": "663032"
},
{
"name": "Visual Basic",
"bytes": "3152"
}
],
"symlink_target": ""
}
|
"""This example adds various types of targeting criteria to a given campaign.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
# Replace the value below with the ID of a feed that has been configured for
# location targeting, meaning it has an ENABLED FeedMapping with criterionType
# of 77. Feeds linked to a GMB account automatically have this FeedMapping.
# If you don't have such a feed, set this value to None.
LOCATION_FEED_ID = 'INSERT_LOCATION_FEED_ID_HERE'
def main(client, campaign_id, location_feed_id=None):
# Initialize appropriate service.
campaign_criterion_service = client.GetService(
'CampaignCriterionService', version='v201506')
# Create locations. The IDs can be found in the documentation or retrieved
# with the LocationCriterionService.
california = {
'xsi_type': 'Location',
'id': '21137'
}
mexico = {
'xsi_type': 'Location',
'id': '2484'
}
# Create languages. The IDs can be found in the documentation or retrieved
# with the ConstantDataService.
english = {
'xsi_type': 'Language',
'id': '1000'
}
spanish = {
'xsi_type': 'Language',
'id': '1003'
}
# Create location groups. The IDs can be found in the documentation or
# retrieved with the LocationCriterionService.
florida_tier3 = {
'xsi_type': 'LocationGroups',
'matchingFunction': {
'operator': 'AND',
'lhsOperand': [{
'xsi_type': 'IncomeOperand',
# Tiers are numbered 1-10, and represent 10% segments of earners.
# For example, TIER_1 is the top 10%, TIER_2 is the 80-90%, etc.
# Tiers 6 through 10 are grouped into TIER_6_TO_10.
'tier': 'TIER_3'
}],
'rhsOperand': [{
'xsi_type': 'GeoTargetOperand',
'locations': [1015116] # Miami, FL
}]
},
}
florida_downtown = {
'xsi_type': 'LocationGroups',
'matchingFunction': {
'operator': 'AND',
'lhsOperand': [{
'xsi_type': 'PlacesOfInterestOperand',
# Other valid options: AIRPORT, UNIVERSITY.
'category': 'DOWNTOWN',
}],
'rhsOperand': [{
'xsi_type': 'GeoTargetOperand',
'locations': [1015116] # Miami, FL
}]
}
}
# Create a negative campaign criterion operation.
negative_campaign_criterion_operand = {
'xsi_type': 'NegativeCampaignCriterion',
'campaignId': campaign_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'jupiter cruise'
}
}
criteria = [california, mexico, english, spanish, florida_tier3,
florida_downtown]
if location_feed_id:
# Distance targeting. Area of 10 miles around targets above.
criteria.append({
'xsi_type': 'LocationGroups',
'feedId': location_feed_id,
'matchingFunction': {
'operator': 'IDENTITY',
'lhsOperand': [{
'xsi_type': 'LocationExtensionOperand',
'radius': {
'xsi_type': 'ConstantOperand',
'type': 'DOUBLE',
'unit': 'MILES',
'doubleValue': 10
}
}]
}
})
# Create operations
operations = []
for criterion in criteria:
operations.append({
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'criterion': criterion
}
})
# Add the negative campaign criterion.
operations.append({
'operator': 'ADD',
'operand': negative_campaign_criterion_operand
})
# Make the mutate request.
result = campaign_criterion_service.mutate(operations)
# Display the resulting campaign criteria.
for campaign_criterion in result['value']:
print ('Campaign criterion with campaign id \'%s\', criterion id \'%s\', '
'and type \'%s\' was added.'
% (campaign_criterion['campaignId'],
campaign_criterion['criterion']['id'],
campaign_criterion['criterion']['type']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID, LOCATION_FEED_ID)
|
{
"content_hash": "bff03aef38bb340f01490ed36c477f4c",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 31.815068493150687,
"alnum_prop": 0.5939720129171152,
"repo_name": "richardfergie/googleads-python-lib",
"id": "16db54a21c3c96a5bd7745e4740392b57373b00f",
"size": "5263",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adwords/v201506/targeting/add_campaign_targeting_criteria.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
import datetime
from flask import Blueprint
from flask.ext.login import LoginManager
from flask import Flask
# from compass.api.v1.api import v1_app
from compass.utils import setting_wrapper as setting
from compass.utils import util
app = Flask(__name__)
app.debug = True
# blueprint = Blueprint('v2_app', __name__)
# app.register_blueprint(v1_app, url_prefix='/v1.0')
# app.register_blueprint(blueprint, url_prefix='/api')
app.config['SECRET_KEY'] = 'abcd'
app.config['AUTH_HEADER_NAME'] = setting.USER_AUTH_HEADER_NAME
app.config['REMEMBER_COOKIE_DURATION'] = (
datetime.timedelta(
seconds=util.parse_time_interval(setting.USER_TOKEN_DURATION)
)
)
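# For example (assuming USER_TOKEN_DURATION uses a string form such as '2h'),
# util.parse_time_interval would yield 7200 seconds for the cookie lifetime.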
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
|
{
"content_hash": "f1a9ef9e89ffb6c5f862944ecd103c36",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 27.357142857142858,
"alnum_prop": 0.737597911227154,
"repo_name": "baigk/compass-core",
"id": "784fe231cbe3cdc4d3bef4d7dec0e10509c2b7d9",
"size": "1354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compass/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1489"
},
{
"name": "Python",
"bytes": "1354192"
},
{
"name": "RAML",
"bytes": "111925"
},
{
"name": "Ruby",
"bytes": "4345"
},
{
"name": "Shell",
"bytes": "124261"
}
],
"symlink_target": ""
}
|
"""
WSGI config for depotexample project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "depotexample.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Configure a "default" storage based on DEPOT settings
from django.conf import settings
from depot.manager import DepotManager
DepotManager.configure('default', settings.DEPOT, prefix='')
# Wrap the application with depot middleware to serve files on /depot
application = DepotManager.make_middleware(application)
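# Minimal usage sketch (an assumption, not part of this example): with the
# 'default' depot configured above, stored files are served by the middleware
# under /depot/.
#
# depot = DepotManager.get()
# file_id = depot.create(b'hello world', 'hello.txt')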
|
{
"content_hash": "da6018d4979eddbd03a17cc2faa2428e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 32.54545454545455,
"alnum_prop": 0.7946927374301676,
"repo_name": "amol-/depot",
"id": "68d77d03bb2ac994f6af71bc2511461ddbef1f67",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/django/depotexample/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161671"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_list
author: "Wayne Witzel III (@wwitzel3)"
short_description: List Ansible Tower jobs.
description:
- List Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
status:
description:
- Only list jobs with this status.
choices: ['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']
type: str
page:
description:
- Page number of the results to fetch.
type: int
all_pages:
description:
- Fetch all the pages and return a single result.
type: bool
default: 'no'
query:
description:
      - Query used to further filter the list of jobs. C({"foo":"bar"}) will be passed as C(?foo=bar)
type: dict
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- name: List running jobs for the testing.yml playbook
tower_job_list:
status: running
query: {"playbook": "testing.yml"}
tower_config_file: "~/tower_cli.cfg"
register: testing_jobs
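# A second, assumed usage sketch (not from the original module docs):
# fetch every page of successful jobs in a single result.
- name: List all successful jobs across all pages
  tower_job_list:
    status: successful
    all_pages: yes
  register: successful_jobs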
'''
RETURN = '''
count:
  description: Total count of objects returned
returned: success
type: int
sample: 51
next:
description: next page available for the listing
returned: success
type: int
sample: 3
previous:
description: previous page available for the listing
returned: success
type: int
sample: 1
results:
description: a list of job objects represented as dictionaries
returned: success
type: list
sample: [{"allow_simultaneous": false, "artifacts": {}, "ask_credential_on_launch": false,
"ask_inventory_on_launch": false, "ask_job_type_on_launch": false, "failed": false,
"finished": "2017-02-22T15:09:05.633942Z", "force_handlers": false, "forks": 0, "id": 2,
"inventory": 1, "job_explanation": "", "job_tags": "", "job_template": 5, "job_type": "run"}, ...]
'''
from ..module_utils.tower_api import TowerAPIModule
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
status=dict(choices=['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']),
page=dict(type='int'),
all_pages=dict(type='bool', default=False),
query=dict(type='dict'),
)
# Create a module for ourselves
module = TowerAPIModule(
argument_spec=argument_spec,
mutually_exclusive=[
('page', 'all_pages'),
]
)
# Extract our parameters
query = module.params.get('query')
status = module.params.get('status')
page = module.params.get('page')
all_pages = module.params.get('all_pages')
job_search_data = {}
if page:
job_search_data['page'] = page
if status:
job_search_data['status'] = status
if query:
job_search_data.update(query)
if all_pages:
job_list = module.get_all_endpoint('jobs', **{'data': job_search_data})
else:
job_list = module.get_endpoint('jobs', **{'data': job_search_data})
# Attempt to look up jobs based on the status
module.exit_json(**job_list['json'])
if __name__ == '__main__':
main()
|
{
"content_hash": "7afcaeb0ce00de55a102f6e7f79cb27f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 112,
"avg_line_length": 28.783333333333335,
"alnum_prop": 0.6137811233352635,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "642a48b03b15cedcc067cc85a9e066e71668aeb5",
"size": "3642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx_collection/plugins/modules/tower_job_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Combinations from multiple sequences
Source: ASPN: Python Cookbook
Title: Generating combinations of objects from multiple sequences
Submitter: David Klaffenbach (other recipes)
Last Updated: 2004/08/29
Version no: 1.0
Category: Algorithms
Description:
The function combine takes multiple sequences and creates a list in which
each item is constructed from items from each input sequence, and all possible
combinations are created. If that description is confusing, look at the
example in the docstring. It's a pretty simple transformation. The function
xcombine is similar, but returns a generator rather than creating the output
all at once.
"""
def combine(*seqin):
'''returns a list of all combinations of argument sequences.
for example: combine((1,2),(3,4)) returns
[[1, 3], [1, 4], [2, 3], [2, 4]]'''
def rloop(seqin,listout,comb):
'''recursive looping function'''
if seqin: # any more sequences to process?
for item in seqin[0]:
newcomb=comb+[item] # add next item to current comb
# call rloop w/ rem seqs, newcomb
rloop(seqin[1:],listout,newcomb)
else: # processing last sequence
listout.append(comb) # comb finished, add to list
listout=[] # listout initialization
rloop(seqin,listout,[]) # start recursive process
return listout
def xcombine(*seqin):
'''returns a generator which returns combinations of argument sequences
for example xcombine((1,2),(3,4)) returns a generator; calling the next()
    method on the generator will return [1,3], [1,4], [2,3], [2,4] and then
    raise StopIteration. This will not create the whole list of
combinations in memory at once.'''
def rloop(seqin,comb):
'''recursive looping function'''
if seqin: # any more sequences to process?
for item in seqin[0]:
newcomb=comb+[item] # add next item to current combination
# call rloop w/ remaining seqs, newcomb
for item in rloop(seqin[1:],newcomb):
yield item # seqs and newcomb
else: # processing last sequence
yield comb # comb finished, add to list
return rloop(seqin,[])
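if __name__ == '__main__':
    # Minimal usage sketch (an addition, not part of the original recipe):
    # the generator form iterates lazily instead of building the whole list.
    print(combine((1, 2), (3, 4)))  # [[1, 3], [1, 4], [2, 3], [2, 4]]
    for comb in xcombine((1, 2), (3, 4)):
        print(comb)  # [1, 3], then [1, 4], then [2, 3], then [2, 4]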
|
{
"content_hash": "1b6407fdf41ff5f1d193d9d8b94abc0b",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 44.51851851851852,
"alnum_prop": 0.620216306156406,
"repo_name": "rothadamg/UPSITE",
"id": "feea9eba87c1d64c41ede8a1a7dd7f0a684a93ab",
"size": "2404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Utils/Libraries/combine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "3148"
},
{
"name": "Python",
"bytes": "1816733"
}
],
"symlink_target": ""
}
|
"""The AES decrypter object implementation."""
from Crypto.Cipher import AES
from dfvfs.encryption import decrypter
from dfvfs.encryption import manager
from dfvfs.lib import definitions
class AESDecrypter(decrypter.Decrypter):
"""Class that implements a AES decrypter using pycrypto."""
ENCRYPTION_METHOD = definitions.ENCRYPTION_METHOD_AES
ENCRYPTION_MODES = {
definitions.ENCRYPTION_MODE_CBC : AES.MODE_CBC,
definitions.ENCRYPTION_MODE_CFB : AES.MODE_CFB,
definitions.ENCRYPTION_MODE_ECB : AES.MODE_ECB,
definitions.ENCRYPTION_MODE_OFB : AES.MODE_OFB}
def __init__(
self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):
"""Initializes the decrypter object.
Args:
cipher_mode (Optional[str]): cipher mode.
initialization_vector (Optional[bytes]): initialization vector.
key (Optional[bytes]): key.
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when key is not set, block cipher mode is not supported,
or initialization_vector is required and not set.
"""
if not key:
raise ValueError(u'Missing key.')
    # Keep the original value for error reporting, since the lookup below
    # replaces it with the pycrypto mode constant.
    mode_string = cipher_mode
    cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode, None)
    if cipher_mode is None:
      raise ValueError(u'Unsupported cipher mode: {0!s}'.format(mode_string))
if cipher_mode != AES.MODE_ECB and not initialization_vector:
# Pycrypto does not create a meaningful error when initialization vector
# is missing. Therefore, we report it ourselves.
raise ValueError(u'Missing initialization vector.')
super(AESDecrypter, self).__init__()
if cipher_mode == AES.MODE_ECB:
self._aes_cipher = AES.new(key, mode=cipher_mode)
else:
self._aes_cipher = AES.new(
key, IV=initialization_vector, mode=cipher_mode)
def Decrypt(self, encrypted_data):
"""Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data.
"""
index_split = -(len(encrypted_data) % AES.block_size)
if index_split:
remaining_encrypted_data = encrypted_data[index_split:]
encrypted_data = encrypted_data[:index_split]
else:
remaining_encrypted_data = b''
decrypted_data = self._aes_cipher.decrypt(encrypted_data)
return decrypted_data, remaining_encrypted_data
manager.EncryptionManager.RegisterDecrypter(AESDecrypter)
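if __name__ == '__main__':
  # Minimal usage sketch (an assumption, not part of dfvfs): decrypt a single
  # AES-CBC block with placeholder key and initialization vector values.
  aes_decrypter = AESDecrypter(
      cipher_mode=definitions.ENCRYPTION_MODE_CBC,
      initialization_vector=b'\x00' * 16, key=b'\x01' * 16)
  decrypted_data, remaining_data = aes_decrypter.Decrypt(b'\x00' * 16)
  print(decrypted_data, remaining_data)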
|
{
"content_hash": "a3c187ad427d7a9f478e36764b99798c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 33.12,
"alnum_prop": 0.6952495974235104,
"repo_name": "dc3-plaso/dfvfs",
"id": "bea363c02c378943d5f1b7b05a783b4786242746",
"size": "2508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfvfs/encryption/aes_decrypter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "Python",
"bytes": "1397977"
},
{
"name": "Shell",
"bytes": "1522"
}
],
"symlink_target": ""
}
|
import re
import textwrap
from flowui.widget import Widget
class Table(Widget):
'''Table widget
The table is a widget for structured data presentation. It is very similar
to the table widget of HTML or as used by many spreadsheet tools. A table
can consist of rows and or cells that are laid out in an orderly fashion on
screen.
Cells that are stored in rows are sized so that each column lines up
vertically in every row. The size of each cell is calculated based on the
maximum available table width and the size of each cell. If the cells of a
row are too big to fit on one row they are broken up into multiple lines
which are aligned within each column.
Cells that are not stored in rows are all normalized to the same size and
lined up vertically in the table as if they all belonged to a row
consisting of the maximum number of cells that will fit on a row.
'''
def _max_cell_width(self, terminal):
max_width = 0
for cell in self._cells:
max_width = max(max_width, cell.width(terminal))
return max_width
def __init__(self):
self._rows = []
self._cols_per_row = 0
self._cells = []
def add_cell(self, cell):
'''Adds the cell to the table'''
assert isinstance(cell, Cell)
self._cells.append(cell)
def add_row(self, row):
'''Adds the row to the table'''
assert isinstance(row, Row)
self._cols_per_row = max(len(row.cells()), self._cols_per_row)
self._rows.append(row)
def _draw_cells(self, terminal, width):
cell_width = self._max_cell_width(terminal)
cells_per_row = int(width / cell_width)
assert cells_per_row
cell_row_width = (cell_width * cells_per_row)
row_padding_begin = int((width - cell_row_width) / 2)
row_padding_end = (width - cell_row_width - row_padding_begin)
terminal.write('%s' % ' ' * int(row_padding_begin))
cell_offset = 0
for cell in self._cells:
if cells_per_row <= cell_offset:
cell_offset = 0
terminal.write('%s\n%s' % (' ' * row_padding_end,
' ' * row_padding_begin))
cell.draw(terminal, cell_width)
cell_offset += 1
last_row_padding = (width - (cell_width * cell_offset) -
row_padding_begin)
terminal.write('%s\n' % (' ' * last_row_padding))
def _cols_median(self, terminal):
cols_width = [[0 for i in range(len(self._rows))]
for j in range(self._cols_per_row)]
for i in range(len(self._rows)):
row = self._rows[i]
for j in range(len(row.cells())):
cell = row.cells()[j]
cols_width[j][i] = cell.width(terminal)
cols_median = [0 for i in range(len(cols_width))]
for i in range(len(cols_width)):
lst = sorted(cols_width[i])
length = len(lst)
if not length % 2:
neighbours_sum = (lst[int(length / 2)] +
lst[int(length / 2) - 1])
cols_median[i] = int(neighbours_sum / 2)
else:
cols_median[i] = lst[int(length / 2)]
return cols_median
def _cols_mean(self, terminal):
cols_width = [[0 for i in range(len(self._rows))]
for j in range(self._cols_per_row)]
for i in range(len(self._rows)):
row = self._rows[i]
for j in range(len(row.cells())):
cell = row.cells()[j]
cols_width[j][i] = cell.width(terminal)
cols_mean = [0 for i in range(len(cols_width))]
for i in range(len(cols_width)):
cols_mean[i] = int(sum(cols_width[i]) / len(cols_width[i]))
return cols_mean
def _fill_widths(self, widths, wanted_widths, max_widths):
for i in range(len(max_widths)):
if widths[i] is not None:
continue
elif wanted_widths[i] <= max_widths[i]:
widths[i] = wanted_widths[i]
return widths
def _col_widths(self, terminal, width):
cell_widths = []
for row in self._rows:
cells = len(row.cells())
cell_widths.extend([0] * (cells - len(cell_widths)))
for i in range(len(row.cells())):
cell_widths[i] = max(cell_widths[i],
row.cells()[i].width(terminal))
if width < sum(cell_widths):
adjusted_widths = [None] * len(cell_widths)
mean_widths = [(int(width / len(cell_widths)))] * len(cell_widths)
adjusted_widths = self._fill_widths(adjusted_widths, cell_widths,
mean_widths)
mean_widths = self._cols_mean(terminal)
median_widths = self._cols_median(terminal)
if adjusted_widths.count(None):
left_width = width - sum([x for x in adjusted_widths
if x is not None])
max_cell_width = int(left_width / adjusted_widths.count(None))
max_widths = [min(max(mean_widths[i], median_widths[i]),
max_cell_width)
for i in range(len(cell_widths))]
adjusted_widths = self._fill_widths(adjusted_widths,
cell_widths, max_widths)
while adjusted_widths.count(None):
left_width = width - sum([x for x in adjusted_widths
if x is not None])
max_cell_width = int(left_width / adjusted_widths.count(None))
current_min = min([cell_widths[i]
for i in range(len(cell_widths))
if adjusted_widths[i] is None])
index = cell_widths.index(current_min)
adjusted_widths[index] = max_cell_width
return adjusted_widths
return cell_widths
def _draw_rows(self, terminal, width):
cell_widths = self._col_widths(terminal, width)
row_width = sum(cell_widths)
for row in self._rows:
while row is not None:
row = row.draw(terminal, cell_widths)
padding = ' ' * (width - row_width)
terminal.write('%s\n' % padding)
def draw(self, terminal, width):
'''Draw the table on the specified terminal constrained to the
specified width'''
if self._rows:
self._draw_rows(terminal, width)
if self._cells:
self._draw_cells(terminal, width)
class Row(Widget):
'''A table row'''
def __init__(self):
super(Row, self).__init__()
self._cells = []
def cells(self):
'''Returns a list of the cells stored in the row'''
return self._cells
def add_cell(self, cell):
'''Appends the cell to the row'''
self._cells.append(cell)
def width(self, terminal):
'''Calculate and return the width of the row in characters'''
width = 0
for cell in self._cells:
width += cell.width(terminal)
return width
def draw(self, terminal, cell_widths):
'''Draw the row on the terminal using the defined column widths'''
assert len(cell_widths) == len(self._cells)
cells_rest = []
for i in range(len(self._cells)):
rest = self._cells[i].draw(terminal, cell_widths[i])
if len(rest):
cells_rest.append(rest)
else:
cells_rest.append(None)
if cells_rest.count(None) < len(cells_rest):
row = Row()
for content in cells_rest:
if content is None:
row.add_cell(Cell(''))
else:
row.add_cell(Cell(content))
return row
return None
class Cell(Widget):
'''A table cell'''
_format_exp = re.compile(r'(%\([^\s]+?\)s)')
def __init__(self, contents=''):
super(Cell, self).__init__()
self._contents = ''
if contents:
self._contents = (' %s ' % contents)
    def width(self, terminal):
        '''Calculate and return the width in characters of the cell contents'''
        return terminal.len(self._contents)
def contents(self):
'''Return the contents of the cell'''
return self._contents
def draw(self, terminal, width):
'''Draw the cell on the terminal constrained to the specified width'''
split = self._format_exp.split(self._contents)
contents = [[]]
width_left = width
last_format = ''
for item in split:
if self._format_exp.match(item):
contents[-1].append(item)
last_format = item
else:
item_len = len(item.replace('%%', '%'))
if item_len <= width_left:
contents[-1].append(item)
width_left -= item_len
elif item_len < width:
contents.append([last_format, item])
width_left = width - item_len
elif len(item[:width_left]):
first = textwrap.wrap(item[:width_left], width_left)[0]
contents[-1].append(first)
rest = textwrap.wrap(item[len(first):], width)
for line in rest:
contents.append([last_format, line])
width_left = width - len(contents[-1][-1])
contents = [''.join(x) for x in contents]
cell_content = ''
if len(contents):
cell_content = contents[0]
line_width = terminal.len(cell_content)
padding = ' ' * (width - line_width)
terminal.write(('%(contents)s%(padding)s' %
{'contents': cell_content,
'padding': padding}))
return ''.join(contents[1:])
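# Minimal usage sketch (an assumption, not part of FlowUI): `terminal` is any
# object providing the write() and len() methods used above.
#
# table = Table()
# header = Row()
# header.add_cell(Cell('name'))
# header.add_cell(Cell('value'))
# table.add_row(header)
# table.draw(terminal, 80)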
|
{
"content_hash": "797105f4aa9073fbc93f0a8ad009106b",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 36.09540636042403,
"alnum_prop": 0.5211943220753793,
"repo_name": "dholm/FlowUI",
"id": "a7b76c4039680062aa16486eb6ea3dd701625fcc",
"size": "11804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowui/widgets/table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "63028"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
{
"content_hash": "003a807b994143a46a6347ab20847831",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 421,
"avg_line_length": 66.97727272727273,
"alnum_prop": 0.6426874787919918,
"repo_name": "whitesmith/hawkpost",
"id": "752d31c0354ff2d0a45d6e9dffca024d96b695ac",
"size": "3019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "humans/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61"
},
{
"name": "HTML",
"bytes": "79786"
},
{
"name": "JavaScript",
"bytes": "20849"
},
{
"name": "Python",
"bytes": "117564"
},
{
"name": "SCSS",
"bytes": "45475"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django import http
from django.conf import settings
from django.contrib import messages
from django.core import exceptions
from daydreamer.core import urlresolvers
from . import base
__all__ = ("Denial",)
class Denial(base.Deny):
"""
An abstract base class providing a denial behavior framework.
Implements the deny() method, which when called:
* If exceptions are enabled, raises the configured exception.
* If a denial message has been specified, enqueues a message with the
django.contrib.messages framework.
* Finally, redirects to a denial URL with an optional query string
specifying the next URL, i.e. to redirect back when desired.
This encapsulates common view patterns and makes them configurable
through a declarative API, facilitating the implementation of many
view behaviors and helping you avoid repetition of boilerplate code. The
view behaviors in daydreamer.views.behaviors make heavy use of these
features and can simplify your view implementations.
The deny() method should be called with a prefix name that it
will use to retrieve attributes that customize its behavior as follows:
When the <prefix>_raise attribute is set to a truthy value, raises
<prefix>_exception.
Set the <prefix>_exception attribute to an exception class or
instance to raise. Defaults to django.core.exceptions.PermissionDenied
when falsy.
Set the <prefix>_message attribute to a message to enqueue via the
django.contrib.messages framework. If falsy, no message will
be enqueued.
Set the <prefix>_message_level attribute to the level of the failure
message, a value from django.contrib.messages.constants. Defaults to
WARNING when falsy.
Set the <prefix>_message_tags attribute to a space-separated string
specifying additional tags to add to the message, typically used for
CSS styling purposes. Defaults to the empty string when falsy.
Set the <prefix>_redirect_url attribute to the failure redirect URL.
Defaults to settings.LOGIN_URL when falsy. The URL must be a string or
a lazy string, such as the result of
    django.core.urlresolvers.reverse_lazy(). However, settings.LOGIN_URL
may be a named URL pattern, as documented for Django's settings.
Set the <prefix>_redirect_next_url attribute to the URL of the value
for the query string parameter to append to the redirect URL. Defaults
to the request's fully-qualified URL when falsy.
Set the <prefix>_redirect_next_name attribute to the name of the
query string parameter to append to the redirect URL for the value
of the next URL. If None, no query string parameter will be added to
the redirect URL. A typical value would be
django.contrib.auth.REDIRECT_FIELD_NAME.
Additional object-oriented hooks are provided by the implementation. See
the source code for details.
"""
# Hooks for resolving attribute values used by deny().
def get_denial_attr(self, prefix, attr):
"""
A hook to customize the way that attributes for the deny()
method are retrieved from a prefix and attribute name.
The default implementation joins the prefix and attribute name with
"_" and looks up the attribute with getattr. This will raise an
AttributeError if the generated attribute name does not exist.
"""
return getattr(self, "_".join((prefix, attr)))
def get_denial_raise(self, prefix):
"""
A hook to customize resolution of the exception raising setting
used by deny().
The default implementation returns self.<prefix>_raise.
"""
return self.get_denial_attr(prefix, "raise")
def get_denial_exception(self, prefix):
"""
        A hook to customize resolution of the exception value to raise,
used by deny().
The default implementation returns self.<prefix>_exception,
defaulting to django.core.exceptions.PermissionDenied when falsy.
"""
return (
self.get_denial_attr(prefix, "exception") or
exceptions.PermissionDenied)
def get_denial_message(self, prefix):
"""
A hook to customize resolution of the message value to enqueue,
used by deny().
The default implementation returns self.<prefix>_message.
"""
return self.get_denial_attr(prefix, "message")
def get_denial_message_level(self, prefix):
"""
A hook to customize resolution of the message level used
by deny().
The default implementation returns self.<prefix>_message_level,
defaulting to django.contrib.messages.WARNING when falsy.
"""
return (
self.get_denial_attr(prefix, "message_level") or
messages.WARNING)
def get_denial_message_tags(self, prefix):
"""
A hook to customize resolution of the message tags used
by deny().
The default implementation returns self.<prefix>_message_tags,
defaulting to the empty string when falsy.
"""
return (
self.get_denial_attr(prefix, "message_tags") or "")
def get_denial_redirect_url(self, prefix):
"""
A hook to customize resolution of the redirect URL used
by deny().
The default implementation returns self.<prefix>_redirect_url,
defaulting to the resolved value for settings.LOGIN_URL when falsy.
"""
redirect_url = self.get_denial_attr(prefix, "redirect_url")
try:
redirect_url = (
redirect_url or urlresolvers.reverse(settings.LOGIN_URL))
except urlresolvers.NoReverseMatch:
redirect_url = settings.LOGIN_URL
return redirect_url
def get_denial_redirect_next_url(self, prefix):
"""
A hook to customize resolution of the redirect's next URL used
by deny().
The default implementation returns self.<prefix>_redirect_next_url,
defaulting to the request's fully-qualified URL when falsy.
"""
return (
self.get_denial_attr(prefix, "redirect_next_url") or
self.request.build_absolute_uri())
def get_denial_redirect_next_name(self, prefix):
"""
A hook to customize resolution of the redirect next URL query parameter
name used by deny().
The default implementation returns self.<prefix>_redirect_next_name.
"""
return self.get_denial_attr(prefix, "redirect_next_name")
def get_denial_full_redirect_url(self, prefix):
"""
A hook to customize building of the full redirect URL used
by deny().
The default implementation updates the query string for
self.get_denial_redirect_url() to include a parameter named
self.get_denial_redirect_next_name() with a value of
self.get_denial_redirect_next_url(). The query string is updated
only when the parameter name is truthy. Simplifies the query string
value with respect to the redirect URL and request.
"""
redirect_url = self.get_denial_redirect_url(prefix)
next_name = self.get_denial_redirect_next_name(prefix)
if next_name:
return urlresolvers.update_query(
redirect_url, {
next_name: urlresolvers.simplify_redirect(
self.get_denial_redirect_next_url(prefix),
redirect_url,
request=self.request)})
return redirect_url
# Hooks to modify the implementation of deny().
def denial_raise_exception(self, prefix):
"""
A hook to customize raising of an exception in deny().
In the default implementation, when self.get_denial_raise() is
truthy, raises self.get_denial_exception().
"""
if self.get_denial_raise(prefix):
raise self.get_denial_exception(prefix)
def denial_enqueue_message(self, prefix):
"""
A hook to customize message enqueuing in deny().
        The default implementation enqueues self.get_denial_message() using
the other self.get_denial_message_*() settings when the message
is truthy.
"""
message = self.get_denial_message(prefix)
if message:
messages.add_message(
self.request,
self.get_denial_message_level(prefix),
message,
extra_tags=self.get_denial_message_tags(prefix))
def denial_respond(self, prefix):
"""
        A hook to customize the response in deny().
The default implementation resolves the redirect URL with
self.get_denial_full_redirect_url() and returns the
resolved URL in a redirect response.
"""
return http.HttpResponseRedirect(
self.get_denial_full_redirect_url(prefix))
def deny(self, prefix):
"""
The handler that should be called upon access test failure with
the prefix name for the behavior-controlling attributes.
The default implementation optionally attempts to raise an exception
with denial_raise_exception(). Then, it optionally enqueues a
message with denial_enqueue_message(). Finally, it returns a
redirect response which may include an optional next URL query string
value with denial_respond().
"""
self.denial_raise_exception(prefix)
self.denial_enqueue_message(prefix)
return self.denial_respond(prefix)
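# Minimal sketch (an assumption, not part of daydreamer): a concrete mixin
# would declare <prefix>_* attributes and call deny() on test failure, e.g.:
#
# class LoginDenial(Denial):
#     login_raise = False
#     login_exception = None
#     login_message = "Please log in to continue."
#     login_message_level = None
#     login_message_tags = ""
#     login_redirect_url = None
#     login_redirect_next_url = None
#     login_redirect_next_name = "next"
#
#     def handle_unauthenticated(self):
#         # hypothetical hook name; the real test hook depends on base.Deny
#         return self.deny("login")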
|
{
"content_hash": "641fcef9490955e3864b6e527f8b0844",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 38.36466165413534,
"alnum_prop": 0.6295933365997061,
"repo_name": "skibblenybbles/django-daydreamer",
"id": "edff76611e339f6966470fedcd194e3277fc1a75",
"size": "10205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daydreamer/views/core/behaviors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "241801"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, jsonify, render_template, copy_current_request_context
from flask.ext.socketio import SocketIO, emit, session
import gevent
from lib import create_monitor
import os
app = Flask(__name__)
app.config['DEBUG'] = True
socketio = SocketIO(app)
monitor = create_monitor(os.environ["MONITOR_ADDR"], socketio)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/statuses')
def statuses():
return jsonify({"statuses": monitor.get_statuses()})
@app.route('/available-workers')
def available_workers():
return jsonify(monitor.get_available_workers_per_model())
@socketio.on("stream_statuses")
def start(message):
pass
if __name__ == "__main__":
from gevent import monkey
monkey.patch_all()
gevent.spawn(monitor.run)
socketio.run(app, host="0.0.0.0", port=80)
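# Illustrative check (an assumption, not part of this service): with the app
# running, the JSON endpoints can be queried directly, e.g.
# curl http://localhost:80/statuses
# curl http://localhost:80/available-workers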
|
{
"content_hash": "b11fa602ee3282e489cd6ddb70a799ae",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 88,
"avg_line_length": 26.46875,
"alnum_prop": 0.7083825265643447,
"repo_name": "UFAL-DSG/cloud-asr",
"id": "f35b30e662bd64944f0265a1db70f72bf2aeca71",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudasr/monitor/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25465"
},
{
"name": "Dockerfile",
"bytes": "2323"
},
{
"name": "HTML",
"bytes": "51375"
},
{
"name": "JavaScript",
"bytes": "18939"
},
{
"name": "Makefile",
"bytes": "13090"
},
{
"name": "Python",
"bytes": "146531"
},
{
"name": "Shell",
"bytes": "3780"
}
],
"symlink_target": ""
}
|
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_exec, h2o_glm, h2o_gbm, h2o_exec as h2e
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1, java_heap_GB=10, base_port=54333)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1, java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_train_predict_all_all(self):
h2o.beta_features = True
importFolderPath = "standard"
csvFilename = 'covtype.shuffled.data'
csvPathname = importFolderPath + "/" + csvFilename
hex_key = csvFilename + ".hex"
# Parse and Exec************************************************
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=180)
execExpr="A.hex=%s" % parseResult['destination_key']
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
# use exec to change the output col to binary, case_mode/case_val doesn't work if we use predict
# will have to live with random extract. will create variance
        # class 1 = 1, everything else 0
y = 54
execExpr="A.hex[,%s]=(A.hex[,%s]==%s)" % (y+1, y+1, 1) # class 1
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
inspect = h2o_cmd.runInspect(key="A.hex")
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "Use same data (full) for train and test"
trainDataKey = "A.hex"
testDataKey = "A.hex"
# start at 90% rows + 1
# GLM, predict, CM*******************************************************8
kwargs = {
'response': 'C' + str(y+1),
'max_iter': 20,
'n_folds': 0,
# 'alpha': 0.1,
# 'lambda': 1e-5,
'alpha': 0.0,
'lambda': None,
'family': 'binomial',
}
timeoutSecs = 60
for trial in range(1):
# test/train split **********************************************8
aHack = {'destination_key': trainDataKey}
# GLM **********************************************8
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs)
print "glm end on ", parseResult['destination_key'], 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
modelKey = glm['glm_model']['_key']
submodels = glm['glm_model']['submodels']
# hackery to make it work when there's just one
validation = submodels[-1]['validation']
best_threshold = validation['best_threshold']
thresholds = validation['thresholds']
# have to look up the index for the cm, from the thresholds list
best_index = None
for i,t in enumerate(thresholds):
if t == best_threshold:
best_index = i
break
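            # Equivalent to best_index = thresholds.index(best_threshold);
            # the explicit loop is kept to mirror the original control flow.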
cms = validation['_cms']
cm = cms[best_index]
            trainPctWrong = h2o_gbm.pp_cm_summary(cm['_arr'])
# Score **********************************************
predictKey = 'Predict.hex'
start = time.time()
predictResult = h2o_cmd.runPredict(
data_key=testDataKey,
model_key=modelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual=testDataKey,
vactual='C' + str(y+1),
predict=predictKey,
vpredict='predict',
)
cm = predictCMResult['cm']
# These will move into the h2o_gbm.py
            pctWrong = h2o_gbm.pp_cm_summary(cm)
            self.assertEqual(pctWrong, trainPctWrong, "Should see the same error rate on train and predict (same data set)")
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
print "Trial #", trial, "completed"
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "c85a0279b03e137d0cecc2d08f79512c",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 135,
"avg_line_length": 38.256198347107436,
"alnum_prop": 0.5154461006696911,
"repo_name": "woobe/h2o",
"id": "3320cce54b64f0977ab0d787d8ed287e7b4d78e0",
"size": "4629",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_GLM2_covtype_train_predict_all_all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import struct
from sys import argv
from msc import *
from param import *
from time import sleep
from random import randint,random
from os.path import isfile
from argparse import ArgumentParser
from math import sqrt, cos, sin, atan2
class FunctionInfo:
def __init__(self, thisLocalVarPos, returnAddress, stackPos):
self.localVarPos = thisLocalVarPos
self.returnAddress = returnAddress
self.stackPos = stackPos
def restore(self):
global evalPos, localVarPos
localVarPos = self.localVarPos
        evalPos = self.returnAddress
#Simulate an MSC syscall given the syscall number, arguments, and push bit
def syscall(syscallNum, args, pushBit):
global sharedVars,evalPos,stack,y_unit
#Random int in range
if syscallNum == 0x9:
push(randint(args[0], args[1]-1), pushBit)
#Variable access
elif syscallNum == 0x16:
operation = args[0]
if operation == 0x6:
if not args[1] in sharedVars:
print("ERROR: Variable 0x%08X doesn't not exist (Accessed at %X)" % (args[1],evalPos))
quit()
else:
push(sharedVars[args[1]], pushBit)
elif operation == 0x7:
sharedVars[args[2]] = args[1]
elif operation == 0x10:
if not args[1] in sharedVars:
print("ERROR: Variable 0x%08X doesn't not exist (Accessed at %X)" % (args[1],evalPos))
quit()
else:
push(0 if sharedVars[args[1]] == 0 else 1, pushBit)
elif operation == 0x2710:
sharedVars[args[1]] = 0
elif operation == 0x2711:
sharedVars[args[1]] = 1
elif syscallNum == 0xA:
operation = args[0]
if operation == 0: #sqrt
push(sqrt(intToFloat(args[1])),pushBit)
elif operation == 1: #angle
push(atan2(intToFloat(args[1]), intToFloat(args[2])),pushBit)
elif operation == 2:
push(intToFloat(args[1])**args[2],pushBit)
elif operation == 3:
push(sqrt((intToFloat(args[1])**2)+(intToFloat(args[2])**2)+(intToFloat(args[3])**2)),pushBit)
elif operation == 4:
push(cos(intToFloat(args[1])),pushBit)
elif operation == 5:
push(sin(intToFloat(args[1])),pushBit)
elif operation == 6:
push(random(), pushBit)
elif operation == 7:
push(abs(atan2(intToFloat(args[1]), intToFloat(args[2])) - atan2(intToFloat(args[3]), intToFloat(args[4]))),pushBit)
elif operation == 8:
push(y_unit, pushBit)
elif operation == 0xA:
mag = sqrt((intToFloat(args[1])**2)+(intToFloat(args[2])**2))
x = intToFloat(args[1]) / mag
y_unit = intToFloat(args[2]) / mag
push(x,pushBit)
#Variable access
elif syscallNum == 0x17:
operation = args[0]
if operation == 0x0:
if not args[1] in sharedVars:
print("ERROR: Variable 0x%08X doesn't not exist (Accessed at %X)" % (args[1],evalPos))
quit()
else:
push(sharedVars[args[1]], pushBit)
#Debug stack dump
elif syscallNum == 0xF0:
stackString = "DEBUG: ["
for i in range(len(stack)):
if stack[i] != None:
stackString += ('*' if i == stackPos else '') + hex(stack[i]) + (', ' if i != len(stack) - 1 else '')
if stackString != "[":
stackString = stackString[:-2]
print("Stack [Position = %i] - %s" % (stackPos, str([intToFloat(j) if j else 0 for j in stack])))
#Debug var print
elif syscallNum == 0xF1:
if len(args) == 0:
l = tuple(["0x%08X : 0x%08X, " % (i,j) for i,j in sharedVars.items()])
print('DEBUG: {' + (('%s' * len(l)) % l).rstrip(', ') + '}')
else:
if args[0] in sharedVars:
print("DEBUG: 0x%08X = 0x%08X" % (args[0], sharedVars[args[0]]))
else:
print("ERROR: Unsupported syscall 0x%X at location %X" % (syscallNum,evalPos))
quit()
#push a value onto the stack given that the push bit is enabled
def push(val, actuallyPush=True):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if not actuallyPush:
return
if stackPos == 0x80:
print("At instruction %08X:")
print("WARNING: STACK OVERFLOW, STACK INDEX OVERWRITTEN ")
newVal = None
if type(val) == int:
newVal = (val & 0xFFFFFFFF)
elif type(val) == float:
newVal = floatToInt(val)
else:
print("ERROR: Invalid type to push type=%s at position %X (Object = %s)" % (str(type(val)), evalPos, str(val)))
raise TypeError("Invalid push type")
if stackPos < 0x80 and stackPos >= 0:
stack[stackPos] = newVal
elif stackPos == 0x80:
stackPos = newVal
elif stackPos < 0:
globalVars[0x8A + stackPos] = newVal
else:
print("WARNING: WRITE OOB (Not in emulated memory)")
stackPos += 1
def pop():
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if stackPos == 0:
print("At instruction %08X:" % evalPos)
print("WARNING: STACK UNDERFLOW")
stackPos -= 1
value = None
if stackPos < 0 and stackPos >= -0x8A:
value = globalVars[0x8A + stackPos]
elif stackPos >= 0 and stackPos < 0x80:
value = stack[stackPos]
elif value == 0x80:
value = stackPos
else:
print("WARNING: OOB POP UNHANDLED BY EMU, RETURNING 0")
print(" this will cause inaccuracy in emulation")
return 0
if value == None:
print("WARNING: POPPED UNINITIALIZED VALUE, ASSUMING 0")
return 0
else:
return value
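# Emulated memory layout (inferred from push()/pop() above): stack indices
# 0x00-0x7F hold values, index 0x80 aliases the stack pointer itself, and
# negative stack positions spill into the global variable table.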
def getVar(varType, varNum):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if varType == 0: #(Local)
if localVarPos + varNum == 0x80:
return stackPos
elif localVarPos + varNum < 0x80:
return stack[localVarPos+varNum]
else:
print("WARNING: OOB READ OF LOCAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, TO AVOID")
print(" ERRORS ASSUMING VALUE OF 0, THIS WILL")
print(" LIKELY BE INACCURATE TO ON CONSOLE BEHAIVIOR")
return 0
elif varType == 1: #(global variable)
if varNum < 0x8A:
return globalVars[varNum]
elif varNum >= 0x8A and varNum < 0x10A:
return stack[varNum - 0x8A]
elif varNum == 0x10A:
return stackPos
elif varNum > 0x10A:
print("WARNING: OOB READ OF GLOBAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, TO AVOID")
print(" ERRORS ASSUMING VALUE OF 0, THIS WILL")
print(" LIKELY BE INACCURATE TO ON CONSOLE BEHAIVIOR")
return 0
else:
print("ERROR: UNKNOWN VARIABLE TYPE %i AT LOCATION %X" % (varType, evalPos))
raise ValueError
def setVar(varType, varNum, value, pushBit):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if varType == 0: #(Local)
if localVarPos + varNum == 0x80:
stackPos = value
elif localVarPos + varNum < 0x80:
stack[localVarPos+varNum] = value
else:
print("WARNING: OOB WRITE OF LOCAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, THIS WRITE")
print(" WILL NOT HAVE HAPPENED MORE OR LESS")
elif varType == 1: #(global variable)
if varNum < 0x8A:
globalVars[varNum] = value
elif varNum >= 0x8A and varNum < 0x10A:
stack[varNum - 0x8A] = value
elif varNum == 0x10A:
stackPos = value
elif varNum > 0x10A:
print("WARNING: OOB READ OF GLOBAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, THIS WRITE")
print(" WILL NOT HAVE HAPPENED MORE OR LESS")
else:
print("ERROR: UNKNOWN VARIABLE TYPE %i AT LOCATION %X" % (varType, evalPos))
raise ValueError
if pushBit:
push(value)
#Converts an int representing bytes to a float
#Example 0x3F800000 -> 1.0
def intToFloat(val):
return struct.unpack('>f', struct.pack('>L', val))[0]
#Converts a float to an int representing bytes
#Example 1.0 -> 0x3F800000
def floatToInt(val):
return struct.unpack('>L', struct.pack('>f', val))[0]
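# Round-trip sanity check (an illustrative addition, not in the original):
# the two helpers are exact inverses for normal 32-bit float values.
assert intToFloat(floatToInt(1.0)) == 1.0
assert floatToInt(intToFloat(0x3F800000)) == 0x3F800000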
def printf(printString, args):
    specifierLocs = [i for i,j in enumerate(printString) if j == '%' and i + 1 < len(printString) and printString[i+1] in ['x', 'X', 'i', 'f', '0']]
for i,j in enumerate(specifierLocs):
if printString[j+1] == 'f':
args[i] = intToFloat(args[i])
print(printString % tuple(args))
def evalCommand(command):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if command == None or command.command == 0xFFFE:
if evalPos != None:
print("Error: Invalid command at %X" % evalPos)
quit()
else:
print("Error: Invalid command (And Invalid eval position)")
executing = False
return
#This is used for determining if to add command size to
isJump = False
c = command.command
cParams = command.parameters
pushBit = command.pushBit
if c == 0x0: #nop
pass
elif c == 0x1:
pass
elif c == 0x2: #begin
stackPos -= cParams[0]
functionStack.append(FunctionInfo(localVarPos, linkRegister, stackPos))
localVarPos = stackPos
stackPos += cParams[1]
elif c in [0x3, 0x6, 0x7, 0x8, 0x9]: #end or return
if len(functionStack) == 0:
executing = False
return
fInfo = functionStack.pop()
if fInfo.returnAddress == None:
executing = False
return
if c in [0x6, 0x8]: #return a value
v = pop()
stackPos = fInfo.stackPos
push(v)
localVarPos = fInfo.localVarPos
evalPos = fInfo.returnAddress
isJump = True
elif c in [0x4, 0x5, 0x36]:
isJump = True
evalPos = cParams[0]
elif c == 0xA or c == 0xD:
push(cParams[0], pushBit)
elif c == 0xB:
push(getVar(cParams[0], cParams[1]), pushBit)
elif c == 0xC:
pass
elif c == 0xE:
push(pop() + pop(), pushBit) #Add int
elif c == 0xF:
push((-pop()) + pop(), pushBit) #Subtract int
elif c == 0x10:
push(pop() * pop(), pushBit) #Multiply int
elif c == 0x11:
divideBy = pop()
push(pop() // divideBy, pushBit) #Divide int
elif c == 0x12:
divideBy = pop()
push(pop() % divideBy, pushBit) #Mod int
elif c == 0x13:
push(-pop(), pushBit) #Negate value
elif c == 0x14:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) + 1,pushBit) #Var++
elif c == 0x15:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) - 1,pushBit) #Var--
elif c == 0x16:
push(pop() & pop(), pushBit)#bitAnd
elif c == 0x17:
push(pop() | pop(), pushBit)#bitOr
elif c == 0x18:
push(pop() ^ 0xFFFFFFFF, pushBit)#bitNot
elif c == 0x19:
push(pop() ^ pop(), pushBit)#bitXor
elif c == 0x1A:
shiftBy = pop() #leftShift
push(pop() << shiftBy, pushBit)
elif c == 0x1B:
shiftBy = pop()
push(pop() >> shiftBy, pushBit)#rightShift
elif c == 0x1C:
setVar(cParams[0], cParams[1], pop(),pushBit) #setVar
elif c == 0x1D:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) + pop(),pushBit) #Var +=
elif c == 0x1E:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) - pop(),pushBit) #Var -=
elif c == 0x1F:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) * pop(),pushBit) #Var *=
elif c == 0x20:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) // pop(),pushBit) #Var /=
elif c == 0x21:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) % pop(),pushBit) #Var %=
elif c == 0x22:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) & pop(),pushBit) #Var &=
elif c == 0x23:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) | pop(),pushBit) #Var |=
elif c == 0x24:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) ^ pop(),pushBit) #Var ^=
elif c == 0x25:
push(int(pop() == pop()), pushBit) #equals
elif c == 0x26:
push(int(pop() != pop()), pushBit) #not equals
elif c == 0x27:
compareTo = pop()
push(int(pop() < compareTo), pushBit) #less than
elif c == 0x28:
compareTo = pop()
push(int(pop() <= compareTo), pushBit) #less than or equal
elif c == 0x29:
compareTo = pop()
push(int(pop() > compareTo), pushBit) #greater than
elif c == 0x2A:
compareTo = pop()
push(int(pop() >= compareTo), pushBit) #greater than or equal to
elif c == 0x2B:
push(0 if pop() != 0 else 1, pushBit)#logic not
elif c == 0x2C:
formatString = strings[pop()]
formatValues = []
for i in range(cParams[0]-1):
formatValues.insert(0, pop())
printf(formatString, formatValues)
elif c == 0x2D:
args = []
for i in range(cParams[0]):
args.insert(0, pop())
syscall(cParams[1], args, pushBit)
elif c == 0x2E:
exceptionRegister = cParams[0]
elif c in [0x2F, 0x30, 0x31]:
isJump = True
jumpPos = pop()
#paramList = [pop() for i in range(cParams[0])]
hitException = False
if c == 0x2F:
gottenScript = mscFile.getScriptAtLocation(jumpPos)
if gottenScript == None or gottenScript.getCommand(jumpPos).command != 0x2:
print("WARNING: at %X invalid function call, jumping to exception register (%X)" % (evalPos, exceptionRegister))
evalPos = exceptionRegister
hitException = True
isJump = True
if not hitException:
isJump = True
linkRegister = evalPos + len(command)
evalPos = jumpPos
elif c == 0x32:
v = pop()
        push(v) #this essentially re-pushes the last return value
push(v)
push(v,pushBit)
elif c == 0x33:
push(pop(), pushBit)
elif c == 0x34:
if pop() == 0:
isJump = True
evalPos = cParams[0]
elif c == 0x35:
if pop() != 0:
isJump = True
evalPos = cParams[0]
elif c == 0x38:
convertToFloat = lambda i: floatToInt(float(i))
stack[stackPos - (1 + cParams[0])] = convertToFloat(stack[stackPos - (1 + cParams[0])]) # intToFloat
elif c == 0x39:
convertToInt = lambda f: int(intToFloat(f))
stack[stackPos - (1 + cParams[0])] = convertToInt(stack[stackPos - (1 + cParams[0])]) # floatToInt
elif c == 0x3A:
push(intToFloat(pop()) + intToFloat(pop()), pushBit)
elif c == 0x3B:
v = intToFloat(pop())
push(intToFloat(pop()) - v, pushBit)
elif c == 0x3C:
push(intToFloat(pop()) * intToFloat(pop()), pushBit)
elif c == 0x3D:
v = intToFloat(pop())
push(intToFloat(pop()) / v, pushBit)
elif c == 0x3E:
push(-intToFloat(pop()), pushBit)
elif c == 0x3F:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) + 1),pushBit) #float Var++
elif c == 0x40:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) - 1),pushBit) #float Var--
elif c == 0x41:
setVar(cParams[0], cParams[1], pop(), pushBit) #setFloatVar
elif c == 0x42:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) + intToFloat(pop())),pushBit) #float Var+=
elif c == 0x43:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) - intToFloat(pop())),pushBit) #float Var-=
elif c == 0x44:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) * intToFloat(pop())),pushBit) #float Var*=
elif c == 0x45:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) / intToFloat(pop())),pushBit) #float Var/=
elif c == 0x46:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) == compTo), pushBit)
elif c == 0x47:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) != compTo), pushBit)
elif c == 0x48:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) < compTo), pushBit)
elif c == 0x49:
push(int(intToFloat(pop()) <= intToFloat(pop())), pushBit) #float equals
elif c == 0x4A:
push(int(intToFloat(pop()) > intToFloat(pop())), pushBit) #float equals
elif c == 0x4B:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) >= compTo), pushBit)
elif c == 0x4D:
executing = False
return
if not isJump:
evalPos += len(command)
def evalMscFile(mscFileObject):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister,mainLoopFunc
mscFile = mscFileObject
strings = mscFile.strings
evalPos = mscFile.entryPoint
startScript = mscFile.getScriptAtLocation(mscFile.entryPoint)
if startScript != None:
executing = True
while executing:
currentExecutingScript = mscFile.getScriptAtLocation(evalPos)
if currentExecutingScript != None:
evalCommand(currentExecutingScript.getCommand(evalPos))
if executing:
executing = (evalPos != None)
else:
executing = False
def evalFile(filepath):
with open(filepath, 'rb') as f:
mscFile = MscFile().readFromFile(f)
evalMscFile(mscFile)
def evalText():
global stack, stackPos
mscFile = MscFile()
strs = []
scriptString = ""
print("+----------------------------------------------+")
print("| Text interpreter - Type in your script. |")
print("| Script input will stop after you type 'end' |")
print("+----------------------------------------------+")
nextLine = input()
while nextLine.strip().lower() != "end":
scriptString += nextLine + "\n"
nextLine = input()
scriptString += nextLine
print("------------------------------------------------")
scr = MscScript()
cmds = parseCommands(scriptString, mscStrings=strs)
cmdsSize = 0
for c in cmds:
cmdsSize += len(c)
scr.bounds = [0x10, 0x10+cmdsSize]
scr.cmds = cmds
scr.setStart(0x10)
scr.offset(0x10)
mscFile.entryPoint = 0x10
mscFile.strings = strs
mscFile.scripts.append(scr)
if scr[0].command == 0x2 and scr[0].parameters[0] > 0:
stackPos = scr[0].parameters[0]
print('Input %i parameter(s)' % scr[0].parameters[0])
for i in range(scr[0].parameters[0]):
p = input('Input parameter %i: ' % i).strip()
if p[-1] == 'f':
stack[i] = int(floatToInt(float(p[0 : len(p)-1])))
else:
stack[i] = int(p, 0)
evalMscFile(mscFile)
def load_fighter_param_common(filepath):
global sharedVars
p = openParam(filepath)
for i in range(len(p)):
val = p[i]
if isinstance(val, f32):
val = floatToInt(val)
        elif not any(isinstance(val, t) for t in (u8, s8, u16, s16, u32, s32)):
continue
sharedVars[0x12000000 + i] = int(val)
sharedVars[0x02000000 + i] = int(val)
def load_fighter_param(filepath, entry):
global sharedVars
p = openParam(filepath)[0].entry(entry)
for i in range(len(p)):
val = p[i]
if isinstance(val, f32):
val = floatToInt(val)
        elif not any(isinstance(val, t) for t in (u8, s8, u16, s16, u32, s32)):
continue
sharedVars[0x13000000 + i] = int(val)
sharedVars[0x03000000 + i] = int(val)
def main():
    global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister,sharedVars,mainLoopFunc,y_unit
mscFile = None
mscFileBytes = None
mainLoopFunc = None
stack = [None] * 0x80
functionStack = []
stackPos = 0
localVarPos = 0
evalPos = 0
exceptionRegister = 0
linkRegister = None
globalVars = [None] * 0x8A #Note a lot of this is actually unused but is simulated for exploitation
executing = False
strings = []
    sharedVars = {}
    y_unit = 0.0 # initialized here so syscall 0xA operation 8 cannot hit a NameError
#Parse arguments
parse = ArgumentParser(description="Emulate MSC bytecode")
parse.add_argument("--fighter_param_common", action="store", dest="fighter_param_common", help="Path of fighter_param_common to load")
parse.add_argument("--fighter_param", action="store", dest="fighter_param", help="Path of fighter_param to load")
parse.add_argument("--character", action="store", dest="character", help="Name of character to load from fighter_param")
parse.add_argument("--character_list", action="store_true", dest="charLS", help="List character names")
parse.add_argument("mscFile", nargs='?', type=str, help="MSC File to emulate")
args = parse.parse_args()
charIds = {'miienemyf': 62, 'miienemys': 63, 'miienemyg': 64, 'littlemacg': 60, 'mariod': 36, 'pikmin': 26, 'sheik': 17, 'roy': 54, 'yoshi': 7, 'duckhunt': 45, 'koopajr': 46, 'pit': 24, 'metaknight': 23, 'cloud': 55, 'miifighter': 0, 'miiswordsman': 1, 'miigunner': 2, 'wiifit': 40, 'pacman': 49, 'gamewatch': 19, 'peach': 14, 'robot': 31, 'rockman': 50, 'fox': 9, 'zelda': 16, 'bayonetta': 56, 'purin': 35, 'donkey': 4, 'shulk': 47, 'ryu': 52, 'toonlink': 32, 'sonic': 34, 'lucariom': 61, 'lizardon': 33, 'littlemac': 41, 'kirby': 8, 'pikachu': 10, 'murabito': 42, 'ness': 13, 'palutena': 43, 'diddy': 27, 'mario': 3, 'wario': 22, 'link': 5, 'ike': 29, 'rosetta': 39, 'samus': 6, 'falcon': 12, 'mewtwo': 51, 'lucas': 53, 'ganon': 20, 'koopag': 58, 'gekkouga': 48, 'dedede': 28, 'pitb': 38, 'lucina': 37, 'warioman': 59, 'marth': 18, 'szerosuit': 25, 'koopa': 15, 'kamui': 57, 'lucario': 30, 'luigi': 11, 'reflet': 44, 'falco': 21}
if args.charLS:
print(list(charIds.keys()))
quit()
    if args.fighter_param is not None and isfile(args.fighter_param) and args.character in charIds:
print("loading fighter_param")
load_fighter_param(args.fighter_param, charIds[args.character])
    if args.fighter_param_common is not None and isfile(args.fighter_param_common):
load_fighter_param_common(args.fighter_param_common)
    if args.mscFile is None:
evalText()
else:
evalFile(args.mscFile)
if __name__ == '__main__':
main()
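# --- Hedged invocation sketch (not part of the original script) ---
# Per the ArgumentParser set up in main(), typical runs might look like the
# following; the file names are placeholders.
#
#     python emu.py game.mscsb --fighter_param fighter_param.bin --character mario
#     python emu.py --character_list
#     python emu.py            # no file given: starts the text interpreter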
|
{
"content_hash": "7a718bc3db9519f517e0b30cab485968",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 935,
"avg_line_length": 40.24273504273504,
"alnum_prop": 0.5827032537592388,
"repo_name": "jam1garner/pymsc",
"id": "669b82fe6917e745cfaa0da2b315f64a826fee5b",
"size": "23927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "Python",
"bytes": "77139"
}
],
"symlink_target": ""
}
|
"""カスタムレイヤー。"""
# pylint: skip-file
# flake8: noqa
from .activations import *
from .attentions import *
from .blocks import *
from .convolutional import *
from .endpoint import *
from .misc import *
from .noise import *
from .normalization import *
from .pooling import *
|
{
"content_hash": "11ceb89061296d971a46eecc972bc77d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 28,
"avg_line_length": 21,
"alnum_prop": 0.7289377289377289,
"repo_name": "ak110/pytoolkit",
"id": "ae22863ca61561f151e97dfe6419b3b244cfb1cd",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytoolkit/layers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "175"
},
{
"name": "Python",
"bytes": "562006"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
}
|
import os
import random
import shutil
from datapipe.io import images
NUM_IMAGES = 1000
INPUT_DIR_PATH = os.path.expanduser("~/data/astri_mini_array_konrad/fits/astri_v2/gamma/")
OUTPUT_FILE_PATH = "/dev/shm/.jd/astri_konrad/gamma"
NPE_MIN = 30
NPE_MAX = 2000
# TODO: filter contained images -> problem: the method must be adapted for hexagonal images...
# MAKE THE FILE LIST ##########################################################
assert os.path.isdir(OUTPUT_FILE_PATH)
print(INPUT_DIR_PATH)
input_file_path_list = []
for dir_item in os.listdir(INPUT_DIR_PATH):
dir_item_path = os.path.join(INPUT_DIR_PATH, dir_item)
if dir_item_path.lower().endswith('.fits') and os.path.isfile(dir_item_path):
input_file_path_list.append(dir_item_path)
print("The input directory contains {} FITS files.".format(len(input_file_path_list)))
# SHUFFLE THE FILE LIST #######################################################
# shuffle the list to avoid always having the same tel_id
random.shuffle(input_file_path_list)
# COPY FILES IN THE RAMDISK ###################################################
image_counter = 0
for input_file_path in input_file_path_list:
    if image_counter >= NUM_IMAGES:  # >= so that exactly NUM_IMAGES files are copied
        break
fits_images_dict, fits_metadata_dict = images.load_benchmark_images(input_file_path)
if NPE_MIN <= fits_metadata_dict["npe"] <= NPE_MAX:
print(image_counter, input_file_path)
shutil.copy(input_file_path, OUTPUT_FILE_PATH)
image_counter += 1
else:
print("reject", input_file_path)
|
{
"content_hash": "0816e2bbca1df17fd2beb10ec5de25a1",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 95,
"avg_line_length": 27.821428571428573,
"alnum_prop": 0.6251604621309371,
"repo_name": "jdhp-sap/data-pipeline-standalone-scripts",
"id": "148a60e4d1ef259609ae313f4d2050f510ffc71a",
"size": "2796",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/make_imageset_in_ramdisk_astri_konrad_gamma.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "802"
},
{
"name": "Python",
"bytes": "189446"
},
{
"name": "Shell",
"bytes": "25749"
}
],
"symlink_target": ""
}
|
import os.path
import sys
sys.path.append(os.path.abspath('../../'))
from FileSystem import FileSystem
from src.util.OctaveAST import OctaveAST
from termcolor import colored
class Printer(object):
@staticmethod
def highlightNode(octave, nodeId):
        ast = OctaveAST(octave)
        (startLine, startCol, endLine, endCol) = ast.getCodeBounds(nodeId)
        presnip, snip, postsnip = ast.snippetSplit(
            startLine, startCol, endLine, endCol)
        cpre = colored(presnip, 'blue')
        csnip = colored(snip, 'green', attrs=['bold'])
        cpost = colored(postsnip, 'blue')
        print(cpre + csnip + cpost)
@staticmethod
def mask(octave, maskMap):
fgColor = 'green'
bgColor = 'blue'
s = ''
codeLines = octave.code.split('\n')
        for line, maskline in zip(codeLines, maskMap):
            for char, mchar in zip(line, maskline):
                if mchar == 1:
                    s += colored(char, fgColor, attrs=['bold'])
                else:
                    s += colored(char, bgColor)
            s += '\n'
print(s)
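# --- Hedged usage sketch (not part of the original module) ---
# mask() only needs an object exposing a .code string plus a 0/1 map of the
# same shape; the stand-in object below is hypothetical, and this assumes the
# module's own imports resolve.
if __name__ == '__main__':
    class FakeOctave(object):
        code = 'x = 1\ny = 2'
    Printer.mask(FakeOctave(), [[1, 0, 0, 0, 1], [0, 1, 0, 0, 0]])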
|
{
"content_hash": "d1ffaf437ac403f83eff482942d135e8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 30.324324324324323,
"alnum_prop": 0.5686274509803921,
"repo_name": "tanonev/codewebs",
"id": "d6a2727267b04203c890cb54b07bcd860082ebcf",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/util/Printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "790"
},
{
"name": "C++",
"bytes": "301221"
},
{
"name": "Java",
"bytes": "479184"
},
{
"name": "Makefile",
"bytes": "5459"
},
{
"name": "Matlab",
"bytes": "50455"
},
{
"name": "Python",
"bytes": "230306"
},
{
"name": "Shell",
"bytes": "13311"
}
],
"symlink_target": ""
}
|
"""Test cases for mupub.commands.check
"""
import os
from unittest import TestCase
from .tutils import PREFIX
import mupub
TEST_DATA = 'data'
class CheckTest(TestCase):
def test_basic_check(self):
"""Basic check command"""
basic = os.path.join(os.path.dirname(__file__),
TEST_DATA,
'basic-hdr.ly')
mupub.check(basic, header_file=None)
def test_validate(self):
"""Can validate files"""
header = mupub.find_header('SorF/O5/sor-op5-5', PREFIX)
fails = mupub.Validator.basic_checks(header)
self.assertEqual(len(fails), 0)
|
{
"content_hash": "0e4a48da76bfa4275d462f4c9acffd9a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 26.875,
"alnum_prop": 0.5937984496124031,
"repo_name": "MutopiaProject/mupub",
"id": "737e6bd6fbc91a56b4ec191a9190f43e38e2559a",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mupub/tests/test_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "LilyPond",
"bytes": "46993"
},
{
"name": "Makefile",
"bytes": "523"
},
{
"name": "Python",
"bytes": "102876"
}
],
"symlink_target": ""
}
|
"""
StormSpans brings support for PostgreSQL's range types to Canonical's Storm
using PsycoSpans paired with Spans.
from spans import intrange
from storm.locals import *
from stormspans import IntRange
class Model(Storm):
id = Int(primary=True)
span = IntRange(default=intrange(1, 10))
To connect to the database, specify "postgres+spans://..." instead of
"postgres://...".
"""
__version__ = "0.1.0"
from storm.database import register_scheme
from .database import PostgresStormSpans, install_range
from .properties import *
__all__ = [
"IntRange",
"FloatRange",
"DateRange",
"DateTimeRange",
"install_range"
]
register_scheme("postgres+spans", PostgresStormSpans)
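# --- Hedged usage sketch (not part of the original module) ---
# A store opened through the "postgres+spans" scheme should accept the range
# properties transparently; the DSN below is a placeholder.
#
#     from storm.database import create_database
#     from storm.store import Store
#
#     database = create_database("postgres+spans://user:password@localhost/db")
#     store = Store(database)
#     store.add(Model())      # Model as in the module docstring above
#     store.commit()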
|
{
"content_hash": "b638d43e179f63f99b913bc6d5233440",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 22.90625,
"alnum_prop": 0.694406548431105,
"repo_name": "runfalk/stormspans",
"id": "20b7ff486d38f4adad0920b32fce25e8c553c64f",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stormspans/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "365"
},
{
"name": "Python",
"bytes": "18969"
}
],
"symlink_target": ""
}
|
import numpy as np
def estimate_slope(x, y):
poly_coeff = np.polyfit(np.log(x), np.log(y), 1)
return poly_coeff[0]
def is_strictly_decreasing(vector):
differences = np.diff(vector)
return np.all(differences < 0)
def errors_within_relative_tolerance(actual, expected, tolerance=1e-6):
actual = np.asarray(actual)
expected = np.asarray(expected)
assert np.all(actual > 0)
assert np.all(expected > 0)
diff = np.abs(actual - expected)
return np.all(diff < tolerance * expected)
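# --- Hedged usage sketch (not part of the original module) ---
# estimate_slope fits a line in log-log space, so it should recover the
# exponent of a power law; the data below is synthetic, for illustration only.
if __name__ == '__main__':
    x = np.array([1.0, 2.0, 4.0, 8.0])
    y = 3.0 * x ** 2  # y = c * x**k with k = 2
    assert abs(estimate_slope(x, y) - 2.0) < 1e-6
    assert is_strictly_decreasing([4.0, 2.0, 1.0])
    assert errors_within_relative_tolerance([1.0, 2.0], [1.0, 2.000001])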
|
{
"content_hash": "e9e727ec05eba3a150f7163a04d95f4a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 23.727272727272727,
"alnum_prop": 0.6743295019157088,
"repo_name": "Andlon/crest",
"id": "c84705cb238a08d8408f059f1010f9daa57ef61e",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python-support/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "494891"
},
{
"name": "CMake",
"bytes": "3988"
},
{
"name": "Jupyter Notebook",
"bytes": "17535"
},
{
"name": "Python",
"bytes": "9485"
},
{
"name": "Shell",
"bytes": "605"
}
],
"symlink_target": ""
}
|
from kubernetes_py.models.v1.AWSElasticBlockStoreVolumeSource import AWSElasticBlockStoreVolumeSource
from kubernetes_py.models.v1.EmptyDirVolumeSource import EmptyDirVolumeSource
from kubernetes_py.models.v1.GCEPersistentDiskVolumeSource import GCEPersistentDiskVolumeSource
from kubernetes_py.models.v1.GitRepoVolumeSource import GitRepoVolumeSource
from kubernetes_py.models.v1.HostPathVolumeSource import HostPathVolumeSource
from kubernetes_py.models.v1.NFSVolumeSource import NFSVolumeSource
from kubernetes_py.models.v1.SecretVolumeSource import SecretVolumeSource
from kubernetes_py.models.v1.PersistentVolumeClaimVolumeSource import PersistentVolumeClaimVolumeSource
from kubernetes_py.models.v1.ConfigMapVolumeSource import ConfigMapVolumeSource
from kubernetes_py.utils import is_valid_string, filter_model
class Volume(object):
"""
http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_volume
"""
VOLUME_TYPES_TO_SOURCE_MAP = {
"awsElasticBlockStore": AWSElasticBlockStoreVolumeSource,
"emptyDir": EmptyDirVolumeSource,
"gcePersistentDisk": GCEPersistentDiskVolumeSource,
"gitRepo": GitRepoVolumeSource,
"hostPath": HostPathVolumeSource,
"nfs": NFSVolumeSource,
"secret": SecretVolumeSource,
"persistentVolumeClaim": PersistentVolumeClaimVolumeSource,
"configMap": ConfigMapVolumeSource,
}
def __init__(self, model=None):
# TODO(froch): add support for the below
# self._iscsi = None
# self._glusterfs = None
# self._rbd = None
# self._flex_volume = None
# self._cinder = None
# self._cephfs = None
# self._flocker = None
# self._downward_api = None
# self._fc = None
# self._azure_file = None
        # self._vsphere_volume = None
# self._quobyte = None
# self._azuredisk = None
self._awsElasticBlockStore = None
self._emptyDir = None
self._gcePersistentDisk = None
self._gitRepo = None
self._hostPath = None
self._name = None
self._nfs = None
self._persistentVolumeClaim = None
self._secret = None
self._config_map = None
if model is not None:
m = filter_model(model)
self._build_with_model(m)
def __eq__(self, other):
# see https://github.com/kubernetes/kubernetes/blob/master/docs/design/identifiers.md
if isinstance(other, self.__class__):
return self.name == other.name
return NotImplemented
def _build_with_model(self, model=None):
if "awsElasticBlockStore" in model:
self.awsElasticBlockStore = AWSElasticBlockStoreVolumeSource(model["awsElasticBlockStore"])
if "emptyDir" in model:
self.emptyDir = EmptyDirVolumeSource(model["emptyDir"])
if "gcePersistentDisk" in model:
self.gcePersistentDisk = GCEPersistentDiskVolumeSource(model["gcePersistentDisk"])
if "gitRepo" in model:
self.gitRepo = GitRepoVolumeSource(model["gitRepo"])
if "hostPath" in model:
self.hostPath = HostPathVolumeSource(model["hostPath"])
if "name" in model:
self.name = model["name"]
if "nfs" in model:
self.nfs = NFSVolumeSource(model["nfs"])
if "secret" in model:
self.secret = SecretVolumeSource(model["secret"])
if "persistentVolumeClaim" in model:
self.persistentVolumeClaim = PersistentVolumeClaimVolumeSource(model["persistentVolumeClaim"])
if "configMap" in model:
self.configMap = ConfigMapVolumeSource(model["configMap"])
@staticmethod
def vol_type_to_source(vol_type=None):
return Volume.VOLUME_TYPES_TO_SOURCE_MAP[vol_type]()
# ------------------------------------------------------------------------------------- aws ebs
@property
def awsElasticBlockStore(self):
return self._awsElasticBlockStore
@awsElasticBlockStore.setter
def awsElasticBlockStore(self, ebs=None):
if not isinstance(ebs, AWSElasticBlockStoreVolumeSource):
raise SyntaxError("Volume: aws_elastic_block_store: [ {0} ] is invalid.".format(ebs))
self._awsElasticBlockStore = ebs
# ------------------------------------------------------------------------------------- configMap
@property
def configMap(self):
return self._config_map
@configMap.setter
def configMap(self, config_map=None):
if not isinstance(config_map, ConfigMapVolumeSource):
raise SyntaxError("Volume: config_map: [ {0} ] is invalid.".format(config_map))
self._config_map = config_map
# ------------------------------------------------------------------------------------- emptyDir
@property
def emptyDir(self):
return self._emptyDir
@emptyDir.setter
def emptyDir(self, edir=None):
if not isinstance(edir, EmptyDirVolumeSource):
raise SyntaxError("Volume: empty_dir: [ {0} ] is invalid.".format(edir))
self._emptyDir = edir
# ------------------------------------------------------------------------------------- gce pd
@property
def gcePersistentDisk(self):
return self._gcePersistentDisk
@gcePersistentDisk.setter
def gcePersistentDisk(self, pd=None):
if not isinstance(pd, GCEPersistentDiskVolumeSource):
raise SyntaxError("Volume: gce_persistent_disk: [ {0} ] is invalid.".format(pd))
self._gcePersistentDisk = pd
# ------------------------------------------------------------------------------------- gitRepo
@property
def gitRepo(self):
return self._gitRepo
@gitRepo.setter
def gitRepo(self, repo=None):
if not isinstance(repo, GitRepoVolumeSource):
raise SyntaxError("Volume: git_repo: [ {0} ] is invalid.".format(repo))
self._gitRepo = repo
# ------------------------------------------------------------------------------------- hostPath
@property
def hostPath(self):
return self._hostPath
@hostPath.setter
def hostPath(self, hp=None):
if not isinstance(hp, HostPathVolumeSource):
raise SyntaxError("Volume: host_path: [ {0} ] is invalid.".format(hp))
self._hostPath = hp
# ------------------------------------------------------------------------------------- name
@property
def name(self):
return self._name
@name.setter
def name(self, name=None):
if not is_valid_string(name):
raise SyntaxError("Volume: name: [ {0} ] is invalid.".format(name))
self._name = name
# ------------------------------------------------------------------------------------- nfs
@property
def nfs(self):
return self._nfs
@nfs.setter
def nfs(self, nfs=None):
if not isinstance(nfs, NFSVolumeSource):
raise SyntaxError("Volume: nfs: [ {0} ] is invalid.".format(nfs))
self._nfs = nfs
# ------------------------------------------------------------------------------------- secret
@property
def secret(self):
return self._secret
@secret.setter
def secret(self, secret=None):
if not isinstance(secret, SecretVolumeSource):
raise SyntaxError("Volume: secret: [ {0} ] is invalid.".format(secret))
self._secret = secret
# ------------------------------------------------------------------------------------- persistentVolumeClaim
@property
def persistentVolumeClaim(self):
return self._persistentVolumeClaim
@persistentVolumeClaim.setter
def persistentVolumeClaim(self, pvc=None):
if not isinstance(pvc, PersistentVolumeClaimVolumeSource):
raise SyntaxError("Volume: persistentVolumeClaim: [ {0} ] is invalid.".format(pvc))
self._persistentVolumeClaim = pvc
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.awsElasticBlockStore is not None:
data["awsElasticBlockStore"] = self.awsElasticBlockStore.serialize()
if self.emptyDir is not None:
data["emptyDir"] = self.emptyDir.serialize()
if self.gcePersistentDisk is not None:
data["gcePersistentDisk"] = self.gcePersistentDisk.serialize()
if self.gitRepo is not None:
data["gitRepo"] = self.gitRepo.serialize()
if self.hostPath is not None:
data["hostPath"] = self.hostPath.serialize()
if self.name is not None:
data["name"] = self.name
if self.nfs is not None:
data["nfs"] = self.nfs.serialize()
if self.secret is not None:
data["secret"] = self.secret.serialize()
if self.persistentVolumeClaim is not None:
data["persistentVolumeClaim"] = self.persistentVolumeClaim.serialize()
if self.configMap is not None:
data["configMap"] = self.configMap.serialize()
return data
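# --- Hedged usage sketch (not part of the original module) ---
# Volume round-trips between the k8s v1 JSON shape and typed sources; this
# assumes the bundled source classes accept an empty model dict.
if __name__ == '__main__':
    vol = Volume(model={'name': 'scratch', 'emptyDir': {}})
    assert vol.name == 'scratch'
    print(vol.serialize())  # expected to echo the name and emptyDir source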
|
{
"content_hash": "f06b2318aab83ba05273fa15439c504e",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 113,
"avg_line_length": 38.15481171548117,
"alnum_prop": 0.5801074679241145,
"repo_name": "sebastienc/kubernetes-py",
"id": "a26fba3a75d48d64f28202a5b16a241a7ca6427a",
"size": "9297",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kubernetes_py/models/v1/Volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1021008"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
}
|
import os
import pandas as pd
from zipfile import ZipFile
from sklearn.preprocessing import LabelEncoder
from ..contrib.category_encoder import CategoryEncoder
class DataSetLoader(object):
"""
Provides a number of pre-staged data sets to load into memory.
"""
def __init__(self):
pass
@staticmethod
def load_bike_sharing():
"""
Loads and returns the data set from Kaggle's Bike Sharing Demand competition.
Link: https://www.kaggle.com/c/bike-sharing-demand
Returns
----------
data : array-like
Pandas data frame containing the entire data set.
X : array-like
Training input samples.
y1 : array-like
First variable target values.
y2 : array-like
Second variable target values.
"""
file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data', 'bike_sharing.zip')
z = ZipFile(file_location)
data = pd.read_csv(z.open('train.csv'))
data['datetime'] = pd.to_datetime(data['datetime'])
data = data.set_index('datetime')
# drop the total count label and move the registered/casual counts to the front
num_features = len(data.columns) - 3
cols = data.columns.tolist()
cols = cols[-3:-1] + cols[0:num_features]
data = data[cols]
X = data.iloc[:, 2:].values
y1 = data.iloc[:, 0].values
y2 = data.iloc[:, 1].values
return data, X, y1, y2
@staticmethod
def load_forest_cover():
"""
Loads and returns the data set from Kaggle's Forest Cover Type Prediction
competition.
Link: https://www.kaggle.com/c/forest-cover-type-prediction
Returns
----------
data : array-like
Pandas data frame containing the entire data set.
X : array-like
Training input samples.
y : array-like
Target values.
"""
file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data', 'forest_cover.zip')
z = ZipFile(file_location)
data = pd.read_csv(z.open('train.csv'))
data = data.set_index('Id')
# move the label to the first position
cols = data.columns.tolist()
cols = cols[-1:] + cols[0:-1]
data = data[cols]
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
return data, X, y
@staticmethod
def load_otto_group():
"""
Loads and returns the data set from Kaggle's Otto Group Product Classification
competition.
Link: https://www.kaggle.com/c/otto-group-product-classification-challenge
Returns
----------
data : array-like
Pandas data frame containing the entire data set.
X : array-like
Training input samples.
y : array-like
Target values.
"""
file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data', 'otto_group.zip')
z = ZipFile(file_location)
data = pd.read_csv(z.open('train.csv'))
data = data.set_index('id')
# move the label to the first position
cols = data.columns.tolist()
cols = cols[-1:] + cols[0:-1]
data = data[cols]
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
# transform the labels from strings to integers
encoder = LabelEncoder()
y = encoder.fit_transform(y)
return data, X, y
@staticmethod
def load_property_inspection():
"""
Loads and returns the data set from Kaggle's Property Inspection Prediction
competition.
Link: https://www.kaggle.com/c/liberty-mutual-group-property-inspection-prediction
Returns
----------
data : array-like
Pandas data frame containing the entire data set.
X : array-like
Training input samples.
y : array-like
Target values.
"""
file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data', 'property_inspection.zip')
z = ZipFile(file_location)
data = pd.read_csv(z.open('train.csv'))
data = data.set_index('Id')
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
# transform the categorical variables from strings to integers
encoder = CategoryEncoder()
X = encoder.fit_transform(X)
return data, X, y
@staticmethod
def load_time_series():
"""
Loads and returns a generic time series data set.
Returns
----------
data : array-like
Pandas data frame containing the entire data set.
X : array-like
Training input samples.
y : array-like
Target values.
"""
file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data', 'time_series.zip')
z = ZipFile(file_location)
data = pd.read_csv(z.open('train.csv'))
data['ds'] = pd.to_datetime(data['ds'])
X = data['ds'].values
y = data['y'].values
return data, X, y
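# --- Hedged usage sketch (not part of the original module) ---
# Each loader returns the full frame plus ready-to-use arrays; this assumes
# the bundled data/forest_cover.zip archive is present.
if __name__ == '__main__':
    data, X, y = DataSetLoader.load_forest_cover()
    print(data.shape, X.shape, y.shape)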
|
{
"content_hash": "10a93a76f6267d7a7f55b91a67da0cb5",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 90,
"avg_line_length": 29.58695652173913,
"alnum_prop": 0.5464731814842028,
"repo_name": "jdwittenauer/ionyx",
"id": "244383e5b0178ae6ede9120bb81fb26136247d8e",
"size": "5444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ionyx/datasets/dataset_loader.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "137516"
}
],
"symlink_target": ""
}
|
import os
import setuptools
from pok.version import get_version, read_requirements
with open('README.md') as f:
    readme = f.read()
dependencies, requirements = read_requirements('requirements.txt')
long_description = """
pok %s
Configuration should be easy.
To install use pip install git+git://git@github.com:iPlantCollaborativeOpenSource/pok.git
----
%s
----
For more information, please see: https://github.com/iPlantCollaborativeOpenSource/pok
""" % (get_version('short'), readme)
setuptools.setup(
name='pok',
version=get_version('short'),
author='jmatt',
author_email='jmatt@jmatt.org',
description="Configuration should be easy.",
long_description=long_description,
license="BSD 3 Clause",
url="https://github.com/iPlantCollaborativeOpenSource/pok",
packages=setuptools.find_packages(),
dependency_links=dependencies,
install_requires=requirements,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: System",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: System :: Systems Administration"
])
|
{
"content_hash": "746245927734773132df16b3fa00540a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 89,
"avg_line_length": 29.31111111111111,
"alnum_prop": 0.6823351023502654,
"repo_name": "iPlantCollaborativeOpenSource/pok",
"id": "0ab8f13103f69295063ebd7e637d9c3415221d8a",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6583"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
}
|
"""Make node instance's version column not nullable
Revision ID: f1dab814a4a0
Revises: 7aae863786af
Create Date: 2017-10-08 12:59:58.047124
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1dab814a4a0'
down_revision = '7aae863786af'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('node_instances', 'version',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
op.alter_column('node_instances', 'version',
existing_type=sa.INTEGER(),
nullable=True)
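# --- Hedged usage note (not part of the original migration) ---
# Applied through the standard Alembic CLI from the directory containing
# alembic.ini:
#
#     alembic upgrade f1dab814a4a0    # or: alembic upgrade head
#     alembic downgrade 7aae863786af  # revert this revision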
|
{
"content_hash": "299943223811c37de1ce8d20d3901279",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 51,
"avg_line_length": 22.607142857142858,
"alnum_prop": 0.6492890995260664,
"repo_name": "cloudify-cosmo/cloudify-manager",
"id": "de6c9a780563b5bc9598c0867a7f042dcd15b6f3",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/rest-service/cloudify/migrations/versions/f1dab814a4a0_node_instance_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Dockerfile",
"bytes": "3843"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "119062"
},
{
"name": "Python",
"bytes": "3825971"
},
{
"name": "Shell",
"bytes": "49121"
}
],
"symlink_target": ""
}
|
"""Worker machine web server."""
import argparse
import BaseHTTPServer
import collections
import json
import logging
import os
import re
import SocketServer
import subprocess
import sys
import traceback
import urllib
import urlparse
import worker
_CHOICE_START = 'start'
_CHOICES = [
_CHOICE_START
]
_CLIENT_JS_PATH = os.path.join(worker.ROOT_PATH, 'client.js')
_DEFAULT_HOST = subprocess.check_output(['hostname']).strip()
_DEFAULT_PORT = 8080
_DEFAULT_LOG_PATH = os.path.join(worker.ROOT_PATH, 'server.log')
_INDEX_HTML_PATH = os.path.join(worker.ROOT_PATH, 'index.html')
_LOG = logging.getLogger('android.server')
_STATUS = 'status'
# Keep _STATUS_* in sync with _ExternalTask.STATUSES.
_STATUS_COMPLETE = 'complete'
_STATUS_CREATED = 'created'
_STATUS_DELETED = 'deleted'
_STATUS_FAILED = 'failed'
_STATUS_RUNNING = 'running'
# Translates from worker statuses to fe statuses.
_STATUS_MAP = {
worker.TestRun.BUILD_FAILED: _STATUS_FAILED,
worker.TestRun.BUILD_SUCCEEDED: _STATUS_RUNNING,
worker.TestRun.CONTENTS_MALFORMED: _STATUS_FAILED,
worker.TestRun.NOT_FOUND: _STATUS_FAILED,
worker.TestRun.PROJECT_MISCONFIGURED: _STATUS_FAILED,
worker.TestRun.RUNTIME_MISCONFIGURED: _STATUS_FAILED,
worker.TestRun.RUNTIME_NOT_RUNNING: _STATUS_FAILED,
worker.TestRun.TESTS_FAILED: _STATUS_FAILED,
worker.TestRun.TESTS_RUNNING: _STATUS_RUNNING,
worker.TestRun.TESTS_SUCCEEDED: _STATUS_COMPLETE,
worker.TestRun.UNAVAILABLE: _STATUS_FAILED,
}
assert len(worker.TestRun.STATUSES) == len(_STATUS_MAP)
_TICKET = 'ticket'
_WORKER_ID = 'worker_id'
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--log_file', type=str, default=_DEFAULT_LOG_PATH,
help='Absolute path of the file used for logging')
_PARSER.add_argument(
'--log_level', type=str, choices=worker.LOG_LEVEL_CHOICES,
default=worker.LOG_INFO,
help='Display log messages at or above this level')
_PARSER.add_argument(
'--host', type=str, default=_DEFAULT_HOST, help='Host to run on')
_PARSER.add_argument(
'--port', type=int, default=_DEFAULT_PORT, help='Port to run on')
_SystemState = collections.namedtuple(
'_SystemState',
['success', 'project_name', 'request_args', 'config', 'project', 'runtime'])
class _Environment(object):
HOST = None
PORT = None
@classmethod
def get_worker_id(cls):
assert cls.HOST is not None and cls.PORT is not None
return 'http://%s:%s' % (cls.HOST, cls.PORT)
@classmethod
def set(cls, host, port):
cls.HOST = host
cls.PORT = port
class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
_POST_DELETE = re.compile('^/.*/delete$')
def _dispatch_rest_post(self):
if self._POST_DELETE.match(self.path):
self._do_rest_POST_delete()
else:
self._do_rest_POST_create()
def _do_404_response(self):
self.send_response(404)
self._set_headers({
'Content-Length': 0,
'Content-Type': 'text/html',
})
def _do_GET_health(self):
# 'Healthy' means 'can work on new tasks'. 'Unhealthy' workers can still
# answer get requests for projects or task results -- probably. This
# health check could be made more robust.
self.send_response(500 if worker.Lock.active() else 200)
self._set_headers({'Content-Type': 'text/html'})
def _do_json_response(self, response, code=200):
full_response = {'payload': response}
self.send_response(code)
self._set_headers({'Content-Type': 'text/javascript'})
self.wfile.write(json.dumps(full_response))
def _do_rest_GET_project(self):
state = self._get_system_state_or_record_error(
get_request_args_fn=self._get_get_args)
if not state.success:
return
contents = None
with open(state.project.editor_file) as f:
contents = f.read()
self._do_json_response({
'contents': contents,
'filename': state.project.editor_file,
'projectName': state.project_name,
})
def _do_rest_GET_test_run(self):
request_args = self._get_get_args()
ticket = request_args.get(_TICKET)
worker_id = request_args.get(_WORKER_ID)
if worker_id != _Environment.get_worker_id():
self._do_json_response('Request sent to wrong worker', code=500)
return
# Treat as module-protected. pylint: disable=protected-access
test_run = worker._TestEnvironment.get_test_run(ticket)
code = 200
status = test_run.get_status()
if status == worker.TestRun.NOT_FOUND:
code = 404
result = test_run.to_dict()
result[_STATUS] = _STATUS_MAP.get(status)
self._do_json_response(result, code=code)
def _do_rest_POST_create(self):
if worker.Lock.active():
self._do_json_response('Worker locked', code=500)
return
state = self._get_system_state_or_record_error(
get_request_args_fn=self._get_post_args)
if not state.success:
return
patches = []
for patch in state.request_args.get('payload', {}).get('patches', []):
patches.append(worker.Patch(patch['filename'], patch['contents']))
ticket = state.request_args.get('ticket')
pid = worker.fork_test(
state.config, state.project_name, ticket, patches=patches)
    if pid is None:
      self._do_json_response('Unable to start worker process', code=500)
      return
self._do_json_response({
_TICKET: ticket,
_WORKER_ID: _Environment.get_worker_id(),
})
def _do_rest_POST_delete(self):
_LOG.info('TODO: implement rest POST delete')
  def _get_get_args(self):
    # str.lstrip('request=') strips any of those characters from the left,
    # not the prefix itself; remove the literal 'request=' prefix instead.
    query = urlparse.urlparse(self.path).query
    if query.startswith('request='):
      query = query[len('request='):]
    return json.loads(urllib.unquote_plus(query))
def _get_post_args(self):
data = self.rfile.read(int(self.headers.getheader('content-length')))
return json.loads(data)
def _get_project_name(self, path):
return path.split('=')[1]
def _get_system_state_or_record_error(self, get_request_args_fn=None):
config = worker.Config.load()
request_args = get_request_args_fn()
payload = request_args.get('payload', {})
project_name = payload.get('project', None)
if not project_name:
self._do_json_response('Must specify project', code=400)
return _SystemState(False, None, None, None, None, None)
project = config.projects.get(project_name)
runtime = config.runtimes.get(project_name)
if not (project and runtime):
self._do_json_response('Environment not configured', code=500)
return _SystemState(False, None, None, None, None, None)
if not os.path.exists(project.editor_file):
self._do_json_response('Projects misconfigured', code=500)
return _SystemState(False, None, None, None, None, None)
return _SystemState(
True, project_name, request_args, config, project, runtime)
def _set_headers(self, headers):
for key, value in headers.iteritems():
self.send_header(key, value)
self.end_headers()
def do_GET(self):
if self.path == '/health':
self._do_GET_health()
elif self.path.startswith('/rest/v1/project'):
self._do_rest_GET_project()
elif self.path.startswith('/rest/v1'):
self._do_rest_GET_test_run()
else:
self._do_404_response()
def do_POST(self):
if self.path.startswith('/rest/v1'):
self._dispatch_rest_post()
else:
self._do_404_response()
def log_message(self, format_template, *args):
_LOG.info('%(address)s - - [%(timestamp)s] %(rest)s', {
'address': self.address_string(),
'timestamp': self.log_date_time_string(),
'rest': format_template % args,
})
class _HttpServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
# Allow address reuse immediately after a server has stopped so we don't get
# spurious errors during dev.
allow_reuse_address = True
def _get_last_exception_str():
return ''.join(traceback.format_exception(*sys.exc_info()))
def _get_server(host, port):
return _HttpServer((host, port), _Handler)
def main(args):
worker.configure_logger(args.log_level, log_file=args.log_file)
_start(args.host, args.port)
def _start(host, port):
server = _get_server(host, port)
try:
_LOG.info('Starting server at http://%(host)s:%(port)s', {
'host': host,
'port': port,
})
server.serve_forever()
except: # Treat all errors the same. pylint: disable=bare-except
_LOG.info('Stopping server; reason:\n' + _get_last_exception_str())
server.socket.close()
if __name__ == '__main__':
parsed_args = _PARSER.parse_args()
_Environment.set(parsed_args.host, parsed_args.port)
main(parsed_args)
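# --- Hedged client sketch (not part of the original module) ---
# The POST handler above reads a JSON body with 'ticket' and 'payload' keys
# (see _do_rest_POST_create); the project name and URL below are assumptions.
#
#     import json
#     import urllib2
#     body = json.dumps({'ticket': 't-1',
#                        'payload': {'project': 'demo', 'patches': []}})
#     urllib2.urlopen('http://localhost:8080/rest/v1/test_run', body)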
|
{
"content_hash": "6a38db7949202898fac55ce9ba38462c",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 80,
"avg_line_length": 32.18245614035088,
"alnum_prop": 0.6250545137374618,
"repo_name": "google/coursebuilder-android-container-module",
"id": "47dca4a540870daf2f20d5519864b473c42c0e2e",
"size": "9770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "android/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3088"
},
{
"name": "Java",
"bytes": "7822"
},
{
"name": "JavaScript",
"bytes": "10446"
},
{
"name": "Python",
"bytes": "55128"
},
{
"name": "Shell",
"bytes": "3130"
}
],
"symlink_target": ""
}
|
"""
Support for Loop Energy sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.loop_energy/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pyloopenergy==0.0.17']
CONF_ELEC = 'electricity'
CONF_GAS = 'gas'
CONF_ELEC_SERIAL = 'electricity_serial'
CONF_ELEC_SECRET = 'electricity_secret'
CONF_GAS_SERIAL = 'gas_serial'
CONF_GAS_SECRET = 'gas_secret'
CONF_GAS_CALORIFIC = 'gas_calorific'
CONF_GAS_TYPE = 'gas_type'
DEFAULT_CALORIFIC = 39.11
DEFAULT_UNIT = 'kW'
ELEC_SCHEMA = vol.Schema({
vol.Required(CONF_ELEC_SERIAL): cv.string,
vol.Required(CONF_ELEC_SECRET): cv.string,
})
GAS_TYPE_SCHEMA = vol.In([CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL])
GAS_SCHEMA = vol.Schema({
vol.Required(CONF_GAS_SERIAL): cv.string,
vol.Required(CONF_GAS_SECRET): cv.string,
vol.Optional(CONF_GAS_TYPE, default=CONF_UNIT_SYSTEM_METRIC):
GAS_TYPE_SCHEMA,
vol.Optional(CONF_GAS_CALORIFIC, default=DEFAULT_CALORIFIC):
vol.Coerce(float)
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ELEC): vol.All(
dict, ELEC_SCHEMA),
vol.Optional(CONF_GAS, default={}): vol.All(
dict, GAS_SCHEMA)
})
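# --- Hedged configuration sketch (not part of the original module) ---
# PLATFORM_SCHEMA above implies a configuration.yaml entry along these
# lines; the serial numbers and secrets are placeholders.
#
#     sensor:
#       - platform: loopenergy
#         electricity:
#           electricity_serial: '00000000'
#           electricity_secret: 'SECRET'
#         gas:
#           gas_serial: '11111111'
#           gas_secret: 'SECRET'
#           gas_type: metric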
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Loop Energy sensors."""
import pyloopenergy
elec_config = config.get(CONF_ELEC)
gas_config = config.get(CONF_GAS)
# pylint: disable=too-many-function-args
controller = pyloopenergy.LoopEnergy(
elec_config.get(CONF_ELEC_SERIAL),
elec_config.get(CONF_ELEC_SECRET),
gas_config.get(CONF_GAS_SERIAL),
gas_config.get(CONF_GAS_SECRET),
gas_config.get(CONF_GAS_TYPE),
gas_config.get(CONF_GAS_CALORIFIC)
)
def stop_loopenergy(event):
"""Shutdown loopenergy thread on exit."""
_LOGGER.info("Shutting down loopenergy.")
controller.terminate()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_loopenergy)
sensors = [LoopEnergyElec(controller)]
if gas_config.get(CONF_GAS_SERIAL):
sensors.append(LoopEnergyGas(controller))
add_devices(sensors)
class LoopEnergyDevice(Entity):
"""Implementation of an Loop Energy base sensor."""
def __init__(self, controller):
"""Initialize the sensor."""
self._state = None
self._unit_of_measurement = DEFAULT_UNIT
self._controller = controller
self._name = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def _callback(self):
self.update_ha_state(True)
class LoopEnergyElec(LoopEnergyDevice):
"""Implementation of an Loop Energy Electricity sensor."""
def __init__(self, controller):
"""Initialize the sensor."""
super(LoopEnergyElec, self).__init__(controller)
self._name = 'Power Usage'
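        # (sic: the misspelled method name below matches the API exposed by
        # the pyloopenergy library, as does electricity_useage further down)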
self._controller.subscribe_elecricity(self._callback)
def update(self):
"""Get the cached Loop energy."""
self._state = round(self._controller.electricity_useage, 2)
class LoopEnergyGas(LoopEnergyDevice):
"""Implementation of an Loop Energy Gas sensor."""
def __init__(self, controller):
"""Initialize the sensor."""
super(LoopEnergyGas, self).__init__(controller)
self._name = 'Gas Usage'
self._controller.subscribe_gas(self._callback)
def update(self):
"""Get the cached Loop energy."""
self._state = round(self._controller.gas_useage, 2)
|
{
"content_hash": "fce000391208aaa51a32eb6b4e1290fc",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 78,
"avg_line_length": 28.276315789473685,
"alnum_prop": 0.6675197766402978,
"repo_name": "Duoxilian/home-assistant",
"id": "06d1fd954f2abf360ec3b321e4a44748586fb727",
"size": "4298",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/loopenergy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1584258"
},
{
"name": "Python",
"bytes": "5414513"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
}
|
import gym
import readchar
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
arrow_keys = {'\x1b[A': UP, '\x1b[B': DOWN, '\x1b[C': RIGHT, '\x1b[D': LEFT}
env = gym.make("FrozenLake-v0")
env.reset()
env.render()
while True:
key = readchar.readkey()
if key not in arrow_keys.keys():
print("Game aborted!")
break
action = arrow_keys[key]
state, reward, done, info = env.step(action)
env.render()
print("State: ", state, "Action: ", action, "Reward: ", reward, "Info: ", info)
if done:
print("Finished with reward", reward)
break
|
{
"content_hash": "9b2c64ca6abde8a5c3fee72f4c5036e5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 83,
"avg_line_length": 20.642857142857142,
"alnum_prop": 0.5847750865051903,
"repo_name": "zzsza/TIL",
"id": "c6bca66a644016e24eba63867ed889b472d8e748",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reinforcement_learning/All for deeplearing/Lab 05-1. Q-learning on Nondeterministic Worlds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "431717"
},
{
"name": "Java",
"bytes": "19334"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Julia",
"bytes": "314"
},
{
"name": "Jupyter Notebook",
"bytes": "15381217"
},
{
"name": "Python",
"bytes": "124497"
},
{
"name": "Shell",
"bytes": "1958"
}
],
"symlink_target": ""
}
|
from flask import Flask, make_response
from pulsar.apps import wsgi
def FlaskApp():
app = Flask(__name__)
@app.errorhandler(404)
def not_found(e):
return make_response("404 Page", 404)
@app.route('/', methods=['GET'])
def add_org():
return "Flask Example"
return app
class Site(wsgi.LazyWsgi):
def setup(self, environ=None):
app = FlaskApp()
return wsgi.WsgiHandler((wsgi.wait_for_body_middleware,
wsgi.middleware_in_executor(app)))
def server(**kwargs):
return wsgi.WSGIServer(Site(), **kwargs)
if __name__ == '__main__': # pragma nocover
server().start()
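# --- Hedged usage note (not part of the original example) ---
# middleware_in_executor runs the blocking Flask app in a thread pool so
# pulsar's event loop stays responsive; started directly from the command line:
#
#     python manage.py    # pulsar's WSGIServer picks its default bind address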
|
{
"content_hash": "14bdaf6caa229513d49d4e69ddb5d1a3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 20.454545454545453,
"alnum_prop": 0.5896296296296296,
"repo_name": "nooperpudd/pulsar",
"id": "f04446a933965d86c673c2516d1f944127484836",
"size": "675",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/flaskapp/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "553"
},
{
"name": "C++",
"bytes": "1517"
},
{
"name": "CSS",
"bytes": "2152"
},
{
"name": "HTML",
"bytes": "11096"
},
{
"name": "JavaScript",
"bytes": "27140"
},
{
"name": "Python",
"bytes": "1239402"
}
],
"symlink_target": ""
}
|